blob: 8d8908d2a9b14ab62244f8cf0f5f3ccf2e1cfa41 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*******************************************************************************
2
Auke Kok0abb6eb2006-09-27 12:53:14 -07003 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 more details.
Auke Kok0abb6eb2006-09-27 12:53:14 -070014
Linus Torvalds1da177e2005-04-16 15:20:36 -070015 You should have received a copy of the GNU General Public License along with
Auke Kok0abb6eb2006-09-27 12:53:14 -070016 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 Contact Information:
23 Linux NICS <linux.nics@intel.com>
Auke Kok3d41e302006-04-14 19:05:31 -070024 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "e1000.h"
Andrew Mortond0bb53e2006-11-14 10:35:03 -050030#include <net/ip6_checksum.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000031#include <linux/io.h>
Paul Gortmaker70c71602011-05-22 16:47:17 -040032#include <linux/prefetch.h>
Jiri Pirko5622e402011-07-21 03:26:31 +000033#include <linux/bitops.h>
34#include <linux/if_vlan.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000035
/* Driver identity strings.  e1000_driver_name and e1000_driver_version are
 * non-static because they are referenced from other files of the driver
 * (e.g. the ethtool support code). */
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";

/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
/* Every entry is an Intel device; the INTEL_E1000_ETHERNET_DEVICE macro
 * supplies PCI_VENDOR_ID_INTEL, so only the device IDs are listed here. */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
/* Exported entry points shared with the rest of the driver (ethtool etc.). */
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
/* Per-ring resource setup/teardown helpers. */
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

/* Module lifetime, probe/remove and net_device callback prototypes. */
static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
/* Deferred work handlers (scheduled from the interrupt/watchdog paths). */
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
/* NAPI poll path: clean_tx/clean_rx variants plus buffer replenishment. */
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

/* VLAN offload / filtering support. */
static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
172
/* Legacy (pre-dev_pm_ops) power-management hooks, only built with CONFIG_PM. */
#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll (struct net_device *netdev);
#endif

/* Receive packets up to this many bytes are copied into a fresh skb so the
 * original (large) buffer can be recycled; 0 disables copying entirely.
 * Tunable at module load and at runtime via sysfs (mode 0644). */
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

/* PCI error-recovery (AER) callbacks. */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
		     pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -0400200
/* Ties the device table, probe/remove, power management and error-recovery
 * callbacks together; registered with the PCI core in e1000_init_module(). */
static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
214
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Message-level control: -1 (the default) means "use DEFAULT_MSG_ENABLE";
 * otherwise the value is interpreted as a netif msg-level bitmap. */
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
224
225/**
Emil Tantilov675ad472010-04-27 14:02:58 +0000226 * e1000_get_hw_dev - return device
227 * used by hardware layer to print debugging information
228 *
229 **/
230struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
231{
232 struct e1000_adapter *adapter = hw->back;
233 return adapter->netdev;
234}
235
236/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 * e1000_init_module - Driver Registration Routine
238 *
239 * e1000_init_module is the first routine called when the driver is
240 * loaded. All it does is register with the PCI subsystem.
241 **/
242
Joe Perches64798842008-07-11 15:17:02 -0700243static int __init e1000_init_module(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244{
245 int ret;
Emil Tantilov675ad472010-04-27 14:02:58 +0000246 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247
Emil Tantilov675ad472010-04-27 14:02:58 +0000248 pr_info("%s\n", e1000_copyright);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249
Jeff Garzik29917622006-08-19 17:48:59 -0400250 ret = pci_register_driver(&e1000_driver);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100251 if (copybreak != COPYBREAK_DEFAULT) {
252 if (copybreak == 0)
Emil Tantilov675ad472010-04-27 14:02:58 +0000253 pr_info("copybreak disabled\n");
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100254 else
Emil Tantilov675ad472010-04-27 14:02:58 +0000255 pr_info("copybreak enabled for "
256 "packets <= %u bytes\n", copybreak);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100257 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258 return ret;
259}
260
261module_init(e1000_init_module);
262
263/**
264 * e1000_exit_module - Driver Exit Cleanup Routine
265 *
266 * e1000_exit_module is called just before the driver is removed
267 * from memory.
268 **/
269
Joe Perches64798842008-07-11 15:17:02 -0700270static void __exit e1000_exit_module(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 pci_unregister_driver(&e1000_driver);
273}
274
275module_exit(e1000_exit_module);
276
Auke Kok2db10a02006-06-27 09:06:28 -0700277static int e1000_request_irq(struct e1000_adapter *adapter)
278{
279 struct net_device *netdev = adapter->netdev;
Al Viro3e188262007-12-11 19:49:39 +0000280 irq_handler_t handler = e1000_intr;
Auke Koke94bd232007-05-16 01:49:46 -0700281 int irq_flags = IRQF_SHARED;
282 int err;
Auke Kok2db10a02006-06-27 09:06:28 -0700283
Auke Koke94bd232007-05-16 01:49:46 -0700284 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
285 netdev);
286 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -0700287 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
Auke Koke94bd232007-05-16 01:49:46 -0700288 }
Auke Kok2db10a02006-06-27 09:06:28 -0700289
290 return err;
291}
292
293static void e1000_free_irq(struct e1000_adapter *adapter)
294{
295 struct net_device *netdev = adapter->netdev;
296
297 free_irq(adapter->pdev->irq, netdev);
Auke Kok2db10a02006-06-27 09:06:28 -0700298}
299
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* mask every interrupt cause, then flush the posted write so the
	 * device has really stopped asserting before we wait for any
	 * in-flight handler to complete */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}
313
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* unmask the standard set of causes and flush the posted write so
	 * the setting takes effect immediately */
	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100326
/* Keep the manageability (firmware) VLAN registered with the stack.
 * If the management cookie's VLAN is not already active, add it (when the
 * cookie advertises VLAN support) and drop the previously tracked
 * management VLAN if it is no longer referenced by anyone. */
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	/* nothing to track unless VLANs are in use at all */
	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		/* retire the old management vid only if it changed and no
		 * one else still has it active */
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
Jeff Kirsherb55ccb32006-01-12 16:50:30 -0800353
Joe Perches64798842008-07-11 15:17:02 -0700354static void e1000_init_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500355{
Joe Perches1dc32912008-07-11 15:17:08 -0700356 struct e1000_hw *hw = &adapter->hw;
357
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500358 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700359 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500360
361 /* disable hardware interception of ARP */
362 manc &= ~(E1000_MANC_ARP_EN);
363
Joe Perches1dc32912008-07-11 15:17:08 -0700364 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500365 }
366}
367
Joe Perches64798842008-07-11 15:17:02 -0700368static void e1000_release_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500369{
Joe Perches1dc32912008-07-11 15:17:08 -0700370 struct e1000_hw *hw = &adapter->hw;
371
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500372 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700373 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500374
375 /* re-enable hardware interception of ARP */
376 manc |= E1000_MANC_ARP_EN;
377
Joe Perches1dc32912008-07-11 15:17:08 -0700378 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500379 }
380}
381
Auke Koke0aac5a2007-03-06 08:57:21 -0800382/**
383 * e1000_configure - configure the hardware for RX and TX
384 * @adapter = private board structure
385 **/
386static void e1000_configure(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387{
388 struct net_device *netdev = adapter->netdev;
Auke Kok2db10a02006-06-27 09:06:28 -0700389 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700390
Patrick McHardydb0ce502007-11-13 20:54:59 -0800391 e1000_set_rx_mode(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392
393 e1000_restore_vlan(adapter);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500394 e1000_init_manageability(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395
396 e1000_configure_tx(adapter);
397 e1000_setup_rctl(adapter);
398 e1000_configure_rx(adapter);
Jeff Kirsher72d64a42006-01-12 16:51:19 -0800399 /* call E1000_DESC_UNUSED which always leaves
400 * at least 1 descriptor unused to make sure
401 * next_to_use != next_to_clean */
Jeff Kirsherf56799e2006-01-12 16:50:39 -0800402 for (i = 0; i < adapter->num_rx_queues; i++) {
Jeff Kirsher72d64a42006-01-12 16:51:19 -0800403 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
Jeff Kirshera292ca62006-01-12 16:51:30 -0800404 adapter->alloc_rx_buf(adapter, ring,
405 E1000_DESC_UNUSED(ring));
Jeff Kirsherf56799e2006-01-12 16:50:39 -0800406 }
Auke Koke0aac5a2007-03-06 08:57:21 -0800407}
Jeff Kirsher7bfa4812006-01-12 16:50:41 -0800408
/* Bring a (previously reset) adapter into the running state: reprogram
 * the hardware, clear the DOWN flag, enable NAPI and interrupts, wake the
 * transmit queue, and kick a link-change interrupt so the watchdog runs.
 * The ordering here is deliberate; do not reorder the steps. */
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
428
Auke Kok79f05bf2006-06-27 09:06:32 -0700429/**
430 * e1000_power_up_phy - restore link in case the phy was powered down
431 * @adapter: address of board private structure
432 *
433 * The phy may be powered down to save power and turn off link when the
434 * driver is unloaded and wake on lan is not enabled (among others)
435 * *** this routine MUST be followed by a call to e1000_reset ***
436 *
437 **/
438
Jesse Brandeburgd6582662006-08-16 13:31:33 -0700439void e1000_power_up_phy(struct e1000_adapter *adapter)
Auke Kok79f05bf2006-06-27 09:06:32 -0700440{
Joe Perches1dc32912008-07-11 15:17:08 -0700441 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -0700442 u16 mii_reg = 0;
Auke Kok79f05bf2006-06-27 09:06:32 -0700443
444 /* Just clear the power down bit to wake the phy back up */
Joe Perches1dc32912008-07-11 15:17:08 -0700445 if (hw->media_type == e1000_media_type_copper) {
Auke Kok79f05bf2006-06-27 09:06:32 -0700446 /* according to the manual, the phy will retain its
447 * settings across a power-down/up cycle */
Joe Perches1dc32912008-07-11 15:17:08 -0700448 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700449 mii_reg &= ~MII_CR_POWER_DOWN;
Joe Perches1dc32912008-07-11 15:17:08 -0700450 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700451 }
452}
453
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	   hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		/* only the MAC types listed below may power the PHY down,
		 * and even then not while management firmware is using the
		 * SMBus interface */
		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		/* set the power-down bit and give the PHY time to act */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}
492
/* Mark the adapter DOWN and synchronously cancel all deferred work.
 * __E1000_DOWN must be set first so the handlers do not reschedule
 * themselves while we are cancelling them. */
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}
505
/* Quiesce the adapter: stop receive and transmit in hardware, drain NAPI,
 * disable interrupts, stop all deferred work, report link loss, then reset
 * the device and flush both ring directions.  The sequence is order-
 * sensitive (see the inline comments); do not rearrange it. */
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/*
	 * Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547
/* Restart the adapter from a context that does not hold RTNL: serialize
 * against concurrent resets with the __E1000_RESETTING flag and against
 * other driver paths with adapter->mutex, then cycle down/up. */
static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	/* spin (sleeping) until we own the resetting flag */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
558
/* Restart the adapter; the caller must hold RTNL (asserted below) and must
 * not be in interrupt context.  Serializes with other resets via the
 * __E1000_RESETTING flag. */
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	/* spin (sleeping) until we own the resetting flag */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
570
/**
 * e1000_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer (PBA) between Rx and Tx FIFOs based on
 * MAC type and current max frame size, derives the flow-control
 * watermarks from the resulting Rx allocation, then resets and
 * re-initializes the MAC.  Called at probe time and whenever settings
 * that require a full reset (MTU, offloads, ...) change.
 */
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	/* select the default PBA split and whether this MAC uses the
	 * older fixed "subtract 8KB for jumbo" adjustment scheme
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		/* 82547 works around a Tx FIFO hang by bouncing packets
		 * through a software FIFO; size it from the PBA leftovers
		 */
		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/*
		 * the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);	/* clear Wake Up Control after reset */

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
723
724/**
Auke Kok67b3c272007-12-17 13:50:23 -0800725 * Dump the eeprom for users having checksum issues
726 **/
Adrian Bunkb4ea8952008-02-01 08:21:28 -0800727static void e1000_dump_eeprom(struct e1000_adapter *adapter)
Auke Kok67b3c272007-12-17 13:50:23 -0800728{
729 struct net_device *netdev = adapter->netdev;
730 struct ethtool_eeprom eeprom;
731 const struct ethtool_ops *ops = netdev->ethtool_ops;
732 u8 *data;
733 int i;
734 u16 csum_old, csum_new = 0;
735
736 eeprom.len = ops->get_eeprom_len(netdev);
737 eeprom.offset = 0;
738
739 data = kmalloc(eeprom.len, GFP_KERNEL);
Joe Perchese404dec2012-01-29 12:56:23 +0000740 if (!data)
Auke Kok67b3c272007-12-17 13:50:23 -0800741 return;
Auke Kok67b3c272007-12-17 13:50:23 -0800742
743 ops->get_eeprom(netdev, &eeprom, data);
744
745 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
746 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
747 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
748 csum_new += data[i] + (data[i + 1] << 8);
749 csum_new = EEPROM_SUM - csum_new;
750
Emil Tantilov675ad472010-04-27 14:02:58 +0000751 pr_err("/*********************/\n");
752 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
753 pr_err("Calculated : 0x%04x\n", csum_new);
Auke Kok67b3c272007-12-17 13:50:23 -0800754
Emil Tantilov675ad472010-04-27 14:02:58 +0000755 pr_err("Offset Values\n");
756 pr_err("======== ======\n");
Auke Kok67b3c272007-12-17 13:50:23 -0800757 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
758
Emil Tantilov675ad472010-04-27 14:02:58 +0000759 pr_err("Include this output when contacting your support provider.\n");
760 pr_err("This is not a software error! Something bad happened to\n");
761 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
762 pr_err("result in further problems, possibly loss of data,\n");
763 pr_err("corruption or system hangs!\n");
764 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
765 pr_err("which is invalid and requires you to set the proper MAC\n");
766 pr_err("address manually before continuing to enable this network\n");
767 pr_err("device. Please inspect the EEPROM dump and report the\n");
768 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
769 pr_err("/*********************/\n");
Auke Kok67b3c272007-12-17 13:50:23 -0800770
771 kfree(data);
772}
773
774/**
Taku Izumi81250292008-07-11 15:17:44 -0700775 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
776 * @pdev: PCI device information struct
777 *
778 * Return true if an adapter needs ioport resources
779 **/
780static int e1000_is_need_ioport(struct pci_dev *pdev)
781{
782 switch (pdev->device) {
783 case E1000_DEV_ID_82540EM:
784 case E1000_DEV_ID_82540EM_LOM:
785 case E1000_DEV_ID_82540EP:
786 case E1000_DEV_ID_82540EP_LOM:
787 case E1000_DEV_ID_82540EP_LP:
788 case E1000_DEV_ID_82541EI:
789 case E1000_DEV_ID_82541EI_MOBILE:
790 case E1000_DEV_ID_82541ER:
791 case E1000_DEV_ID_82541ER_LOM:
792 case E1000_DEV_ID_82541GI:
793 case E1000_DEV_ID_82541GI_LF:
794 case E1000_DEV_ID_82541GI_MOBILE:
795 case E1000_DEV_ID_82544EI_COPPER:
796 case E1000_DEV_ID_82544EI_FIBER:
797 case E1000_DEV_ID_82544GC_COPPER:
798 case E1000_DEV_ID_82544GC_LOM:
799 case E1000_DEV_ID_82545EM_COPPER:
800 case E1000_DEV_ID_82545EM_FIBER:
801 case E1000_DEV_ID_82546EB_COPPER:
802 case E1000_DEV_ID_82546EB_FIBER:
803 case E1000_DEV_ID_82546EB_QUAD_COPPER:
804 return true;
805 default:
806 return false;
807 }
808}
809
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000810static netdev_features_t e1000_fix_features(struct net_device *netdev,
811 netdev_features_t features)
Jiri Pirko5622e402011-07-21 03:26:31 +0000812{
813 /*
814 * Since there is no support for separate rx/tx vlan accel
815 * enable/disable make sure tx flag is always in same state as rx.
816 */
817 if (features & NETIF_F_HW_VLAN_RX)
818 features |= NETIF_F_HW_VLAN_TX;
819 else
820 features &= ~NETIF_F_HW_VLAN_TX;
821
822 return features;
823}
824
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000825static int e1000_set_features(struct net_device *netdev,
826 netdev_features_t features)
Michał Mirosławe97d3202011-06-08 08:36:42 +0000827{
828 struct e1000_adapter *adapter = netdev_priv(netdev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000829 netdev_features_t changed = features ^ netdev->features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000830
Jiri Pirko5622e402011-07-21 03:26:31 +0000831 if (changed & NETIF_F_HW_VLAN_RX)
832 e1000_vlan_mode(netdev, features);
833
Michał Mirosławe97d3202011-06-08 08:36:42 +0000834 if (!(changed & NETIF_F_RXCSUM))
835 return 0;
836
837 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
838
839 if (netif_running(netdev))
840 e1000_reinit_locked(adapter);
841 else
842 e1000_reset(adapter);
843
844 return 0;
845}
846
/* net_device_ops callbacks wired into the network core for this driver.
 * ndo_fix_features/ndo_set_features keep the Rx/Tx VLAN acceleration
 * flags in lockstep, since the hardware has no separate enable for them.
 */
static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};
866
Taku Izumi81250292008-07-11 15:17:44 -0700867/**
Jesse Brandeburge508be12010-09-07 21:01:12 +0000868 * e1000_init_hw_struct - initialize members of hw struct
869 * @adapter: board private struct
870 * @hw: structure used by e1000_hw.c
871 *
872 * Factors out initialization of the e1000_hw struct to its own function
873 * that can be called very early at init (just after struct allocation).
874 * Fields are initialized based on PCI device information and
875 * OS network device settings (MTU size).
876 * Returns negative error codes if MAC type setup fails.
877 */
878static int e1000_init_hw_struct(struct e1000_adapter *adapter,
879 struct e1000_hw *hw)
880{
881 struct pci_dev *pdev = adapter->pdev;
882
883 /* PCI config space info */
884 hw->vendor_id = pdev->vendor;
885 hw->device_id = pdev->device;
886 hw->subsystem_vendor_id = pdev->subsystem_vendor;
887 hw->subsystem_id = pdev->subsystem_device;
888 hw->revision_id = pdev->revision;
889
890 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
891
892 hw->max_frame_size = adapter->netdev->mtu +
893 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
894 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
895
896 /* identify the MAC */
897 if (e1000_set_mac_type(hw)) {
898 e_err(probe, "Unknown MAC Type\n");
899 return -EIO;
900 }
901
902 switch (hw->mac_type) {
903 default:
904 break;
905 case e1000_82541:
906 case e1000_82547:
907 case e1000_82541_rev_2:
908 case e1000_82547_rev_2:
909 hw->phy_init_script = 1;
910 break;
911 }
912
913 e1000_set_media_type(hw);
914 e1000_get_bus_info(hw);
915
916 hw->wait_autoneg_complete = false;
917 hw->tbi_compatibility_en = true;
918 hw->adaptive_ifs = true;
919
920 /* Copper options */
921
922 if (hw->media_type == e1000_media_type_copper) {
923 hw->mdix = AUTO_ALL_MODES;
924 hw->disable_polarity_correction = false;
925 hw->master_slave = E1000_MASTER_SLAVE;
926 }
927
928 return 0;
929}
930
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	/* saved config state is restored on resume / error recovery */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* wire up the netdev <-> adapter <-> pdev back-pointers */
	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	/* find the first I/O BAR, if this part needs one */
	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/*
	 * there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/*
		 * according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	/* temporary name until register_netdev assigns ethN below */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	/* CE4100 has its MDIO registers behind a second BAR */
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
			ioremap(pci_resource_start(pdev, BAR_1),
				pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	/* advertise offload features supported by this MAC generation */
	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_RX;
		netdev->features = NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= NETIF_F_RXCSUM;
	netdev->hw_features |= NETIF_F_RXFCS;

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/*
		 * set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initalization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err(probe, "Invalid MAC Address\n");


	/* deferred work: link watchdog, 82547 FIFO stall, PHY info, reset */
	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1){
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			/* 0 / 0xFF means no PHY responds at this address;
			 * fail the probe if none of the 32 addresses answers
			 */
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

	/* error unwinding: labels release, in reverse order, everything
	 * acquired before the corresponding goto */
err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	/* NOTE(review): unmapped unconditionally; for non-CE4100 parts this
	 * relies on ce4100_gbe_mdio_base_virt still being NULL from the
	 * zeroed netdev private area — verify iounmap(NULL) is safe on all
	 * supported arches or make this conditional on mac_type */
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}
1265
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* stop deferred work and bring the adapter down before teardown */
	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	/* detach from the network stack before freeing resources */
	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	/* CE4100 parts carry an extra MDIO register mapping */
	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
1303
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 *
 * Returns 0 on success, -ENOMEM if the ring allocations fail.
 **/

static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	/* default to the largest buffer that still fits a VLAN-tagged frame */
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	/* this driver only ever uses a single Tx and a single Rx queue */
	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);
	mutex_init(&adapter->mutex);

	/* mark the interface down until e1000_open() brings it up */
	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}
1334
1335/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001336 * e1000_alloc_queues - Allocate memory for all rings
1337 * @adapter: board private structure to initialize
1338 *
1339 * We allocate one ring per queue at run-time since we don't know the
Wang Chen3e1d7cd2008-12-03 22:07:10 -08001340 * number of queues at compile-time.
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001341 **/
1342
Joe Perches64798842008-07-11 15:17:02 -07001343static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001344{
Yan Burman1c7e5b12007-03-06 08:58:04 -08001345 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1346 sizeof(struct e1000_tx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001347 if (!adapter->tx_ring)
1348 return -ENOMEM;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001349
Yan Burman1c7e5b12007-03-06 08:58:04 -08001350 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1351 sizeof(struct e1000_rx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001352 if (!adapter->rx_ring) {
1353 kfree(adapter->tx_ring);
1354 return -ENOMEM;
1355 }
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001356
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001357 return E1000_SUCCESS;
1358}
1359
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/

static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

	/* unwind in reverse order of acquisition */
err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
1437
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* close must never race with a reset in progress */
	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	return 0;
}
1473
1474/**
1475 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1476 * @adapter: address of board private structure
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001477 * @start: address of beginning of memory
1478 * @len: length of memory
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 **/
Joe Perches64798842008-07-11 15:17:02 -07001480static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1481 unsigned long len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482{
Joe Perches1dc32912008-07-11 15:17:08 -07001483 struct e1000_hw *hw = &adapter->hw;
Joe Perchese982f172008-07-11 15:17:18 -07001484 unsigned long begin = (unsigned long)start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 unsigned long end = begin + len;
1486
Malli Chilakala26483452005-04-28 19:44:46 -07001487 /* First rev 82545 and 82546 need to not allow any memory
1488 * write location to cross 64k boundary due to errata 23 */
Joe Perches1dc32912008-07-11 15:17:08 -07001489 if (hw->mac_type == e1000_82545 ||
Dirk Brandewie5377a412011-01-06 14:29:54 +00001490 hw->mac_type == e1000_ce4100 ||
Joe Perches1dc32912008-07-11 15:17:08 -07001491 hw->mac_type == e1000_82546) {
Joe Perchesc3033b02008-03-21 11:06:25 -07001492 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 }
1494
Joe Perchesc3033b02008-03-21 11:06:25 -07001495 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496}
1497
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/

static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	/* one software-side bookkeeping entry per hardware descriptor */
	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous: keeping the bad
		 * allocation alive forces the allocator to hand back a
		 * different (hopefully aligned) region */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
1574
1575/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001576 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1577 * (Descriptors) for all queues
1578 * @adapter: board private structure
1579 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001580 * Return 0 on success, negative on failure
1581 **/
1582
Joe Perches64798842008-07-11 15:17:02 -07001583int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001584{
1585 int i, err = 0;
1586
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001587 for (i = 0; i < adapter->num_tx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001588 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1589 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001590 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001591 for (i-- ; i >= 0; i--)
1592 e1000_free_tx_resources(adapter,
1593 &adapter->tx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001594 break;
1595 }
1596 }
1597
1598 return err;
1599}
1600
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		/* program ring base/length and reset head/tail to zero */
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		/* pre-82543 parts use a different register layout */
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		/* the oldest parts need their own IPG values */
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* report-packet-sent on old MACs, report-status on newer ones */
	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	/* write TCTL last so the unit starts with everything configured */
	ew32(TCTL, tctl);

}
1692
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	/* one software-side bookkeeping entry per hardware descriptor */
	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);

	if (!rxdr->desc) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous: keeping the bad
		 * allocation alive forces the allocator to hand back a
		 * different (hopefully aligned) region */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate memory for the Rx "
			      "descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}
1774
1775/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001776 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1777 * (Descriptors) for all queues
1778 * @adapter: board private structure
1779 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001780 * Return 0 on success, negative on failure
1781 **/
1782
Joe Perches64798842008-07-11 15:17:02 -07001783int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001784{
1785 int i, err = 0;
1786
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001787 for (i = 0; i < adapter->num_rx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001788 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1789 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001790 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001791 for (i-- ; i >= 0; i--)
1792 e1000_free_rx_resources(adapter,
1793 &adapter->rx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001794 break;
1795 }
1796 }
1797
1798 return err;
1799}
1800
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	/* clear the multicast-offset field before re-programming it */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* store-bad-packets is needed for TBI compatibility workaround */
	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* long-packet-enable only when the MTU requires jumbo frames */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		/* 2048 is encoded without the buffer-size-extension bit */
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	ew32(RCTL, rctl);
}
1850
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	/* pick the jumbo or the normal receive path based on the MTU */
	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
		/* interrupt throttle rate: convert ITR (ints/sec) into the
		 * register's 256 ns units */
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		/* pre-82543 parts use a different register layout */
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl | E1000_RCTL_EN);
}
1919
1920/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001921 * e1000_free_tx_resources - Free Tx Resources per Queue
1922 * @adapter: board private structure
1923 * @tx_ring: Tx descriptor ring for a specific queue
1924 *
1925 * Free all transmit software resources
1926 **/
1927
Joe Perches64798842008-07-11 15:17:02 -07001928static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1929 struct e1000_tx_ring *tx_ring)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001930{
1931 struct pci_dev *pdev = adapter->pdev;
1932
1933 e1000_clean_tx_ring(adapter, tx_ring);
1934
1935 vfree(tx_ring->buffer_info);
1936 tx_ring->buffer_info = NULL;
1937
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001938 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1939 tx_ring->dma);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001940
1941 tx_ring->desc = NULL;
1942}
1943
1944/**
1945 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 * @adapter: board private structure
1947 *
1948 * Free all transmit software resources
1949 **/
1950
Joe Perches64798842008-07-11 15:17:02 -07001951void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952{
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001953 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001955 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001956 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957}
1958
Joe Perches64798842008-07-11 15:17:02 -07001959static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1960 struct e1000_buffer *buffer_info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961{
Alexander Duyck602c0552009-12-02 16:46:00 +00001962 if (buffer_info->dma) {
1963 if (buffer_info->mapped_as_page)
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001964 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1965 buffer_info->length, DMA_TO_DEVICE);
Alexander Duyck602c0552009-12-02 16:46:00 +00001966 else
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001967 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
Alexander Duyck602c0552009-12-02 16:46:00 +00001968 buffer_info->length,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001969 DMA_TO_DEVICE);
Alexander Duyck602c0552009-12-02 16:46:00 +00001970 buffer_info->dma = 0;
1971 }
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001972 if (buffer_info->skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 dev_kfree_skb_any(buffer_info->skb);
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001974 buffer_info->skb = NULL;
1975 }
Alexander Duyck37e73df2009-03-25 21:58:45 +00001976 buffer_info->time_stamp = 0;
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001977 /* buffer_info must be completely set up in the transmit path */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978}
1979
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/

static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
                                struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	/* wipe the per-buffer bookkeeping now that everything is released */
	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = false;

	/* reset the hardware head/tail pointers to match the empty ring */
	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
}
2015
2016/**
2017 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2018 * @adapter: board private structure
2019 **/
2020
Joe Perches64798842008-07-11 15:17:02 -07002021static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002022{
2023 int i;
2024
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002025 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002026 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027}
2028
2029/**
2030 * e1000_free_rx_resources - Free Rx Resources
2031 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002032 * @rx_ring: ring to clean the resources from
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 *
2034 * Free all receive software resources
2035 **/
2036
Joe Perches64798842008-07-11 15:17:02 -07002037static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2038 struct e1000_rx_ring *rx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 struct pci_dev *pdev = adapter->pdev;
2041
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002042 e1000_clean_rx_ring(adapter, rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043
2044 vfree(rx_ring->buffer_info);
2045 rx_ring->buffer_info = NULL;
2046
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002047 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2048 rx_ring->dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049
2050 rx_ring->desc = NULL;
2051}
2052
2053/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002054 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002056 *
2057 * Free all receive software resources
2058 **/
2059
Joe Perches64798842008-07-11 15:17:02 -07002060void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002061{
2062 int i;
2063
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002064 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002065 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2066}
2067
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/

static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
                                struct e1000_rx_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		/* unmap with the variant matching how the active receive
		 * path mapped the buffer: single mappings for the normal
		 * path, page mappings for the jumbo path */
		if (buffer_info->dma &&
		    adapter->clean_rx == e1000_clean_rx_irq) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
			                 buffer_info->length,
			                 DMA_FROM_DEVICE);
		} else if (buffer_info->dma &&
		           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
			dma_unmap_page(&pdev->dev, buffer_info->dma,
			               buffer_info->length,
			               DMA_FROM_DEVICE);
		}

		buffer_info->dma = 0;
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* wipe the per-buffer bookkeeping now that everything is released */
	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* reset the hardware head/tail pointers to match the empty ring */
	writel(0, hw->hw_addr + rx_ring->rdh);
	writel(0, hw->hw_addr + rx_ring->rdt);
}
2127
2128/**
2129 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2130 * @adapter: board private structure
2131 **/
2132
Joe Perches64798842008-07-11 15:17:02 -07002133static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002134{
2135 int i;
2136
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002137 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002138 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139}
2140
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* MWI must be off while the receive unit is held in reset */
	e1000_pci_clear_mwi(hw);

	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);	/* give the receive unit time to enter reset */

	/* receives are dead while in reset - drop anything still queued */
	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
2161
static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* take the receive unit back out of reset */
	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);	/* give the receive unit time to leave reset */

	/* restore memory-write-invalidate if the PCI command word enables it */
	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		/* reprogram the Rx unit and refill the ring emptied by
		 * e1000_enter_82542_rst() */
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
2184
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* keep the netdev and hw software copies in sync with RAR 0 */
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);

	e1000_rar_set(hw, hw->mac_addr, 0);

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}
2217
/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void e1000_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	bool use_uc = false;
	u32 rctl;
	u32 hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
	/* shadow copy of the multicast hash table, written back in one pass
	 * at the end (GFP_ATOMIC: may be called in atomic context) */
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);

	if (!mcarray) {
		e_err(probe, "memory allocation failed\n");
		return;
	}

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rctl |= E1000_RCTL_MPE;
		else
			rctl &= ~E1000_RCTL_MPE;
		/* Enable VLAN filter if there is a VLAN */
		if (e1000_vlan_used(adapter))
			rctl |= E1000_RCTL_VFE;
	}

	/* RAR 0 holds the station address, so only rar_entries - 1 slots
	 * remain for secondary unicast addresses; if they don't fit, fall
	 * back to unicast promiscuous mode instead */
	if (netdev_uc_count(netdev) > rar_entries - 1) {
		rctl |= E1000_RCTL_UPE;
	} else if (!(netdev->flags & IFF_PROMISC)) {
		rctl &= ~E1000_RCTL_UPE;
		use_uc = true;
	}

	ew32(RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address
	 * if there are not 14 addresses, go ahead and clear the filters
	 */
	i = 1;
	if (use_uc)
		netdev_for_each_uc_addr(ha, netdev) {
			if (i == rar_entries)
				break;
			e1000_rar_set(hw, ha->addr, i++);
		}

	netdev_for_each_mc_addr(ha, netdev) {
		if (i == rar_entries) {
			/* load any remaining addresses into the hash table */
			u32 hash_reg, hash_bit, mta;
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
		}
	}

	/* clear any unused RAR entries (each is a 64-bit register pair) */
	for (; i < rar_entries; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
		E1000_WRITE_FLUSH();
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		E1000_WRITE_FLUSH();
	}

	/* write the hash table completely, write from bottom to avoid
	 * both stupid write combining chipsets, and flushing each write */
	for (i = mta_reg_count - 1; i >= 0 ; i--) {
		/*
		 * If we are on an 82544 has an errata where writing odd
		 * offsets overwrites the previous even offset, but writing
		 * backwards over the range solves the issue by always
		 * writing the odd offset first
		 */
		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
	}
	E1000_WRITE_FLUSH();

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	kfree(mcarray);
}
2330
/**
 * e1000_update_phy_info_task - get phy info
 * @work: work struct contained inside adapter struct
 *
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void e1000_update_phy_info_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     phy_info_task.work);
	/* do nothing if the interface is going down */
	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	/* snapshot the current PHY diagnostics into adapter->phy_info */
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
	mutex_unlock(&adapter->mutex);
}
2349
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	if (atomic_read(&adapter->tx_fifo_stall)) {
		/* only safe to rewrite the FIFO pointers once both the
		 * descriptor ring and the on-chip Tx FIFO have fully
		 * drained (head == tail for all three) */
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			tctl = er32(TCTL);
			/* disable transmits while resetting FIFO pointers */
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			/* not drained yet - retry on the next timer tick */
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
	mutex_unlock(&adapter->mutex);
}
2388
Nick Nunleyb5481922010-02-03 14:49:28 +00002389bool e1000_has_link(struct e1000_adapter *adapter)
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002390{
2391 struct e1000_hw *hw = &adapter->hw;
2392 bool link_active = false;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002393
Nicolas Schichan6d9e5132011-07-09 00:24:18 +00002394 /* get_link_status is set on LSC (link status) interrupt or rx
2395 * sequence error interrupt (except on intel ce4100).
2396 * get_link_status will stay false until the
2397 * e1000_check_for_link establishes link for copper adapters
2398 * ONLY
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002399 */
2400 switch (hw->media_type) {
2401 case e1000_media_type_copper:
Nicolas Schichan6d9e5132011-07-09 00:24:18 +00002402 if (hw->mac_type == e1000_ce4100)
2403 hw->get_link_status = 1;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002404 if (hw->get_link_status) {
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07002405 e1000_check_for_link(hw);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002406 link_active = !hw->get_link_status;
2407 } else {
2408 link_active = true;
2409 }
2410 break;
2411 case e1000_media_type_fiber:
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07002412 e1000_check_for_link(hw);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002413 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2414 break;
2415 case e1000_media_type_internal_serdes:
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07002416 e1000_check_for_link(hw);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002417 link_active = hw->serdes_has_link;
2418 break;
2419 default:
2420 break;
2421 }
2422
2423 return link_active;
2424}
2425
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426/**
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002427 * e1000_watchdog - work function
2428 * @work: work struct contained inside adapter struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 **/
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002430static void e1000_watchdog(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431{
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002432 struct e1000_adapter *adapter = container_of(work,
2433 struct e1000_adapter,
2434 watchdog_task.work);
Joe Perches1dc32912008-07-11 15:17:08 -07002435 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 struct net_device *netdev = adapter->netdev;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002437 struct e1000_tx_ring *txdr = adapter->tx_ring;
Joe Perches406874a2008-04-03 10:06:32 -07002438 u32 link, tctl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002440 if (test_bit(__E1000_DOWN, &adapter->flags))
2441 return;
2442
2443 mutex_lock(&adapter->mutex);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002444 link = e1000_has_link(adapter);
2445 if ((netif_carrier_ok(netdev)) && link)
2446 goto link_up;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002448 if (link) {
2449 if (!netif_carrier_ok(netdev)) {
Joe Perches406874a2008-04-03 10:06:32 -07002450 u32 ctrl;
Joe Perchesc3033b02008-03-21 11:06:25 -07002451 bool txb2b = true;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002452 /* update snapshot of PHY registers on LSC */
Joe Perches1dc32912008-07-11 15:17:08 -07002453 e1000_get_speed_and_duplex(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 &adapter->link_speed,
2455 &adapter->link_duplex);
2456
Joe Perches1dc32912008-07-11 15:17:08 -07002457 ctrl = er32(CTRL);
Emil Tantilov675ad472010-04-27 14:02:58 +00002458 pr_info("%s NIC Link is Up %d Mbps %s, "
2459 "Flow Control: %s\n",
2460 netdev->name,
2461 adapter->link_speed,
2462 adapter->link_duplex == FULL_DUPLEX ?
2463 "Full Duplex" : "Half Duplex",
2464 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2465 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2466 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2467 E1000_CTRL_TFCE) ? "TX" : "None")));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468
Emil Tantilov39ca5f02010-03-26 11:25:58 +00002469 /* adjust timeout factor according to speed/duplex */
Jeff Kirsher66a2b0a2006-01-12 16:50:53 -08002470 adapter->tx_timeout_factor = 1;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002471 switch (adapter->link_speed) {
2472 case SPEED_10:
Joe Perchesc3033b02008-03-21 11:06:25 -07002473 txb2b = false;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002474 adapter->tx_timeout_factor = 16;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002475 break;
2476 case SPEED_100:
Joe Perchesc3033b02008-03-21 11:06:25 -07002477 txb2b = false;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002478 /* maybe add some timeout factor ? */
2479 break;
2480 }
2481
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002482 /* enable transmits in the hardware */
Joe Perches1dc32912008-07-11 15:17:08 -07002483 tctl = er32(TCTL);
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002484 tctl |= E1000_TCTL_EN;
Joe Perches1dc32912008-07-11 15:17:08 -07002485 ew32(TCTL, tctl);
Jeff Kirsher66a2b0a2006-01-12 16:50:53 -08002486
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487 netif_carrier_on(netdev);
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002488 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002489 schedule_delayed_work(&adapter->phy_info_task,
2490 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 adapter->smartspeed = 0;
2492 }
2493 } else {
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002494 if (netif_carrier_ok(netdev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002495 adapter->link_speed = 0;
2496 adapter->link_duplex = 0;
Emil Tantilov675ad472010-04-27 14:02:58 +00002497 pr_info("%s NIC Link is Down\n",
2498 netdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499 netif_carrier_off(netdev);
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002500
2501 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002502 schedule_delayed_work(&adapter->phy_info_task,
2503 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504 }
2505
2506 e1000_smartspeed(adapter);
2507 }
2508
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002509link_up:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510 e1000_update_stats(adapter);
2511
Joe Perches1dc32912008-07-11 15:17:08 -07002512 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513 adapter->tpt_old = adapter->stats.tpt;
Joe Perches1dc32912008-07-11 15:17:08 -07002514 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515 adapter->colc_old = adapter->stats.colc;
2516
2517 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2518 adapter->gorcl_old = adapter->stats.gorcl;
2519 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2520 adapter->gotcl_old = adapter->stats.gotcl;
2521
Joe Perches1dc32912008-07-11 15:17:08 -07002522 e1000_update_adaptive(hw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002524 if (!netif_carrier_ok(netdev)) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002525 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526 /* We've lost link, so the controller stops DMA,
2527 * but we've got queued Tx work that's never going
2528 * to get done, so reset controller to flush Tx.
2529 * (Do the reset outside of interrupt context). */
Jeff Kirsher87041632006-03-02 18:21:24 -08002530 adapter->tx_timeout_count++;
2531 schedule_work(&adapter->reset_task);
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002532 /* exit immediately since reset is imminent */
2533 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534 }
2535 }
2536
Jesse Brandeburgeab2abf2010-05-04 22:26:03 +00002537 /* Simple mode for Interrupt Throttle Rate (ITR) */
2538 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2539 /*
2540 * Symmetric Tx/Rx gets a reduced ITR=2000;
2541 * Total asymmetrical Tx or Rx gets ITR=8000;
2542 * everyone else is between 2000-8000.
2543 */
2544 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2545 u32 dif = (adapter->gotcl > adapter->gorcl ?
2546 adapter->gotcl - adapter->gorcl :
2547 adapter->gorcl - adapter->gotcl) / 10000;
2548 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2549
2550 ew32(ITR, 1000000000 / (itr * 256));
2551 }
2552
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 /* Cause software interrupt to ensure rx ring is cleaned */
Joe Perches1dc32912008-07-11 15:17:08 -07002554 ew32(ICS, E1000_ICS_RXDMT0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555
Malli Chilakala26483452005-04-28 19:44:46 -07002556 /* Force detection of hung controller every watchdog period */
Joe Perchesc3033b02008-03-21 11:06:25 -07002557 adapter->detect_tx_hung = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002559 /* Reschedule the task */
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002560 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002561 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002562
2563unlock:
2564 mutex_unlock(&adapter->mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565}
2566
/* Buckets for the adaptive interrupt-moderation (ITR) algorithm.
 * e1000_update_itr() moves a ring between buckets based on observed
 * traffic; e1000_set_itr() maps each bucket to an interrupt rate
 * (lowest_latency -> 70000, low_latency -> 20000, bulk_latency -> 4000
 * ints/s).
 */
enum latency_range {
	lowest_latency = 0,	/* highest interrupt rate */
	low_latency = 1,	/* ~20000 ints/s */
	bulk_latency = 2,	/* ~4000 ints/s, throughput-oriented */
	latency_invalid = 255	/* out-of-band sentinel value */
};
2573
2574/**
2575 * e1000_update_itr - update the dynamic ITR value based on statistics
Jesse Brandeburg8fce4732009-09-25 12:18:41 +00002576 * @adapter: pointer to adapter
2577 * @itr_setting: current adapter->itr
2578 * @packets: the number of packets during this measurement interval
2579 * @bytes: the number of bytes during this measurement interval
2580 *
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002581 * Stores a new ITR value based on packets and byte
2582 * counts during the last interrupt. The advantage of per interrupt
2583 * computation is faster updates and more accurate ITR for the current
2584 * traffic pattern. Constants in this function were computed
2585 * based on theoretical maximum wire speed and thresholds were set based
2586 * on testing data as well as attempting to minimize response time
2587 * while increasing bulk throughput.
2588 * this functionality is controlled by the InterruptThrottleRate module
2589 * parameter (see e1000_param.c)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002590 **/
2591static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
Joe Perches64798842008-07-11 15:17:02 -07002592 u16 itr_setting, int packets, int bytes)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002593{
2594 unsigned int retval = itr_setting;
2595 struct e1000_hw *hw = &adapter->hw;
2596
2597 if (unlikely(hw->mac_type < e1000_82540))
2598 goto update_itr_done;
2599
2600 if (packets == 0)
2601 goto update_itr_done;
2602
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002603 switch (itr_setting) {
2604 case lowest_latency:
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002605 /* jumbo frames get bulk treatment*/
2606 if (bytes/packets > 8000)
2607 retval = bulk_latency;
2608 else if ((packets < 5) && (bytes > 512))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002609 retval = low_latency;
2610 break;
2611 case low_latency: /* 50 usec aka 20000 ints/s */
2612 if (bytes > 10000) {
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002613 /* jumbo frames need bulk latency setting */
2614 if (bytes/packets > 8000)
2615 retval = bulk_latency;
2616 else if ((packets < 10) || ((bytes/packets) > 1200))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002617 retval = bulk_latency;
2618 else if ((packets > 35))
2619 retval = lowest_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002620 } else if (bytes/packets > 2000)
2621 retval = bulk_latency;
2622 else if (packets <= 2 && bytes < 512)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002623 retval = lowest_latency;
2624 break;
2625 case bulk_latency: /* 250 usec aka 4000 ints/s */
2626 if (bytes > 25000) {
2627 if (packets > 35)
2628 retval = low_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002629 } else if (bytes < 6000) {
2630 retval = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002631 }
2632 break;
2633 }
2634
2635update_itr_done:
2636 return retval;
2637}
2638
2639static void e1000_set_itr(struct e1000_adapter *adapter)
2640{
2641 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07002642 u16 current_itr;
2643 u32 new_itr = adapter->itr;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002644
2645 if (unlikely(hw->mac_type < e1000_82540))
2646 return;
2647
2648 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2649 if (unlikely(adapter->link_speed != SPEED_1000)) {
2650 current_itr = 0;
2651 new_itr = 4000;
2652 goto set_itr_now;
2653 }
2654
2655 adapter->tx_itr = e1000_update_itr(adapter,
2656 adapter->tx_itr,
2657 adapter->total_tx_packets,
2658 adapter->total_tx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002659 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2660 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2661 adapter->tx_itr = low_latency;
2662
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002663 adapter->rx_itr = e1000_update_itr(adapter,
2664 adapter->rx_itr,
2665 adapter->total_rx_packets,
2666 adapter->total_rx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002667 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2668 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2669 adapter->rx_itr = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002670
2671 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2672
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002673 switch (current_itr) {
2674 /* counts and packets in update_itr are dependent on these numbers */
2675 case lowest_latency:
2676 new_itr = 70000;
2677 break;
2678 case low_latency:
2679 new_itr = 20000; /* aka hwitr = ~200 */
2680 break;
2681 case bulk_latency:
2682 new_itr = 4000;
2683 break;
2684 default:
2685 break;
2686 }
2687
2688set_itr_now:
2689 if (new_itr != adapter->itr) {
2690 /* this attempts to bias the interrupt rate towards Bulk
2691 * by adding intermediate steps when interrupt rate is
2692 * increasing */
2693 new_itr = new_itr > adapter->itr ?
2694 min(adapter->itr + (new_itr >> 2), new_itr) :
2695 new_itr;
2696 adapter->itr = new_itr;
Joe Perches1dc32912008-07-11 15:17:08 -07002697 ew32(ITR, 1000000000 / (new_itr * 256));
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002698 }
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002699}
2700
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701#define E1000_TX_FLAGS_CSUM 0x00000001
2702#define E1000_TX_FLAGS_VLAN 0x00000002
2703#define E1000_TX_FLAGS_TSO 0x00000004
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002704#define E1000_TX_FLAGS_IPV4 0x00000008
Ben Greear11a78dc2012-02-11 15:40:01 +00002705#define E1000_TX_FLAGS_NO_FCS 0x00000010
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2707#define E1000_TX_FLAGS_VLAN_SHIFT 16
2708
/* e1000_tso - set up a TSO context descriptor for a GSO skb
 *
 * For a GSO frame, primes the in-packet IP/TCP headers for hardware
 * segmentation (zeroes the length/checksum fields and seeds the TCP
 * pseudo-header checksum) and queues one context descriptor in
 * @tx_ring describing the header layout and MSS.
 *
 * Returns true if a context descriptor was queued, false for non-GSO
 * frames, or a negative errno if un-cloning the header data fails.
 */
static int e1000_tso(struct e1000_adapter *adapter,
		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	int err;

	if (skb_is_gso(skb)) {
		/* the headers are modified in place below, so make sure
		 * we have a private copy of them */
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			/* hardware recomputes these per segment */
			iph->tot_len = 0;
			iph->check = 0;
			/* seed the pseudo-header checksum (length 0) */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			/* no IP checksum insertion for v6 */
			ipcse = 0;
		}
		/* byte offsets of the header fields the hardware fills in */
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		/* payload length = total length minus the headers */
		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss = ipcss;
		context_desc->lower_setup.ip_fields.ipcso = ipcso;
		context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		/* advance (and wrap) the ring index */
		if (++i == tx_ring->count) i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
2780
Joe Perches64798842008-07-11 15:17:02 -07002781static bool e1000_tx_csum(struct e1000_adapter *adapter,
2782 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783{
2784 struct e1000_context_desc *context_desc;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002785 struct e1000_buffer *buffer_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786 unsigned int i;
Joe Perches406874a2008-04-03 10:06:32 -07002787 u8 css;
Dave Graham3ed30672008-10-09 14:29:26 -07002788 u32 cmd_len = E1000_TXD_CMD_DEXT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789
Dave Graham3ed30672008-10-09 14:29:26 -07002790 if (skb->ip_summed != CHECKSUM_PARTIAL)
2791 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792
Dave Graham3ed30672008-10-09 14:29:26 -07002793 switch (skb->protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08002794 case cpu_to_be16(ETH_P_IP):
Dave Graham3ed30672008-10-09 14:29:26 -07002795 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2796 cmd_len |= E1000_TXD_CMD_TCP;
2797 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08002798 case cpu_to_be16(ETH_P_IPV6):
Dave Graham3ed30672008-10-09 14:29:26 -07002799 /* XXX not handling all IPV6 headers */
2800 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2801 cmd_len |= E1000_TXD_CMD_TCP;
2802 break;
2803 default:
2804 if (unlikely(net_ratelimit()))
Emil Tantilovfeb8f472010-07-26 23:37:21 -07002805 e_warn(drv, "checksum_partial proto=%x!\n",
2806 skb->protocol);
Dave Graham3ed30672008-10-09 14:29:26 -07002807 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 }
2809
Michał Mirosław0d0b1672010-12-14 15:24:08 +00002810 css = skb_checksum_start_offset(skb);
Dave Graham3ed30672008-10-09 14:29:26 -07002811
2812 i = tx_ring->next_to_use;
2813 buffer_info = &tx_ring->buffer_info[i];
2814 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2815
2816 context_desc->lower_setup.ip_config = 0;
2817 context_desc->upper_setup.tcp_fields.tucss = css;
2818 context_desc->upper_setup.tcp_fields.tucso =
2819 css + skb->csum_offset;
2820 context_desc->upper_setup.tcp_fields.tucse = 0;
2821 context_desc->tcp_seg_setup.data = 0;
2822 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2823
2824 buffer_info->time_stamp = jiffies;
2825 buffer_info->next_to_watch = i;
2826
2827 if (unlikely(++i == tx_ring->count)) i = 0;
2828 tx_ring->next_to_use = i;
2829
2830 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831}
2832
2833#define E1000_MAX_TXD_PWR 12
2834#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2835
/* e1000_tx_map - DMA-map an skb onto the tx ring's buffer_info entries
 *
 * Maps the linear part and every page fragment of @skb, one buffer_info
 * per descriptor, applying the hardware workarounds below that shave
 * bytes off a buffer.  On success, returns the number of descriptors
 * used, records skb/segment/byte accounting on the last buffer_info and
 * links @first's next_to_watch to it.  On a DMA mapping failure,
 * unmaps everything mapped so far and returns 0.
 */
static int e1000_tx_map(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);	/* linear-part length */
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	/* map the linear part, splitting at max_per_txd */
	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = false;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
		             (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords. */
		if (unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		/* only advance the index when more data follows, so that
		 * i still refers to the last used entry afterwards */
		if (len) {
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;
		}
	}

	/* map each page fragment, same splitting and workarounds */
	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			unsigned long bufend;
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc */
			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
				size -= 4;
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords. */
			bufend = (unsigned long)
				page_to_phys(skb_frag_page(frag));
			bufend += offset + size - 1;
			if (unlikely(adapter->pcix_82544 &&
				     !(bufend & 4) &&
				     size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
						offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
		}
	}

	/* gso_segs is 0 for non-GSO skbs; treat those as one segment */
	segs = skb_shinfo(skb)->gso_segs ?: 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

	/* accounting lives on the last descriptor of the packet */
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	/* the entry that failed has no mapping to undo */
	if (count)
		count--;

	/* walk backwards (with wrap) unmapping everything we mapped */
	while (count--) {
		if (i==0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}
2977
/* e1000_tx_queue - write @count tx descriptors and notify the hardware
 *
 * Translates @tx_flags into descriptor command/option bits, fills in
 * one legacy tx descriptor per previously-mapped buffer_info starting
 * at next_to_use, sets the end-of-packet command bits on the last
 * descriptor, and advances the ring tail register.
 */
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		/* IP checksum insertion only applies to IPv4 TSO */
		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		/* VLAN tag lives in the upper 16 bits of tx_flags */
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		txd_lower &= ~(E1000_TXD_CMD_IFCS);

	i = tx_ring->next_to_use;

	/* every descriptor of the packet gets the same lower/upper bits */
	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count)) i = 0;
	}

	/* last descriptor also carries the per-adapter command bits
	 * (e.g. EOP/report-status) */
	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, hw->hw_addr + tx_ring->tdt);
	/* we need this if more than one processor can write to our tail
	 * at a time, it syncronizes IO on IA64/Altix systems */
	mmiowb();
}
3040
3041/**
3042 * 82547 workaround to avoid controller hang in half-duplex environment.
3043 * The workaround is to avoid queuing a large packet that would span
3044 * the internal Tx FIFO ring boundary by notifying the stack to resend
3045 * the packet at a later time. This gives the Tx FIFO an opportunity to
3046 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3047 * to the beginning of the Tx FIFO.
3048 **/
3049
3050#define E1000_FIFO_HDR 0x10
3051#define E1000_82547_PAD_LEN 0x3E0
3052
Joe Perches64798842008-07-11 15:17:02 -07003053static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3054 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055{
Joe Perches406874a2008-04-03 10:06:32 -07003056 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3057 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -07003059 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003061 if (adapter->link_duplex != HALF_DUPLEX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062 goto no_fifo_stall_required;
3063
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003064 if (atomic_read(&adapter->tx_fifo_stall))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003065 return 1;
3066
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003067 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068 atomic_set(&adapter->tx_fifo_stall, 1);
3069 return 1;
3070 }
3071
3072no_fifo_stall_required:
3073 adapter->tx_fifo_head += skb_fifo_len;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003074 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003075 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3076 return 0;
3077}
3078
/* __e1000_maybe_stop_tx - slow path of e1000_maybe_stop_tx()
 *
 * Stops the tx queue, then re-checks descriptor availability under a
 * full memory barrier to close the race with the cleanup path freeing
 * descriptors on another CPU.  Returns -EBUSY if the queue must stay
 * stopped, 0 if room appeared and the queue was restarted.
 */
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;	/* stat: queue stop/start cycles */
	return 0;
}
3100
/* e1000_maybe_stop_tx - stop the queue unless @size descriptors are free
 *
 * Fast-path check; defers to __e1000_maybe_stop_tx() only when the ring
 * looks too full.  Returns 0 when transmission may proceed, -EBUSY when
 * the queue was stopped.
 */
static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	if (unlikely(E1000_DESC_UNUSED(tx_ring) < size))
		return __e1000_maybe_stop_tx(netdev, size);
	return 0;
}
3108
Linus Torvalds1da177e2005-04-16 15:20:36 -07003109#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
/**
 * e1000_xmit_frame - transmit an skb on the adapter's (single) tx ring
 * @skb: socket buffer to transmit
 * @netdev: network interface device structure
 *
 * Counts the descriptors the frame will need (including extra slots for
 * several controller errata), stops the queue if the ring lacks room,
 * sets up TSO/checksum/VLAN offload flags, then maps and queues the
 * buffer via e1000_tx_map()/e1000_tx_queue().
 *
 * Returns NETDEV_TX_OK (skb consumed or dropped) or NETDEV_TX_BUSY
 * (ring/FIFO full, stack should requeue the skb).
 **/
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	tx_ring = adapter->tx_ring;

	/* zero-length skbs carry nothing to send; consume them silently */
	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops. */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/* header-only linear area with paged data: TSO workarounds */
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
				unsigned int pull_size;
			case e1000_82544:
				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword */
				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
					break;
				/* fall through */
				pull_size = min((unsigned int)4, skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail "
					      "failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				/* pull moved bytes into the linear area */
				len = skb_headlen(skb);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	/* descriptors needed for the linear (head) part of the skb */
	count += TXD_USE_COUNT(len, max_txd_pwr);

	/* NOTE(review): pcix_82544 appears to need one extra descriptor
	 * per buffer — matches the per-fragment add further below */
	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
		     (len > 2015)))
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
		return NETDEV_TX_BUSY;

	/* 82547 tx FIFO workaround: stall the queue and let the delayed
	 * fifo_stall_task restart it once the FIFO has drained */
	if (unlikely((hw->mac_type == e1000_82547) &&
		     (e1000_82547_fifo_workaround(adapter, skb)))) {
		netif_stop_queue(netdev);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	/* remember ring position so we can rewind if mapping fails below */
	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		if (likely(hw->mac_type != e1000_82544))
			tx_ring->last_tx_tso = true;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (likely(skb->protocol == htons(ETH_P_IP)))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	if (unlikely(skb->no_fcs))
		tx_flags |= E1000_TX_FLAGS_NO_FCS;

	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
			     nr_frags, mss);

	if (count) {
		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);

	} else {
		/* DMA mapping failed: drop the frame and rewind the ring */
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
3262
/* Number of registers captured below; must stay in sync with reg_name[]. */
#define NUM_REGS 38 /* 1 based count */
/**
 * e1000_regdump - print the values of the main MAC registers
 * @adapter: board private structure
 *
 * Snapshots a fixed set of control, ring and FIFO registers into a
 * local buffer and prints each value next to its name from reg_name[].
 */
static void e1000_regdump(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 regs[NUM_REGS];
	u32 *regs_buff = regs;
	int i = 0;

	/* names printed in the same order the registers are read below */
	static const char * const reg_name[] = {
		"CTRL", "STATUS",
		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
		"TIDV", "TXDCTL", "TADV", "TARC0",
		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
		"TXDCTL1", "TARC1",
		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
	};

	regs_buff[0] = er32(CTRL);
	regs_buff[1] = er32(STATUS);

	regs_buff[2] = er32(RCTL);
	regs_buff[3] = er32(RDLEN);
	regs_buff[4] = er32(RDH);
	regs_buff[5] = er32(RDT);
	regs_buff[6] = er32(RDTR);

	regs_buff[7] = er32(TCTL);
	regs_buff[8] = er32(TDBAL);
	regs_buff[9] = er32(TDBAH);
	regs_buff[10] = er32(TDLEN);
	regs_buff[11] = er32(TDH);
	regs_buff[12] = er32(TDT);
	regs_buff[13] = er32(TIDV);
	regs_buff[14] = er32(TXDCTL);
	regs_buff[15] = er32(TADV);
	regs_buff[16] = er32(TARC0);

	regs_buff[17] = er32(TDBAL1);
	regs_buff[18] = er32(TDBAH1);
	regs_buff[19] = er32(TDLEN1);
	regs_buff[20] = er32(TDH1);
	regs_buff[21] = er32(TDT1);
	regs_buff[22] = er32(TXDCTL1);
	regs_buff[23] = er32(TARC1);
	regs_buff[24] = er32(CTRL_EXT);
	regs_buff[25] = er32(ERT);
	/* RDBAL0/RDBAH0 are printed under the names "RDBAL"/"RDBAH" */
	regs_buff[26] = er32(RDBAL0);
	regs_buff[27] = er32(RDBAH0);
	regs_buff[28] = er32(TDFH);
	regs_buff[29] = er32(TDFT);
	regs_buff[30] = er32(TDFHS);
	regs_buff[31] = er32(TDFTS);
	regs_buff[32] = er32(TDFPC);
	regs_buff[33] = er32(RDFH);
	regs_buff[34] = er32(RDFT);
	regs_buff[35] = er32(RDFHS);
	regs_buff[36] = er32(RDFTS);
	regs_buff[37] = er32(RDFPC);

	pr_info("Register dump\n");
	for (i = 0; i < NUM_REGS; i++)
		pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
}
3329
/*
 * e1000_dump: Print registers, tx ring and rx ring
 *
 * Debug-only dump of adapter state.  Output is gated by the adapter's
 * message-level bits: the whole dump requires netif_msg_hw(), the tx
 * descriptor ring additionally requires netif_msg_tx_done(), and the
 * rx descriptor ring netif_msg_rx_status().
 */
static void e1000_dump(struct e1000_adapter *adapter)
{
	/* this code doesn't handle multiple rings */
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
	int i;

	if (!netif_msg_hw(adapter))
		return;

	/* Print Registers */
	e1000_regdump(adapter);

	/*
	 * transmit dump
	 */
	pr_info("TX Desc ring0 dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS  |     IPCSE       | IPCS0  | IPCSS  |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47     40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");

	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
		/* overlay the descriptor as two little-endian 64-bit words */
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)tx_desc;
		const char *type;

		/* mark next-to-use / next-to-clean positions in the output */
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			type = "NTC/U";
		else if (i == tx_ring->next_to_use)
			type = "NTU";
		else if (i == tx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		/* bit 20 of word b distinguishes data ('d') from context ('c') */
		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
			le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp, buffer_info->skb, type);
	}

rx_ring_summary:
	/*
	 * receive dump
	 */
	pr_info("\nRX Desc ring dump\n");

	/* Legacy Receive Descriptor Format
	 *
	 * +-----------------------------------------------------+
	 * |                Buffer Address [63:0]                |
	 * +-----------------------------------------------------+
	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
	 * +-----------------------------------------------------+
	 * 63       48 47    40 39      32 31         16 15      0
	 */
	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");

	if (!netif_msg_rx_status(adapter))
		goto exit;

	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)rx_desc;
		const char *type;

		if (i == rx_ring->next_to_use)
			type = "NTU";
		else if (i == rx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->skb, type);
	} /* for */

	/* dump the descriptor caches */
	/* rx */
	pr_info("Rx descriptor cache in 64bit format\n");
	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
		pr_info("R%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
	/* tx */
	pr_info("Tx descriptor cache in 64bit format\n");
	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
		pr_info("T%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
exit:
	return;
}
3471
Linus Torvalds1da177e2005-04-16 15:20:36 -07003472/**
3473 * e1000_tx_timeout - Respond to a Tx Hang
3474 * @netdev: network interface device structure
3475 **/
3476
Joe Perches64798842008-07-11 15:17:02 -07003477static void e1000_tx_timeout(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478{
Malli Chilakala60490fe2005-06-17 17:41:45 -07003479 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003480
3481 /* Do the reset outside of interrupt context */
Jeff Kirsher87041632006-03-02 18:21:24 -08003482 adapter->tx_timeout_count++;
3483 schedule_work(&adapter->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484}
3485
Joe Perches64798842008-07-11 15:17:02 -07003486static void e1000_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003487{
David Howells65f27f32006-11-22 14:55:48 +00003488 struct e1000_adapter *adapter =
3489 container_of(work, struct e1000_adapter, reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003490
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00003491 if (test_bit(__E1000_DOWN, &adapter->flags))
3492 return;
Tushar Daveb04e36b2012-01-27 09:00:46 +00003493 e_err(drv, "Reset adapter\n");
Jesse Brandeburg338c15e2010-09-22 18:22:42 +00003494 e1000_reinit_safe(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495}
3496
3497/**
3498 * e1000_get_stats - Get System Network Statistics
3499 * @netdev: network interface device structure
3500 *
3501 * Returns the address of the device statistics structure.
Jesse Brandeburga4010af2011-10-05 07:24:41 +00003502 * The statistics are actually updated from the watchdog.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503 **/
3504
Joe Perches64798842008-07-11 15:17:02 -07003505static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003506{
Jeff Kirsher6b7660c2006-01-12 16:50:35 -08003507 /* only return the current stats */
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003508 return &netdev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003509}
3510
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Validates the requested size against global and adapter-specific
 * limits, takes the adapter down (if running), recomputes the rx buffer
 * size for the new max frame, and brings the adapter back up.
 *
 * Returns 0 on success, negative on failure
 **/

static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* hardware frame size = MTU + Ethernet header + FCS */
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		e_err(probe, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
		/* oldest parts cannot do jumbo frames at all */
		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
			e_err(probe, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	/* serialize with any other reset in flight (__E1000_RESETTING
	 * is cleared at the bottom of this function) */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	/* e1000_down has a dependency on max_frame_size */
	hw->max_frame_size = max_frame;
	if (netif_running(netdev))
		e1000_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * however with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs */

	if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = PAGE_SIZE;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	pr_info("%s changing MTU from %d to %d\n",
		netdev->name, netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	/* restart with the new sizes; reset even if the interface is down
	 * so the hardware picks up the new max_frame_size */
	if (netif_running(netdev))
		e1000_up(adapter);
	else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

	return 0;
}
3586
/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 *
 * Accumulates the hardware statistics registers into adapter->stats
 * and derives the netdev error counters from them.  Safe to skip when
 * there is no link or the PCI channel is offline.
 **/

void e1000_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_tbi_adjust_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	/* NOTE(review): the += accumulation relies on these statistics
	 * registers being clear-on-read — per the 8254x manual */
	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorcl += er32(GORCL);
	adapter->stats.gorch += er32(GORCH);
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	adapter->stats.prc64 += er32(PRC64);
	adapter->stats.prc127 += er32(PRC127);
	adapter->stats.prc255 += er32(PRC255);
	adapter->stats.prc511 += er32(PRC511);
	adapter->stats.prc1023 += er32(PRC1023);
	adapter->stats.prc1522 += er32(PRC1522);

	adapter->stats.symerrs += er32(SYMERRS);
	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.sec += er32(SEC);
	adapter->stats.rlec += er32(RLEC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.fcruc += er32(FCRUC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotcl += er32(GOTCL);
	adapter->stats.gotch += er32(GOTCH);
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);
	adapter->stats.rfc += er32(RFC);
	adapter->stats.rjc += er32(RJC);
	adapter->stats.torl += er32(TORL);
	adapter->stats.torh += er32(TORH);
	adapter->stats.totl += er32(TOTL);
	adapter->stats.toth += er32(TOTH);
	adapter->stats.tpr += er32(TPR);

	adapter->stats.ptc64 += er32(PTC64);
	adapter->stats.ptc127 += er32(PTC127);
	adapter->stats.ptc255 += er32(PTC255);
	adapter->stats.ptc511 += er32(PTC511);
	adapter->stats.ptc1023 += er32(PTC1023);
	adapter->stats.ptc1522 += er32(PTC1522);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */

	hw->tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = er32(COLC);
	adapter->stats.colc += hw->collision_delta;

	/* these registers only exist on 82543 and newer parts */
	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += er32(ALGNERRC);
		adapter->stats.rxerrc += er32(RXERRC);
		adapter->stats.tncrs += er32(TNCRS);
		adapter->stats.cexterr += er32(CEXTERR);
		adapter->stats.tsctc += er32(TSCTC);
		adapter->stats.tsctfc += er32(TSCTFC);
	}

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
	netdev->stats.tx_errors = adapter->stats.txerrc;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
	/* some parts report bogus carrier stats in full duplex; zero them */
	if (hw->bad_tx_carr_stats_fd &&
	    adapter->link_duplex == FULL_DUPLEX) {
		netdev->stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		   (hw->phy_type == e1000_phy_m88) &&
		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	/* Management Stats */
	if (hw->has_smbus) {
		adapter->stats.mgptc += er32(MGTPTC);
		adapter->stats.mgprc += er32(MGTPRC);
		adapter->stats.mgpdc += er32(MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
Jesse Brandeburg9ac98282006-11-01 08:48:10 -08003741
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * Reads (and thereby clears) ICR, kicks the watchdog on link-state
 * events, masks further interrupts and schedules NAPI polling, which
 * re-enables interrupts when it completes.
 **/

static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (unlikely((!icr)))
		return IRQ_NONE;  /* Not our interrupt */

	/*
	 * we might have caused the interrupt, but the above
	 * read cleared it, and just in case the driver is
	 * down there is nothing to do so return handled
	 */
	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
		return IRQ_HANDLED;

	/* link-state change or rx sequence error: let the watchdog
	 * re-evaluate the link */
	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->watchdog_task, 1);
	}

	/* disable interrupts, without the synchronize_irq bit */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();

	if (likely(napi_schedule_prep(&adapter->napi))) {
		/* reset the per-poll byte/packet accounting before polling */
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	} else {
		/* this really should not happen! if it does it is basically a
		 * bug, but not a hard error, so enable ints and continue */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return IRQ_HANDLED;
}
3792
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793/**
3794 * e1000_clean - NAPI Rx polling callback
3795 * @adapter: board private structure
3796 **/
Joe Perches64798842008-07-11 15:17:02 -07003797static int e1000_clean(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003798{
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003799 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003800 int tx_clean_complete = 0, work_done = 0;
Malli Chilakala26483452005-04-28 19:44:46 -07003801
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003802 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003803
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003804 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003805
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003806 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08003807 work_done = budget;
3808
David S. Miller53e52c72008-01-07 21:06:12 -08003809 /* If budget not fully consumed, exit the polling mode */
3810 if (work_done < budget) {
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003811 if (likely(adapter->itr_setting & 3))
3812 e1000_set_itr(adapter);
Ben Hutchings288379f2009-01-19 16:43:59 -08003813 napi_complete(napi);
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003814 if (!test_bit(__E1000_DOWN, &adapter->flags))
3815 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003816 }
3817
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003818 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819}
3820
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: Tx ring to reclaim descriptors from
 *
 * Walks the ring from next_to_clean, freeing buffers for every descriptor
 * chain whose end-of-packet descriptor has the DD (descriptor done) bit
 * set.  Wakes the transmit queue when enough descriptors become free, and
 * runs the Tx-hang detector when armed by the watchdog.
 *
 * Returns true when the ring was cleaned completely (fewer than a full
 * ring's worth of descriptors processed), false when more work remains.
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes=0, total_tx_packets=0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* count < tx_ring->count bounds the loop in case of a stuck DD bit */
	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();	/* read buffer_info after eop_desc */
		/* free every descriptor up to and including the EOP one */
		for ( ; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				/* stats are accumulated on the EOP buffer only */
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count)) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD 32
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = false;
		/* hang = oldest pending buffer timed out and Tx not paused
		 * by flow control (TXOFF) */
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
				(unsigned long)((tx_ring - adapter->tx_ring) /
					sizeof(struct e1000_tx_ring)),
				readl(hw->hw_addr + tx_ring->tdh),
				readl(hw->hw_addr + tx_ring->tdt),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->upper.fields.status);
			e1000_dump(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
3921
3922/**
3923 * e1000_rx_checksum - Receive Checksum Offload for 82543
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003924 * @adapter: board private structure
3925 * @status_err: receive descriptor status and error fields
3926 * @csum: receive descriptor csum field
3927 * @sk_buff: socket buffer with received data
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928 **/
3929
Joe Perches64798842008-07-11 15:17:02 -07003930static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3931 u32 csum, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932{
Joe Perches1dc32912008-07-11 15:17:08 -07003933 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07003934 u16 status = (u16)status_err;
3935 u8 errors = (u8)(status_err >> 24);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07003936
3937 skb_checksum_none_assert(skb);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003938
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939 /* 82543 or newer only */
Joe Perches1dc32912008-07-11 15:17:08 -07003940 if (unlikely(hw->mac_type < e1000_82543)) return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941 /* Ignore Checksum bit is set */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003942 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003943 /* TCP/UDP checksum error bit is set */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003944 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003945 /* let the stack verify checksum errors */
3946 adapter->hw_csum_err++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 return;
3948 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003949 /* TCP/UDP Checksum has not been calculated */
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003950 if (!(status & E1000_RXD_STAT_TCPCS))
3951 return;
3952
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003953 /* It must be a TCP or UDP packet with a valid checksum */
3954 if (likely(status & E1000_RXD_STAT_TCPCS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955 /* TCP checksum is good */
3956 skb->ip_summed = CHECKSUM_UNNECESSARY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003958 adapter->hw_csum_good++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003959}
3960
3961/**
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003962 * e1000_consume_page - helper function
3963 **/
3964static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3965 u16 length)
3966{
3967 bi->page = NULL;
3968 skb->len += length;
3969 skb->data_len += length;
Eric Dumazeted64b3c2011-10-13 07:53:42 +00003970 skb->truesize += PAGE_SIZE;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003971}
3972
3973/**
3974 * e1000_receive_skb - helper function to handle rx indications
3975 * @adapter: board private structure
3976 * @status: descriptor status field as written by hardware
3977 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3978 * @skb: pointer to sk_buff to be indicated to stack
3979 */
3980static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3981 __le16 vlan, struct sk_buff *skb)
3982{
Jesse Brandeburg6a08d192010-09-22 18:23:05 +00003983 skb->protocol = eth_type_trans(skb, adapter->netdev);
3984
Jiri Pirko5622e402011-07-21 03:26:31 +00003985 if (status & E1000_RXD_STAT_VP) {
3986 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3987
3988 __vlan_hwaccel_put_tag(skb, vid);
3989 }
3990 napi_gro_receive(&adapter->napi, skb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003991}
3992
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * Jumbo-frame variant: each descriptor points at one page, and a frame
 * larger than a page arrives as a chain of non-EOP descriptors whose
 * pages are collected into rx_ring->rx_skb_top until the EOP descriptor
 * completes the packet.
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 */
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long irq_flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes=0, total_rx_packets=0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* DD bit set means the descriptor has been written back by hardware */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		/* prefetch the next descriptor while we work on this one */
		if (++i == rx_ring->count) i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			u8 last_byte = *(skb->data + length - 1);
			/* TBI workaround: some "errored" frames are actually
			 * acceptable; adjust stats and keep them */
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock,
						  irq_flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       irq_flags);
				length--;
			} else {
				/* recycle both page and skb */
				buffer_info->skb = skb;
				/* an error means any chain goes out the window
				 * too */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
			}
		}

#define rxtop rx_ring->rx_skb_top
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr, length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload XXX recompute due to CRC strip? */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		total_rx_bytes += (skb->len - 4); /* don't count FCS */
		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			pskb_trim(skb, skb->len - 4);
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err(drv, "pskb_may_pull failed.\n");
			dev_kfree_skb(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* refill whatever descriptors remain unused */
	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
4172
Joe Perches57bf6ee2010-05-13 15:26:17 +00004173/*
4174 * this should improve performance for small packets with large amounts
4175 * of reassembly being done in the stack
4176 */
4177static void e1000_check_copybreak(struct net_device *netdev,
4178 struct e1000_buffer *buffer_info,
4179 u32 length, struct sk_buff **skb)
4180{
4181 struct sk_buff *new_skb;
4182
4183 if (length > copybreak)
4184 return;
4185
4186 new_skb = netdev_alloc_skb_ip_align(netdev, length);
4187 if (!new_skb)
4188 return;
4189
4190 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4191 (*skb)->data - NET_IP_ALIGN,
4192 length + NET_IP_ALIGN);
4193 /* save the skb in buffer_info as good */
4194 buffer_info->skb = *skb;
4195 *skb = new_skb;
4196}
4197
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * Standard-MTU Rx path: every packet must fit in one buffer; multi-buffer
 * frames are discarded via adapter->discarding.  Returns whether any
 * cleaning was done.
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes=0, total_rx_packets=0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* DD bit set means the descriptor has been written back by hardware */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		/* prefetch the next descriptor while we work on this one */
		if (++i == rx_ring->count) i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet, if thats the case we need to toss it.  In fact, we
		 * to toss every packet with the EOP bit clear and the next
		 * frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			/* the EOP fragment ends the discard run */
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);
			/* TBI workaround: some "errored" frames are actually
			 * acceptable; adjust stats and keep them */
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		/* copy small packets into a fresh skb so the buffer recycles */
		e1000_check_copybreak(netdev, buffer_info, length, &skb);

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* refill whatever descriptors remain unused */
	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
4335
4336/**
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004337 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4338 * @adapter: address of board private structure
4339 * @rx_ring: pointer to receive ring structure
4340 * @cleaned_count: number of buffers to allocate this pass
4341 **/
4342
4343static void
4344e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4345 struct e1000_rx_ring *rx_ring, int cleaned_count)
4346{
4347 struct net_device *netdev = adapter->netdev;
4348 struct pci_dev *pdev = adapter->pdev;
4349 struct e1000_rx_desc *rx_desc;
4350 struct e1000_buffer *buffer_info;
4351 struct sk_buff *skb;
4352 unsigned int i;
Eric Dumazet89d71a62009-10-13 05:34:20 +00004353 unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004354
4355 i = rx_ring->next_to_use;
4356 buffer_info = &rx_ring->buffer_info[i];
4357
4358 while (cleaned_count--) {
4359 skb = buffer_info->skb;
4360 if (skb) {
4361 skb_trim(skb, 0);
4362 goto check_page;
4363 }
4364
Eric Dumazet89d71a62009-10-13 05:34:20 +00004365 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004366 if (unlikely(!skb)) {
4367 /* Better luck next round */
4368 adapter->alloc_rx_buff_failed++;
4369 break;
4370 }
4371
4372 /* Fix for errata 23, can't cross 64kB boundary */
4373 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4374 struct sk_buff *oldskb = skb;
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004375 e_err(rx_err, "skb align check failed: %u bytes at "
4376 "%p\n", bufsz, skb->data);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004377 /* Try again, without freeing the previous */
Eric Dumazet89d71a62009-10-13 05:34:20 +00004378 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004379 /* Failed allocation, critical failure */
4380 if (!skb) {
4381 dev_kfree_skb(oldskb);
4382 adapter->alloc_rx_buff_failed++;
4383 break;
4384 }
4385
4386 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4387 /* give up */
4388 dev_kfree_skb(skb);
4389 dev_kfree_skb(oldskb);
4390 break; /* while (cleaned_count--) */
4391 }
4392
4393 /* Use new allocation */
4394 dev_kfree_skb(oldskb);
4395 }
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004396 buffer_info->skb = skb;
4397 buffer_info->length = adapter->rx_buffer_len;
4398check_page:
4399 /* allocate a new page if necessary */
4400 if (!buffer_info->page) {
4401 buffer_info->page = alloc_page(GFP_ATOMIC);
4402 if (unlikely(!buffer_info->page)) {
4403 adapter->alloc_rx_buff_failed++;
4404 break;
4405 }
4406 }
4407
Anton Blanchardb5abb022010-02-19 17:54:53 +00004408 if (!buffer_info->dma) {
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004409 buffer_info->dma = dma_map_page(&pdev->dev,
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004410 buffer_info->page, 0,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004411 buffer_info->length,
4412 DMA_FROM_DEVICE);
4413 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
Anton Blanchardb5abb022010-02-19 17:54:53 +00004414 put_page(buffer_info->page);
4415 dev_kfree_skb(skb);
4416 buffer_info->page = NULL;
4417 buffer_info->skb = NULL;
4418 buffer_info->dma = 0;
4419 adapter->alloc_rx_buff_failed++;
4420 break; /* while !buffer_info->skb */
4421 }
4422 }
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004423
4424 rx_desc = E1000_RX_DESC(*rx_ring, i);
4425 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4426
4427 if (unlikely(++i == rx_ring->count))
4428 i = 0;
4429 buffer_info = &rx_ring->buffer_info[i];
4430 }
4431
4432 if (likely(rx_ring->next_to_use != i)) {
4433 rx_ring->next_to_use = i;
4434 if (unlikely(i-- == 0))
4435 i = (rx_ring->count - 1);
4436
4437 /* Force memory writes to complete before letting h/w
4438 * know there are new descriptors to fetch. (Only
4439 * applicable for weak-ordered memory model archs,
4440 * such as IA-64). */
4441 wmb();
4442 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4443 }
4444}
4445
4446/**
Malli Chilakala2d7edb92005-04-28 19:43:52 -07004447 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
Linus Torvalds1da177e2005-04-16 15:20:36 -07004448 * @adapter: address of board private structure
4449 **/
4450
Joe Perches64798842008-07-11 15:17:02 -07004451static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4452 struct e1000_rx_ring *rx_ring,
4453 int cleaned_count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454{
Joe Perches1dc32912008-07-11 15:17:08 -07004455 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456 struct net_device *netdev = adapter->netdev;
4457 struct pci_dev *pdev = adapter->pdev;
4458 struct e1000_rx_desc *rx_desc;
4459 struct e1000_buffer *buffer_info;
4460 struct sk_buff *skb;
Malli Chilakala26483452005-04-28 19:44:46 -07004461 unsigned int i;
Eric Dumazet89d71a62009-10-13 05:34:20 +00004462 unsigned int bufsz = adapter->rx_buffer_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463
4464 i = rx_ring->next_to_use;
4465 buffer_info = &rx_ring->buffer_info[i];
4466
Jeff Kirshera292ca62006-01-12 16:51:30 -08004467 while (cleaned_count--) {
Christoph Hellwigca6f7222006-08-31 14:27:47 -07004468 skb = buffer_info->skb;
4469 if (skb) {
Jeff Kirshera292ca62006-01-12 16:51:30 -08004470 skb_trim(skb, 0);
4471 goto map_skb;
4472 }
4473
Eric Dumazet89d71a62009-10-13 05:34:20 +00004474 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004475 if (unlikely(!skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 /* Better luck next round */
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004477 adapter->alloc_rx_buff_failed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478 break;
4479 }
4480
Malli Chilakala26483452005-04-28 19:44:46 -07004481 /* Fix for errata 23, can't cross 64kB boundary */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004482 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4483 struct sk_buff *oldskb = skb;
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004484 e_err(rx_err, "skb align check failed: %u bytes at "
4485 "%p\n", bufsz, skb->data);
Malli Chilakala26483452005-04-28 19:44:46 -07004486 /* Try again, without freeing the previous */
Eric Dumazet89d71a62009-10-13 05:34:20 +00004487 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
Malli Chilakala26483452005-04-28 19:44:46 -07004488 /* Failed allocation, critical failure */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489 if (!skb) {
4490 dev_kfree_skb(oldskb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004491 adapter->alloc_rx_buff_failed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492 break;
4493 }
Malli Chilakala26483452005-04-28 19:44:46 -07004494
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4496 /* give up */
4497 dev_kfree_skb(skb);
4498 dev_kfree_skb(oldskb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004499 adapter->alloc_rx_buff_failed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004500 break; /* while !buffer_info->skb */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501 }
Christoph Hellwigca6f7222006-08-31 14:27:47 -07004502
4503 /* Use new allocation */
4504 dev_kfree_skb(oldskb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506 buffer_info->skb = skb;
4507 buffer_info->length = adapter->rx_buffer_len;
Jeff Kirshera292ca62006-01-12 16:51:30 -08004508map_skb:
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004509 buffer_info->dma = dma_map_single(&pdev->dev,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510 skb->data,
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004511 buffer_info->length,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004512 DMA_FROM_DEVICE);
4513 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
Anton Blanchardb5abb022010-02-19 17:54:53 +00004514 dev_kfree_skb(skb);
4515 buffer_info->skb = NULL;
4516 buffer_info->dma = 0;
4517 adapter->alloc_rx_buff_failed++;
4518 break; /* while !buffer_info->skb */
4519 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004521 /*
4522 * XXX if it was allocated cleanly it will never map to a
4523 * boundary crossing
4524 */
4525
Malli Chilakala26483452005-04-28 19:44:46 -07004526 /* Fix for errata 23, can't cross 64kB boundary */
4527 if (!e1000_check_64k_bound(adapter,
4528 (void *)(unsigned long)buffer_info->dma,
4529 adapter->rx_buffer_len)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004530 e_err(rx_err, "dma align check failed: %u bytes at "
4531 "%p\n", adapter->rx_buffer_len,
Emil Tantilov675ad472010-04-27 14:02:58 +00004532 (void *)(unsigned long)buffer_info->dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004533 dev_kfree_skb(skb);
4534 buffer_info->skb = NULL;
4535
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004536 dma_unmap_single(&pdev->dev, buffer_info->dma,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004537 adapter->rx_buffer_len,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004538 DMA_FROM_DEVICE);
Jesse Brandeburg679be3b2009-06-30 12:45:34 +00004539 buffer_info->dma = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004540
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004541 adapter->alloc_rx_buff_failed++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004542 break; /* while !buffer_info->skb */
4543 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004544 rx_desc = E1000_RX_DESC(*rx_ring, i);
4545 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4546
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004547 if (unlikely(++i == rx_ring->count))
4548 i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004549 buffer_info = &rx_ring->buffer_info[i];
4550 }
4551
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004552 if (likely(rx_ring->next_to_use != i)) {
4553 rx_ring->next_to_use = i;
4554 if (unlikely(i-- == 0))
4555 i = (rx_ring->count - 1);
4556
4557 /* Force memory writes to complete before letting h/w
4558 * know there are new descriptors to fetch. (Only
4559 * applicable for weak-ordered memory model archs,
4560 * such as IA-64). */
4561 wmb();
Joe Perches1dc32912008-07-11 15:17:08 -07004562 writel(i, hw->hw_addr + rx_ring->rdt);
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004563 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564}
4565
4566/**
4567 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4568 * @adapter:
4569 **/
4570
Joe Perches64798842008-07-11 15:17:02 -07004571static void e1000_smartspeed(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004572{
Joe Perches1dc32912008-07-11 15:17:08 -07004573 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07004574 u16 phy_status;
4575 u16 phy_ctrl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004576
Joe Perches1dc32912008-07-11 15:17:08 -07004577 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4578 !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004579 return;
4580
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004581 if (adapter->smartspeed == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004582 /* If Master/Slave config fault is asserted twice,
4583 * we assume back-to-back */
Joe Perches1dc32912008-07-11 15:17:08 -07004584 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004585 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
Joe Perches1dc32912008-07-11 15:17:08 -07004586 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004587 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
Joe Perches1dc32912008-07-11 15:17:08 -07004588 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004589 if (phy_ctrl & CR_1000T_MS_ENABLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004590 phy_ctrl &= ~CR_1000T_MS_ENABLE;
Joe Perches1dc32912008-07-11 15:17:08 -07004591 e1000_write_phy_reg(hw, PHY_1000T_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004592 phy_ctrl);
4593 adapter->smartspeed++;
Joe Perches1dc32912008-07-11 15:17:08 -07004594 if (!e1000_phy_setup_autoneg(hw) &&
4595 !e1000_read_phy_reg(hw, PHY_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596 &phy_ctrl)) {
4597 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4598 MII_CR_RESTART_AUTO_NEG);
Joe Perches1dc32912008-07-11 15:17:08 -07004599 e1000_write_phy_reg(hw, PHY_CTRL,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004600 phy_ctrl);
4601 }
4602 }
4603 return;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004604 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004605 /* If still no link, perhaps using 2/3 pair cable */
Joe Perches1dc32912008-07-11 15:17:08 -07004606 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004607 phy_ctrl |= CR_1000T_MS_ENABLE;
Joe Perches1dc32912008-07-11 15:17:08 -07004608 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4609 if (!e1000_phy_setup_autoneg(hw) &&
4610 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004611 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4612 MII_CR_RESTART_AUTO_NEG);
Joe Perches1dc32912008-07-11 15:17:08 -07004613 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004614 }
4615 }
4616 /* Restart process after E1000_SMARTSPEED_MAX iterations */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004617 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004618 adapter->smartspeed = 0;
4619}
4620
4621/**
4622 * e1000_ioctl -
4623 * @netdev:
4624 * @ifreq:
4625 * @cmd:
4626 **/
4627
Joe Perches64798842008-07-11 15:17:02 -07004628static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004629{
4630 switch (cmd) {
4631 case SIOCGMIIPHY:
4632 case SIOCGMIIREG:
4633 case SIOCSMIIREG:
4634 return e1000_mii_ioctl(netdev, ifr, cmd);
4635 default:
4636 return -EOPNOTSUPP;
4637 }
4638}
4639
4640/**
4641 * e1000_mii_ioctl -
4642 * @netdev:
4643 * @ifreq:
4644 * @cmd:
4645 **/
4646
Joe Perches64798842008-07-11 15:17:02 -07004647static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4648 int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004649{
Malli Chilakala60490fe2005-06-17 17:41:45 -07004650 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07004651 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004652 struct mii_ioctl_data *data = if_mii(ifr);
4653 int retval;
Joe Perches406874a2008-04-03 10:06:32 -07004654 u16 mii_reg;
Malli Chilakala97876fc2005-06-17 17:40:19 -07004655 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004656
Joe Perches1dc32912008-07-11 15:17:08 -07004657 if (hw->media_type != e1000_media_type_copper)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658 return -EOPNOTSUPP;
4659
4660 switch (cmd) {
4661 case SIOCGMIIPHY:
Joe Perches1dc32912008-07-11 15:17:08 -07004662 data->phy_id = hw->phy_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004663 break;
4664 case SIOCGMIIREG:
Malli Chilakala97876fc2005-06-17 17:40:19 -07004665 spin_lock_irqsave(&adapter->stats_lock, flags);
Joe Perches1dc32912008-07-11 15:17:08 -07004666 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
Malli Chilakala97876fc2005-06-17 17:40:19 -07004667 &data->val_out)) {
4668 spin_unlock_irqrestore(&adapter->stats_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004669 return -EIO;
Malli Chilakala97876fc2005-06-17 17:40:19 -07004670 }
4671 spin_unlock_irqrestore(&adapter->stats_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004672 break;
4673 case SIOCSMIIREG:
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004674 if (data->reg_num & ~(0x1F))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004675 return -EFAULT;
4676 mii_reg = data->val_in;
Malli Chilakala97876fc2005-06-17 17:40:19 -07004677 spin_lock_irqsave(&adapter->stats_lock, flags);
Joe Perches1dc32912008-07-11 15:17:08 -07004678 if (e1000_write_phy_reg(hw, data->reg_num,
Malli Chilakala97876fc2005-06-17 17:40:19 -07004679 mii_reg)) {
4680 spin_unlock_irqrestore(&adapter->stats_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004681 return -EIO;
Malli Chilakala97876fc2005-06-17 17:40:19 -07004682 }
Jesse Brandeburgf0163ac2007-11-13 21:00:09 -08004683 spin_unlock_irqrestore(&adapter->stats_lock, flags);
Joe Perches1dc32912008-07-11 15:17:08 -07004684 if (hw->media_type == e1000_media_type_copper) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004685 switch (data->reg_num) {
4686 case PHY_CTRL:
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004687 if (mii_reg & MII_CR_POWER_DOWN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004688 break;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004689 if (mii_reg & MII_CR_AUTO_NEG_EN) {
Joe Perches1dc32912008-07-11 15:17:08 -07004690 hw->autoneg = 1;
4691 hw->autoneg_advertised = 0x2F;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004692 } else {
David Decotigny14ad2512011-04-27 18:32:43 +00004693 u32 speed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004694 if (mii_reg & 0x40)
David Decotigny14ad2512011-04-27 18:32:43 +00004695 speed = SPEED_1000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696 else if (mii_reg & 0x2000)
David Decotigny14ad2512011-04-27 18:32:43 +00004697 speed = SPEED_100;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004698 else
David Decotigny14ad2512011-04-27 18:32:43 +00004699 speed = SPEED_10;
4700 retval = e1000_set_spd_dplx(
4701 adapter, speed,
4702 ((mii_reg & 0x100)
4703 ? DUPLEX_FULL :
4704 DUPLEX_HALF));
Jesse Brandeburgf0163ac2007-11-13 21:00:09 -08004705 if (retval)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004706 return retval;
4707 }
Auke Kok2db10a02006-06-27 09:06:28 -07004708 if (netif_running(adapter->netdev))
4709 e1000_reinit_locked(adapter);
4710 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07004711 e1000_reset(adapter);
4712 break;
4713 case M88E1000_PHY_SPEC_CTRL:
4714 case M88E1000_EXT_PHY_SPEC_CTRL:
Joe Perches1dc32912008-07-11 15:17:08 -07004715 if (e1000_phy_reset(hw))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004716 return -EIO;
4717 break;
4718 }
4719 } else {
4720 switch (data->reg_num) {
4721 case PHY_CTRL:
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004722 if (mii_reg & MII_CR_POWER_DOWN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723 break;
Auke Kok2db10a02006-06-27 09:06:28 -07004724 if (netif_running(adapter->netdev))
4725 e1000_reinit_locked(adapter);
4726 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07004727 e1000_reset(adapter);
4728 break;
4729 }
4730 }
4731 break;
4732 default:
4733 return -EOPNOTSUPP;
4734 }
4735 return E1000_SUCCESS;
4736}
4737
Joe Perches64798842008-07-11 15:17:02 -07004738void e1000_pci_set_mwi(struct e1000_hw *hw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739{
4740 struct e1000_adapter *adapter = hw->back;
Malli Chilakala26483452005-04-28 19:44:46 -07004741 int ret_val = pci_set_mwi(adapter->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004743 if (ret_val)
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004744 e_err(probe, "Error in setting MWI\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745}
4746
Joe Perches64798842008-07-11 15:17:02 -07004747void e1000_pci_clear_mwi(struct e1000_hw *hw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748{
4749 struct e1000_adapter *adapter = hw->back;
4750
4751 pci_clear_mwi(adapter->pdev);
4752}
4753
Joe Perches64798842008-07-11 15:17:02 -07004754int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
Peter Oruba007755e2007-09-28 22:42:06 -07004755{
4756 struct e1000_adapter *adapter = hw->back;
4757 return pcix_get_mmrbc(adapter->pdev);
4758}
4759
Joe Perches64798842008-07-11 15:17:02 -07004760void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
Peter Oruba007755e2007-09-28 22:42:06 -07004761{
4762 struct e1000_adapter *adapter = hw->back;
4763 pcix_set_mmrbc(adapter->pdev, mmrbc);
4764}
4765
Joe Perches64798842008-07-11 15:17:02 -07004766void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004767{
4768 outl(value, port);
4769}
4770
Jiri Pirko5622e402011-07-21 03:26:31 +00004771static bool e1000_vlan_used(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004772{
Jiri Pirko5622e402011-07-21 03:26:31 +00004773 u16 vid;
4774
4775 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4776 return true;
4777 return false;
4778}
4779
Jiri Pirko52f55092012-03-20 18:10:01 +00004780static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4781 netdev_features_t features)
4782{
4783 struct e1000_hw *hw = &adapter->hw;
4784 u32 ctrl;
4785
4786 ctrl = er32(CTRL);
4787 if (features & NETIF_F_HW_VLAN_RX) {
4788 /* enable VLAN tag insert/strip */
4789 ctrl |= E1000_CTRL_VME;
4790 } else {
4791 /* disable VLAN tag insert/strip */
4792 ctrl &= ~E1000_CTRL_VME;
4793 }
4794 ew32(CTRL, ctrl);
4795}
Jiri Pirko5622e402011-07-21 03:26:31 +00004796static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4797 bool filter_on)
4798{
Joe Perches1dc32912008-07-11 15:17:08 -07004799 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko5622e402011-07-21 03:26:31 +00004800 u32 rctl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801
Jesse Brandeburg9150b762008-03-21 11:06:58 -07004802 if (!test_bit(__E1000_DOWN, &adapter->flags))
4803 e1000_irq_disable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804
Jiri Pirko52f55092012-03-20 18:10:01 +00004805 __e1000_vlan_mode(adapter, adapter->netdev->features);
Jiri Pirko5622e402011-07-21 03:26:31 +00004806 if (filter_on) {
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004807 /* enable VLAN receive filtering */
4808 rctl = er32(RCTL);
4809 rctl &= ~E1000_RCTL_CFIEN;
Jiri Pirko5622e402011-07-21 03:26:31 +00004810 if (!(adapter->netdev->flags & IFF_PROMISC))
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004811 rctl |= E1000_RCTL_VFE;
4812 ew32(RCTL, rctl);
4813 e1000_update_mng_vlan(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004814 } else {
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004815 /* disable VLAN receive filtering */
4816 rctl = er32(RCTL);
4817 rctl &= ~E1000_RCTL_VFE;
4818 ew32(RCTL, rctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004819 }
4820
Jesse Brandeburg9150b762008-03-21 11:06:58 -07004821 if (!test_bit(__E1000_DOWN, &adapter->flags))
4822 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004823}
4824
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004825static void e1000_vlan_mode(struct net_device *netdev,
Jiri Pirko52f55092012-03-20 18:10:01 +00004826 netdev_features_t features)
Jiri Pirko5622e402011-07-21 03:26:31 +00004827{
4828 struct e1000_adapter *adapter = netdev_priv(netdev);
Jiri Pirko5622e402011-07-21 03:26:31 +00004829
4830 if (!test_bit(__E1000_DOWN, &adapter->flags))
4831 e1000_irq_disable(adapter);
4832
Jiri Pirko52f55092012-03-20 18:10:01 +00004833 __e1000_vlan_mode(adapter, features);
Jiri Pirko5622e402011-07-21 03:26:31 +00004834
4835 if (!test_bit(__E1000_DOWN, &adapter->flags))
4836 e1000_irq_enable(adapter);
4837}
4838
Jiri Pirko8e586132011-12-08 19:52:37 -05004839static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004840{
Malli Chilakala60490fe2005-06-17 17:41:45 -07004841 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07004842 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07004843 u32 vfta, index;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004844
Joe Perches1dc32912008-07-11 15:17:08 -07004845 if ((hw->mng_cookie.status &
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004846 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4847 (vid == adapter->mng_vlan_id))
Jiri Pirko8e586132011-12-08 19:52:37 -05004848 return 0;
Jiri Pirko5622e402011-07-21 03:26:31 +00004849
4850 if (!e1000_vlan_used(adapter))
4851 e1000_vlan_filter_on_off(adapter, true);
4852
Linus Torvalds1da177e2005-04-16 15:20:36 -07004853 /* add VID to filter table */
4854 index = (vid >> 5) & 0x7F;
Joe Perches1dc32912008-07-11 15:17:08 -07004855 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004856 vfta |= (1 << (vid & 0x1F));
Joe Perches1dc32912008-07-11 15:17:08 -07004857 e1000_write_vfta(hw, index, vfta);
Jiri Pirko5622e402011-07-21 03:26:31 +00004858
4859 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05004860
4861 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004862}
4863
Jiri Pirko8e586132011-12-08 19:52:37 -05004864static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004865{
Malli Chilakala60490fe2005-06-17 17:41:45 -07004866 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07004867 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07004868 u32 vfta, index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004869
Jesse Brandeburg9150b762008-03-21 11:06:58 -07004870 if (!test_bit(__E1000_DOWN, &adapter->flags))
4871 e1000_irq_disable(adapter);
Jesse Brandeburg9150b762008-03-21 11:06:58 -07004872 if (!test_bit(__E1000_DOWN, &adapter->flags))
4873 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004874
4875 /* remove VID from filter table */
4876 index = (vid >> 5) & 0x7F;
Joe Perches1dc32912008-07-11 15:17:08 -07004877 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878 vfta &= ~(1 << (vid & 0x1F));
Joe Perches1dc32912008-07-11 15:17:08 -07004879 e1000_write_vfta(hw, index, vfta);
Jiri Pirko5622e402011-07-21 03:26:31 +00004880
4881 clear_bit(vid, adapter->active_vlans);
4882
4883 if (!e1000_vlan_used(adapter))
4884 e1000_vlan_filter_on_off(adapter, false);
Jiri Pirko8e586132011-12-08 19:52:37 -05004885
4886 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004887}
4888
Joe Perches64798842008-07-11 15:17:02 -07004889static void e1000_restore_vlan(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004890{
Jiri Pirko5622e402011-07-21 03:26:31 +00004891 u16 vid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004892
Jiri Pirko5622e402011-07-21 03:26:31 +00004893 if (!e1000_vlan_used(adapter))
4894 return;
4895
4896 e1000_vlan_filter_on_off(adapter, true);
4897 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4898 e1000_vlan_rx_add_vid(adapter->netdev, vid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004899}
4900
David Decotigny14ad2512011-04-27 18:32:43 +00004901int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004902{
Joe Perches1dc32912008-07-11 15:17:08 -07004903 struct e1000_hw *hw = &adapter->hw;
4904
4905 hw->autoneg = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004906
David Decotigny14ad2512011-04-27 18:32:43 +00004907 /* Make sure dplx is at most 1 bit and lsb of speed is not set
4908 * for the switch() below to work */
4909 if ((spd & 1) || (dplx & ~1))
4910 goto err_inval;
4911
Malli Chilakala69213682005-06-17 17:44:20 -07004912 /* Fiber NICs only allow 1000 gbps Full duplex */
Joe Perches1dc32912008-07-11 15:17:08 -07004913 if ((hw->media_type == e1000_media_type_fiber) &&
David Decotigny14ad2512011-04-27 18:32:43 +00004914 spd != SPEED_1000 &&
4915 dplx != DUPLEX_FULL)
4916 goto err_inval;
Malli Chilakala69213682005-06-17 17:44:20 -07004917
David Decotigny14ad2512011-04-27 18:32:43 +00004918 switch (spd + dplx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004919 case SPEED_10 + DUPLEX_HALF:
Joe Perches1dc32912008-07-11 15:17:08 -07004920 hw->forced_speed_duplex = e1000_10_half;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004921 break;
4922 case SPEED_10 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004923 hw->forced_speed_duplex = e1000_10_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924 break;
4925 case SPEED_100 + DUPLEX_HALF:
Joe Perches1dc32912008-07-11 15:17:08 -07004926 hw->forced_speed_duplex = e1000_100_half;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927 break;
4928 case SPEED_100 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004929 hw->forced_speed_duplex = e1000_100_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004930 break;
4931 case SPEED_1000 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004932 hw->autoneg = 1;
4933 hw->autoneg_advertised = ADVERTISE_1000_FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004934 break;
4935 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4936 default:
David Decotigny14ad2512011-04-27 18:32:43 +00004937 goto err_inval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004938 }
4939 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00004940
4941err_inval:
4942 e_err(probe, "Unsupported Speed/Duplex configuration\n");
4943 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004944}
4945
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00004946static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004947{
4948 struct net_device *netdev = pci_get_drvdata(pdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -07004949 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07004950 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07004951 u32 ctrl, ctrl_ext, rctl, status;
4952 u32 wufc = adapter->wol;
Auke Kok6fdfef12006-06-27 09:06:36 -07004953#ifdef CONFIG_PM
Jeff Kirsher240b1712006-01-12 16:51:28 -08004954 int retval = 0;
Auke Kok6fdfef12006-06-27 09:06:36 -07004955#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004956
4957 netif_device_detach(netdev);
4958
Auke Kok2db10a02006-06-27 09:06:28 -07004959 if (netif_running(netdev)) {
4960 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
Linus Torvalds1da177e2005-04-16 15:20:36 -07004961 e1000_down(adapter);
Auke Kok2db10a02006-06-27 09:06:28 -07004962 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004963
Jesse Brandeburg2f826652006-01-18 13:01:34 -08004964#ifdef CONFIG_PM
Kok, Auke1d33e9c2007-02-16 14:39:28 -08004965 retval = pci_save_state(pdev);
Jesse Brandeburg3a3847e2012-01-04 20:23:33 +00004966 if (retval)
Jesse Brandeburg2f826652006-01-18 13:01:34 -08004967 return retval;
4968#endif
4969
Joe Perches1dc32912008-07-11 15:17:08 -07004970 status = er32(STATUS);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004971 if (status & E1000_STATUS_LU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972 wufc &= ~E1000_WUFC_LNKC;
4973
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004974 if (wufc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975 e1000_setup_rctl(adapter);
Patrick McHardydb0ce502007-11-13 20:54:59 -08004976 e1000_set_rx_mode(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004977
Dean Nelsonb8681792012-01-19 17:47:24 +00004978 rctl = er32(RCTL);
4979
Linus Torvalds1da177e2005-04-16 15:20:36 -07004980 /* turn on all-multi mode if wake on multicast is enabled */
Dean Nelsonb8681792012-01-19 17:47:24 +00004981 if (wufc & E1000_WUFC_MC)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004982 rctl |= E1000_RCTL_MPE;
Dean Nelsonb8681792012-01-19 17:47:24 +00004983
4984 /* enable receives in the hardware */
4985 ew32(RCTL, rctl | E1000_RCTL_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986
Joe Perches1dc32912008-07-11 15:17:08 -07004987 if (hw->mac_type >= e1000_82540) {
4988 ctrl = er32(CTRL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004989 /* advertise wake from D3Cold */
4990 #define E1000_CTRL_ADVD3WUC 0x00100000
4991 /* phy power management enable */
4992 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4993 ctrl |= E1000_CTRL_ADVD3WUC |
4994 E1000_CTRL_EN_PHY_PWR_MGMT;
Joe Perches1dc32912008-07-11 15:17:08 -07004995 ew32(CTRL, ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004996 }
4997
Joe Perches1dc32912008-07-11 15:17:08 -07004998 if (hw->media_type == e1000_media_type_fiber ||
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004999 hw->media_type == e1000_media_type_internal_serdes) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005000 /* keep the laser running in D3 */
Joe Perches1dc32912008-07-11 15:17:08 -07005001 ctrl_ext = er32(CTRL_EXT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005002 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
Joe Perches1dc32912008-07-11 15:17:08 -07005003 ew32(CTRL_EXT, ctrl_ext);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005004 }
5005
Joe Perches1dc32912008-07-11 15:17:08 -07005006 ew32(WUC, E1000_WUC_PME_EN);
5007 ew32(WUFC, wufc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008 } else {
Joe Perches1dc32912008-07-11 15:17:08 -07005009 ew32(WUC, 0);
5010 ew32(WUFC, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005011 }
5012
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005013 e1000_release_manageability(adapter);
5014
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005015 *enable_wake = !!wufc;
5016
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005017 /* make sure adapter isn't asleep if manageability is enabled */
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005018 if (adapter->en_mng_pt)
5019 *enable_wake = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005020
Auke Kokedd106f2006-11-06 08:57:12 -08005021 if (netif_running(netdev))
5022 e1000_free_irq(adapter);
5023
Linus Torvalds1da177e2005-04-16 15:20:36 -07005024 pci_disable_device(pdev);
Jeff Kirsher240b1712006-01-12 16:51:28 -08005025
Linus Torvalds1da177e2005-04-16 15:20:36 -07005026 return 0;
5027}
5028
Jesse Brandeburg2f826652006-01-18 13:01:34 -08005029#ifdef CONFIG_PM
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005030static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5031{
5032 int retval;
5033 bool wake;
5034
5035 retval = __e1000_shutdown(pdev, &wake);
5036 if (retval)
5037 return retval;
5038
5039 if (wake) {
5040 pci_prepare_to_sleep(pdev);
5041 } else {
5042 pci_wake_from_d3(pdev, false);
5043 pci_set_power_state(pdev, PCI_D3hot);
5044 }
5045
5046 return 0;
5047}
5048
Joe Perches64798842008-07-11 15:17:02 -07005049static int e1000_resume(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005050{
5051 struct net_device *netdev = pci_get_drvdata(pdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -07005052 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07005053 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07005054 u32 err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055
Auke Kokd0e027d2006-04-14 19:04:40 -07005056 pci_set_power_state(pdev, PCI_D0);
Kok, Auke1d33e9c2007-02-16 14:39:28 -08005057 pci_restore_state(pdev);
Nick Nunleydbb5aae2010-02-03 14:49:48 +00005058 pci_save_state(pdev);
Taku Izumi81250292008-07-11 15:17:44 -07005059
5060 if (adapter->need_ioport)
5061 err = pci_enable_device(pdev);
5062 else
5063 err = pci_enable_device_mem(pdev);
Joe Perchesc7be73b2008-07-11 15:17:28 -07005064 if (err) {
Emil Tantilov675ad472010-04-27 14:02:58 +00005065 pr_err("Cannot enable PCI device from suspend\n");
Auke Kok3d1dd8c2006-08-28 14:56:27 -07005066 return err;
5067 }
Malli Chilakalaa4cb8472005-04-28 19:41:28 -07005068 pci_set_master(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005069
Auke Kokd0e027d2006-04-14 19:04:40 -07005070 pci_enable_wake(pdev, PCI_D3hot, 0);
5071 pci_enable_wake(pdev, PCI_D3cold, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005072
Joe Perchesc7be73b2008-07-11 15:17:28 -07005073 if (netif_running(netdev)) {
5074 err = e1000_request_irq(adapter);
5075 if (err)
5076 return err;
5077 }
Auke Kokedd106f2006-11-06 08:57:12 -08005078
5079 e1000_power_up_phy(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080 e1000_reset(adapter);
Joe Perches1dc32912008-07-11 15:17:08 -07005081 ew32(WUS, ~0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005082
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005083 e1000_init_manageability(adapter);
5084
Jesse Brandeburg96838a42006-01-18 13:01:39 -08005085 if (netif_running(netdev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005086 e1000_up(adapter);
5087
5088 netif_device_attach(netdev);
5089
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090 return 0;
5091}
5092#endif
Auke Kokc653e632006-05-23 13:35:57 -07005093
5094static void e1000_shutdown(struct pci_dev *pdev)
5095{
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005096 bool wake;
5097
5098 __e1000_shutdown(pdev, &wake);
5099
5100 if (system_state == SYSTEM_POWER_OFF) {
5101 pci_wake_from_d3(pdev, wake);
5102 pci_set_power_state(pdev, PCI_D3hot);
5103 }
Auke Kokc653e632006-05-23 13:35:57 -07005104}
5105
Linus Torvalds1da177e2005-04-16 15:20:36 -07005106#ifdef CONFIG_NET_POLL_CONTROLLER
5107/*
5108 * Polling 'interrupt' - used by things like netconsole to send skbs
5109 * without having to re-enable interrupts. It's not called while
5110 * the interrupt routine is executing.
5111 */
Joe Perches64798842008-07-11 15:17:02 -07005112static void e1000_netpoll(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005113{
Malli Chilakala60490fe2005-06-17 17:41:45 -07005114 struct e1000_adapter *adapter = netdev_priv(netdev);
Auke Kokd3d9e482006-07-14 16:14:23 -07005115
Linus Torvalds1da177e2005-04-16 15:20:36 -07005116 disable_irq(adapter->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01005117 e1000_intr(adapter->pdev->irq, netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005118 enable_irq(adapter->pdev->irq);
5119}
5120#endif
5121
Auke Kok90267292006-06-08 09:30:24 -07005122/**
5123 * e1000_io_error_detected - called when PCI error is detected
5124 * @pdev: Pointer to PCI device
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07005125 * @state: The current pci connection state
Auke Kok90267292006-06-08 09:30:24 -07005126 *
5127 * This function is called after a PCI bus error affecting
5128 * this device has been detected.
5129 */
Joe Perches64798842008-07-11 15:17:02 -07005130static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5131 pci_channel_state_t state)
Auke Kok90267292006-06-08 09:30:24 -07005132{
5133 struct net_device *netdev = pci_get_drvdata(pdev);
Wang Chen4cf16532008-11-12 23:38:14 -08005134 struct e1000_adapter *adapter = netdev_priv(netdev);
Auke Kok90267292006-06-08 09:30:24 -07005135
5136 netif_device_detach(netdev);
5137
Andre Detscheab63302009-06-30 12:46:13 +00005138 if (state == pci_channel_io_perm_failure)
5139 return PCI_ERS_RESULT_DISCONNECT;
5140
Auke Kok90267292006-06-08 09:30:24 -07005141 if (netif_running(netdev))
5142 e1000_down(adapter);
Linas Vepstas72e8d6b2006-09-18 20:58:06 -07005143 pci_disable_device(pdev);
Auke Kok90267292006-06-08 09:30:24 -07005144
5145 /* Request a slot slot reset. */
5146 return PCI_ERS_RESULT_NEED_RESET;
5147}
5148
5149/**
5150 * e1000_io_slot_reset - called after the pci bus has been reset.
5151 * @pdev: Pointer to PCI device
5152 *
5153 * Restart the card from scratch, as if from a cold-boot. Implementation
5154 * resembles the first-half of the e1000_resume routine.
5155 */
5156static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5157{
5158 struct net_device *netdev = pci_get_drvdata(pdev);
Wang Chen4cf16532008-11-12 23:38:14 -08005159 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07005160 struct e1000_hw *hw = &adapter->hw;
Taku Izumi81250292008-07-11 15:17:44 -07005161 int err;
Auke Kok90267292006-06-08 09:30:24 -07005162
Taku Izumi81250292008-07-11 15:17:44 -07005163 if (adapter->need_ioport)
5164 err = pci_enable_device(pdev);
5165 else
5166 err = pci_enable_device_mem(pdev);
5167 if (err) {
Emil Tantilov675ad472010-04-27 14:02:58 +00005168 pr_err("Cannot re-enable PCI device after reset.\n");
Auke Kok90267292006-06-08 09:30:24 -07005169 return PCI_ERS_RESULT_DISCONNECT;
5170 }
5171 pci_set_master(pdev);
5172
Linas Vepstasdbf38c92006-09-27 12:54:11 -07005173 pci_enable_wake(pdev, PCI_D3hot, 0);
5174 pci_enable_wake(pdev, PCI_D3cold, 0);
Auke Kok90267292006-06-08 09:30:24 -07005175
Auke Kok90267292006-06-08 09:30:24 -07005176 e1000_reset(adapter);
Joe Perches1dc32912008-07-11 15:17:08 -07005177 ew32(WUS, ~0);
Auke Kok90267292006-06-08 09:30:24 -07005178
5179 return PCI_ERS_RESULT_RECOVERED;
5180}
5181
5182/**
5183 * e1000_io_resume - called when traffic can start flowing again.
5184 * @pdev: Pointer to PCI device
5185 *
5186 * This callback is called when the error recovery driver tells us that
5187 * its OK to resume normal operation. Implementation resembles the
5188 * second-half of the e1000_resume routine.
5189 */
5190static void e1000_io_resume(struct pci_dev *pdev)
5191{
5192 struct net_device *netdev = pci_get_drvdata(pdev);
Wang Chen4cf16532008-11-12 23:38:14 -08005193 struct e1000_adapter *adapter = netdev_priv(netdev);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005194
5195 e1000_init_manageability(adapter);
Auke Kok90267292006-06-08 09:30:24 -07005196
5197 if (netif_running(netdev)) {
5198 if (e1000_up(adapter)) {
Emil Tantilov675ad472010-04-27 14:02:58 +00005199 pr_info("can't bring device back up after reset\n");
Auke Kok90267292006-06-08 09:30:24 -07005200 return;
5201 }
5202 }
5203
5204 netif_device_attach(netdev);
Auke Kok90267292006-06-08 09:30:24 -07005205}
5206
Linus Torvalds1da177e2005-04-16 15:20:36 -07005207/* e1000_main.c */