blob: 619b0cb60f2f3fa8b3ffc615d15c1996ae83f087 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*******************************************************************************
2
Auke Kok0abb6eb2006-09-27 12:53:14 -07003 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 more details.
Auke Kok0abb6eb2006-09-27 12:53:14 -070014
Linus Torvalds1da177e2005-04-16 15:20:36 -070015 You should have received a copy of the GNU General Public License along with
Auke Kok0abb6eb2006-09-27 12:53:14 -070016 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 Contact Information:
23 Linux NICS <linux.nics@intel.com>
Auke Kok3d41e302006-04-14 19:05:31 -070024 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "e1000.h"
Andrew Mortond0bb53e2006-11-14 10:35:03 -050030#include <net/ip6_checksum.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000031#include <linux/io.h>
Paul Gortmaker70c71602011-05-22 16:47:17 -040032#include <linux/prefetch.h>
Jiri Pirko5622e402011-07-21 03:26:31 +000033#include <linux/bitops.h>
34#include <linux/if_vlan.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000035
/* Driver identification strings. e1000_driver_name is non-static because it
 * is referenced from other files of this driver (e.g. as pci_driver.name).
 */
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
/* Non-static entry points used by other files of this driver
 * (ethtool, param handling, etc.).
 */
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
/* Per-ring resource setup/teardown helpers */
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

/* Module init/exit and PCI probe/remove */
static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
/* net_device_ops callbacks and datapath helpers */
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
/* Deferred work handlers (scheduled from the interrupt path) */
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

/* VLAN offload/filtering helpers */
static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
174
/* Legacy power-management hooks; only built when CONFIG_PM is set */
#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll (struct net_device *netdev);
#endif

/* copybreak: packets at or below this size are copied into a freshly
 * allocated skb on receive instead of handing up the ring buffer.
 * Runtime-tunable via sysfs (mode 0644).
 */
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

/* PCI error-recovery (AER) callbacks */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = e1000_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* debug: netif message level bitmap; -1 means use DEFAULT_MSG_ENABLE */
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
226
227/**
Emil Tantilov675ad472010-04-27 14:02:58 +0000228 * e1000_get_hw_dev - return device
229 * used by hardware layer to print debugging information
230 *
231 **/
232struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
233{
234 struct e1000_adapter *adapter = hw->back;
235 return adapter->netdev;
236}
237
238/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239 * e1000_init_module - Driver Registration Routine
240 *
241 * e1000_init_module is the first routine called when the driver is
242 * loaded. All it does is register with the PCI subsystem.
243 **/
Joe Perches64798842008-07-11 15:17:02 -0700244static int __init e1000_init_module(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700245{
246 int ret;
Emil Tantilov675ad472010-04-27 14:02:58 +0000247 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248
Emil Tantilov675ad472010-04-27 14:02:58 +0000249 pr_info("%s\n", e1000_copyright);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250
Jeff Garzik29917622006-08-19 17:48:59 -0400251 ret = pci_register_driver(&e1000_driver);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100252 if (copybreak != COPYBREAK_DEFAULT) {
253 if (copybreak == 0)
Emil Tantilov675ad472010-04-27 14:02:58 +0000254 pr_info("copybreak disabled\n");
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100255 else
Emil Tantilov675ad472010-04-27 14:02:58 +0000256 pr_info("copybreak enabled for "
257 "packets <= %u bytes\n", copybreak);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100258 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259 return ret;
260}
261
262module_init(e1000_init_module);
263
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.  Unregistering triggers ->remove for every bound device.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
276
Auke Kok2db10a02006-06-27 09:06:28 -0700277static int e1000_request_irq(struct e1000_adapter *adapter)
278{
279 struct net_device *netdev = adapter->netdev;
Al Viro3e188262007-12-11 19:49:39 +0000280 irq_handler_t handler = e1000_intr;
Auke Koke94bd232007-05-16 01:49:46 -0700281 int irq_flags = IRQF_SHARED;
282 int err;
Auke Kok2db10a02006-06-27 09:06:28 -0700283
Auke Koke94bd232007-05-16 01:49:46 -0700284 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
285 netdev);
286 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -0700287 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
Auke Koke94bd232007-05-16 01:49:46 -0700288 }
Auke Kok2db10a02006-06-27 09:06:28 -0700289
290 return err;
291}
292
/**
 * e1000_free_irq - release the interrupt line acquired in e1000_request_irq
 * @adapter: board private structure
 **/
static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* dev_id must match the cookie passed to request_irq() */
	free_irq(adapter->pdev->irq, netdev);
}
299
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* mask all interrupt causes, then flush the posted write so the
	 * mask takes effect before we wait for in-flight handlers
	 */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}
312
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* unmask the default interrupt set and flush the posted write */
	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100324
/**
 * e1000_update_mng_vlan - keep the manageability VLAN filter in sync
 * @adapter: board private structure
 *
 * Makes sure the VLAN id carried in the firmware's DHCP manageability
 * cookie stays programmed into the VLAN filter table, and drops the
 * previously programmed manageability VLAN when it changes and is no
 * longer in use by the stack.
 **/
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			/* cookie VLAN not yet in the filter table: add it */
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		/* retire the old manageability vid unless the stack still
		 * has it active in its own right
		 */
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
Jeff Kirsherb55ccb32006-01-12 16:50:30 -0800352
Joe Perches64798842008-07-11 15:17:02 -0700353static void e1000_init_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500354{
Joe Perches1dc32912008-07-11 15:17:08 -0700355 struct e1000_hw *hw = &adapter->hw;
356
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500357 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700358 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500359
360 /* disable hardware interception of ARP */
361 manc &= ~(E1000_MANC_ARP_EN);
362
Joe Perches1dc32912008-07-11 15:17:08 -0700363 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500364 }
365}
366
Joe Perches64798842008-07-11 15:17:02 -0700367static void e1000_release_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500368{
Joe Perches1dc32912008-07-11 15:17:08 -0700369 struct e1000_hw *hw = &adapter->hw;
370
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500371 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700372 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500373
374 /* re-enable hardware interception of ARP */
375 manc |= E1000_MANC_ARP_EN;
376
Joe Perches1dc32912008-07-11 15:17:08 -0700377 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500378 }
379}
380
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 *
 * Programs receive filtering, VLANs, manageability and both the Tx and
 * Rx units, then pre-fills every Rx ring with buffers.
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}
Jeff Kirsher7bfa4812006-01-12 16:50:41 -0800408
/**
 * e1000_up - bring the interface up after a reset
 * @adapter: board private structure
 *
 * Reprograms the hardware, re-enables NAPI, interrupts and the Tx
 * queue, then fires a link-status-change interrupt so the watchdog
 * picks up the current link state.  Always returns 0.
 **/
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
428
Auke Kok79f05bf2006-06-27 09:06:32 -0700429/**
430 * e1000_power_up_phy - restore link in case the phy was powered down
431 * @adapter: address of board private structure
432 *
433 * The phy may be powered down to save power and turn off link when the
434 * driver is unloaded and wake on lan is not enabled (among others)
435 * *** this routine MUST be followed by a call to e1000_reset ***
Auke Kok79f05bf2006-06-27 09:06:32 -0700436 **/
Jesse Brandeburgd6582662006-08-16 13:31:33 -0700437void e1000_power_up_phy(struct e1000_adapter *adapter)
Auke Kok79f05bf2006-06-27 09:06:32 -0700438{
Joe Perches1dc32912008-07-11 15:17:08 -0700439 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -0700440 u16 mii_reg = 0;
Auke Kok79f05bf2006-06-27 09:06:32 -0700441
442 /* Just clear the power down bit to wake the phy back up */
Joe Perches1dc32912008-07-11 15:17:08 -0700443 if (hw->media_type == e1000_media_type_copper) {
Auke Kok79f05bf2006-06-27 09:06:32 -0700444 /* according to the manual, the phy will retain its
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000445 * settings across a power-down/up cycle
446 */
Joe Perches1dc32912008-07-11 15:17:08 -0700447 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700448 mii_reg &= ~MII_CR_POWER_DOWN;
Joe Perches1dc32912008-07-11 15:17:08 -0700449 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700450 }
451}
452
/**
 * e1000_power_down_phy - power down the PHY when the interface goes down
 * @adapter: board private structure
 *
 * Sets the MII power-down bit on copper PHYs so no link is implied
 * while the interface is down, but only when it is safe to do so.
 **/
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			/* firmware is using the SMBus: leave the PHY up */
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			/* unknown MAC type: be conservative, don't power down */
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		/* give the PHY time to enter power-down */
		msleep(1);
	}
out:
	return;
}
492
/**
 * e1000_down_and_stop - mark the adapter down and stop all deferred work
 * @adapter: board private structure
 *
 * Sets __E1000_DOWN first so work handlers that test it bail out, then
 * synchronously cancels the outstanding work items.
 **/
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Only kill reset task if adapter is not resetting;
	 * otherwise we could deadlock waiting for the very task
	 * that is running us (reset_task calls down paths itself)
	 */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}
505
/**
 * e1000_down - quiesce the adapter and clean out the rings
 * @adapter: board private structure
 *
 * Disables receive and transmit in hardware, stops NAPI, interrupts
 * and all deferred work, then resets the device and frees ring
 * contents.  The ordering of these steps matters; see inline comments.
 **/
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;


	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546
/**
 * e1000_reinit_locked - restart the interface under the RESETTING flag
 * @adapter: board private structure
 *
 * Serializes against concurrent resets by spinning on the
 * __E1000_RESETTING bit, then performs a full down/up cycle.
 * Must be called from process context (it sleeps).
 **/
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
	/* wait for any in-progress reset to finish before starting ours */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
556
Joe Perches64798842008-07-11 15:17:02 -0700557void e1000_reset(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558{
Joe Perches1dc32912008-07-11 15:17:08 -0700559 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -0700560 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
Joe Perchesc3033b02008-03-21 11:06:25 -0700561 bool legacy_pba_adjust = false;
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000562 u16 hwm;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700563
564 /* Repartition Pba for greater than 9k mtu
565 * To take effect CTRL.RST is required.
566 */
567
Joe Perches1dc32912008-07-11 15:17:08 -0700568 switch (hw->mac_type) {
Bruce Allan018ea442006-12-15 10:39:45 +0100569 case e1000_82542_rev2_0:
570 case e1000_82542_rev2_1:
571 case e1000_82543:
572 case e1000_82544:
573 case e1000_82540:
574 case e1000_82541:
575 case e1000_82541_rev_2:
Joe Perchesc3033b02008-03-21 11:06:25 -0700576 legacy_pba_adjust = true;
Bruce Allan018ea442006-12-15 10:39:45 +0100577 pba = E1000_PBA_48K;
578 break;
579 case e1000_82545:
580 case e1000_82545_rev_3:
581 case e1000_82546:
Dirk Brandewie5377a412011-01-06 14:29:54 +0000582 case e1000_ce4100:
Bruce Allan018ea442006-12-15 10:39:45 +0100583 case e1000_82546_rev_3:
584 pba = E1000_PBA_48K;
585 break;
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700586 case e1000_82547:
Malli Chilakala0e6ef3e2005-04-28 19:44:14 -0700587 case e1000_82547_rev_2:
Joe Perchesc3033b02008-03-21 11:06:25 -0700588 legacy_pba_adjust = true;
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700589 pba = E1000_PBA_30K;
590 break;
Bruce Allan018ea442006-12-15 10:39:45 +0100591 case e1000_undefined:
592 case e1000_num_macs:
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700593 break;
594 }
595
Joe Perchesc3033b02008-03-21 11:06:25 -0700596 if (legacy_pba_adjust) {
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000597 if (hw->max_frame_size > E1000_RXBUFFER_8192)
Bruce Allan018ea442006-12-15 10:39:45 +0100598 pba -= 8; /* allocate more FIFO for Tx */
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700599
Joe Perches1dc32912008-07-11 15:17:08 -0700600 if (hw->mac_type == e1000_82547) {
Bruce Allan018ea442006-12-15 10:39:45 +0100601 adapter->tx_fifo_head = 0;
602 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
603 adapter->tx_fifo_size =
604 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
605 atomic_set(&adapter->tx_fifo_stall, 0);
606 }
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000607 } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
Bruce Allan018ea442006-12-15 10:39:45 +0100608 /* adjust PBA for jumbo frames */
Joe Perches1dc32912008-07-11 15:17:08 -0700609 ew32(PBA, pba);
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700610
Bruce Allan018ea442006-12-15 10:39:45 +0100611 /* To maintain wire speed transmits, the Tx FIFO should be
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000612 * large enough to accommodate two full transmit packets,
Bruce Allan018ea442006-12-15 10:39:45 +0100613 * rounded up to the next 1KB and expressed in KB. Likewise,
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000614 * the Rx FIFO should be large enough to accommodate at least
Bruce Allan018ea442006-12-15 10:39:45 +0100615 * one full receive packet and is similarly rounded up and
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000616 * expressed in KB.
617 */
Joe Perches1dc32912008-07-11 15:17:08 -0700618 pba = er32(PBA);
Bruce Allan018ea442006-12-15 10:39:45 +0100619 /* upper 16 bits has Tx packet buffer allocation size in KB */
620 tx_space = pba >> 16;
621 /* lower 16 bits has Rx packet buffer allocation size in KB */
622 pba &= 0xffff;
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000623 /* the Tx fifo also stores 16 bytes of information about the Tx
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000624 * but don't include ethernet FCS because hardware appends it
625 */
626 min_tx_space = (hw->max_frame_size +
627 sizeof(struct e1000_tx_desc) -
628 ETH_FCS_LEN) * 2;
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -0700629 min_tx_space = ALIGN(min_tx_space, 1024);
Bruce Allan018ea442006-12-15 10:39:45 +0100630 min_tx_space >>= 10;
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000631 /* software strips receive CRC, so leave room for it */
632 min_rx_space = hw->max_frame_size;
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -0700633 min_rx_space = ALIGN(min_rx_space, 1024);
Bruce Allan018ea442006-12-15 10:39:45 +0100634 min_rx_space >>= 10;
635
636 /* If current Tx allocation is less than the min Tx FIFO size,
637 * and the min Tx FIFO size is less than the current Rx FIFO
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000638 * allocation, take space away from current Rx allocation
639 */
Bruce Allan018ea442006-12-15 10:39:45 +0100640 if (tx_space < min_tx_space &&
641 ((min_tx_space - tx_space) < pba)) {
642 pba = pba - (min_tx_space - tx_space);
643
644 /* PCI/PCIx hardware has PBA alignment constraints */
Joe Perches1dc32912008-07-11 15:17:08 -0700645 switch (hw->mac_type) {
Bruce Allan018ea442006-12-15 10:39:45 +0100646 case e1000_82545 ... e1000_82546_rev_3:
647 pba &= ~(E1000_PBA_8K - 1);
648 break;
649 default:
650 break;
651 }
652
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000653 /* if short on Rx space, Rx wins and must trump Tx
654 * adjustment or use Early Receive if available
655 */
Jesse Brandeburg1532ece2009-09-25 12:16:14 +0000656 if (pba < min_rx_space)
657 pba = min_rx_space;
Bruce Allan018ea442006-12-15 10:39:45 +0100658 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700660
Joe Perches1dc32912008-07-11 15:17:08 -0700661 ew32(PBA, pba);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700662
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000663 /* flow control settings:
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000664 * The high water mark must be low enough to fit one full frame
665 * (or the size used for early receive) above it in the Rx FIFO.
666 * Set it to the lower of:
667 * - 90% of the Rx FIFO size, and
668 * - the full Rx FIFO size minus the early receive size (for parts
669 * with ERT support assuming ERT set to E1000_ERT_2048), or
670 * - the full Rx FIFO size minus one full frame
671 */
672 hwm = min(((pba << 10) * 9 / 10),
673 ((pba << 10) - hw->max_frame_size));
Jeff Kirsherf11b7f82006-01-12 16:50:51 -0800674
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000675 hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
676 hw->fc_low_water = hw->fc_high_water - 8;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +0000677 hw->fc_pause_time = E1000_FC_PAUSE_TIME;
Joe Perches1dc32912008-07-11 15:17:08 -0700678 hw->fc_send_xon = 1;
679 hw->fc = hw->original_fc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700680
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700681 /* Allow time for pending master requests to run */
Joe Perches1dc32912008-07-11 15:17:08 -0700682 e1000_reset_hw(hw);
683 if (hw->mac_type >= e1000_82544)
684 ew32(WUC, 0);
Jeff Kirsher09ae3e82006-09-27 12:53:51 -0700685
Joe Perches1dc32912008-07-11 15:17:08 -0700686 if (e1000_init_hw(hw))
Emil Tantilovfeb8f472010-07-26 23:37:21 -0700687 e_dev_err("Hardware Error\n");
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700688 e1000_update_mng_vlan(adapter);
Jesse Brandeburg3d5460a2006-12-15 10:33:46 +0100689
690 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
Joe Perches1dc32912008-07-11 15:17:08 -0700691 if (hw->mac_type >= e1000_82544 &&
Joe Perches1dc32912008-07-11 15:17:08 -0700692 hw->autoneg == 1 &&
693 hw->autoneg_advertised == ADVERTISE_1000_FULL) {
694 u32 ctrl = er32(CTRL);
Jesse Brandeburg3d5460a2006-12-15 10:33:46 +0100695 /* clear phy power management bit if we are in gig only mode,
696 * which if enabled will attempt negotiation to 100Mb, which
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000697 * can cause a loss of link at power off or driver unload
698 */
Jesse Brandeburg3d5460a2006-12-15 10:33:46 +0100699 ctrl &= ~E1000_CTRL_SWDPIN3;
Joe Perches1dc32912008-07-11 15:17:08 -0700700 ew32(CTRL, ctrl);
Jesse Brandeburg3d5460a2006-12-15 10:33:46 +0100701 }
702
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
Joe Perches1dc32912008-07-11 15:17:08 -0700704 ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700705
Joe Perches1dc32912008-07-11 15:17:08 -0700706 e1000_reset_adaptive(hw);
707 e1000_phy_get_info(hw, &adapter->phy_info);
Auke Kok9a53a202006-06-27 09:06:45 -0700708
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500709 e1000_release_manageability(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700710}
711
/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	/* Fetch the full EEPROM image through the driver's own ethtool
	 * hooks, exactly as `ethtool -e` would see it.
	 */
	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);

	/* Stored checksum: the little-endian 16-bit word at word offset
	 * EEPROM_CHECKSUM_REG (hence the *2 byte indexing).
	 */
	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	/* Recompute: the 16-bit sum of all words up to (but excluding)
	 * the checksum word plus the checksum itself must equal
	 * EEPROM_SUM, so the expected checksum is the difference.
	 */
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	/* Only the first 128 bytes are dumped; that covers the MAC
	 * address, init control words and the checksum word.
	 */
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}
759
760/**
Taku Izumi81250292008-07-11 15:17:44 -0700761 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
762 * @pdev: PCI device information struct
763 *
764 * Return true if an adapter needs ioport resources
765 **/
766static int e1000_is_need_ioport(struct pci_dev *pdev)
767{
768 switch (pdev->device) {
769 case E1000_DEV_ID_82540EM:
770 case E1000_DEV_ID_82540EM_LOM:
771 case E1000_DEV_ID_82540EP:
772 case E1000_DEV_ID_82540EP_LOM:
773 case E1000_DEV_ID_82540EP_LP:
774 case E1000_DEV_ID_82541EI:
775 case E1000_DEV_ID_82541EI_MOBILE:
776 case E1000_DEV_ID_82541ER:
777 case E1000_DEV_ID_82541ER_LOM:
778 case E1000_DEV_ID_82541GI:
779 case E1000_DEV_ID_82541GI_LF:
780 case E1000_DEV_ID_82541GI_MOBILE:
781 case E1000_DEV_ID_82544EI_COPPER:
782 case E1000_DEV_ID_82544EI_FIBER:
783 case E1000_DEV_ID_82544GC_COPPER:
784 case E1000_DEV_ID_82544GC_LOM:
785 case E1000_DEV_ID_82545EM_COPPER:
786 case E1000_DEV_ID_82545EM_FIBER:
787 case E1000_DEV_ID_82546EB_COPPER:
788 case E1000_DEV_ID_82546EB_FIBER:
789 case E1000_DEV_ID_82546EB_QUAD_COPPER:
790 return true;
791 default:
792 return false;
793 }
794}
795
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000796static netdev_features_t e1000_fix_features(struct net_device *netdev,
797 netdev_features_t features)
Jiri Pirko5622e402011-07-21 03:26:31 +0000798{
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000799 /* Since there is no support for separate Rx/Tx vlan accel
800 * enable/disable make sure Tx flag is always in same state as Rx.
Jiri Pirko5622e402011-07-21 03:26:31 +0000801 */
Patrick McHardyf6469682013-04-19 02:04:27 +0000802 if (features & NETIF_F_HW_VLAN_CTAG_RX)
803 features |= NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirko5622e402011-07-21 03:26:31 +0000804 else
Patrick McHardyf6469682013-04-19 02:04:27 +0000805 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirko5622e402011-07-21 03:26:31 +0000806
807 return features;
808}
809
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000810static int e1000_set_features(struct net_device *netdev,
811 netdev_features_t features)
Michał Mirosławe97d3202011-06-08 08:36:42 +0000812{
813 struct e1000_adapter *adapter = netdev_priv(netdev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000814 netdev_features_t changed = features ^ netdev->features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000815
Patrick McHardyf6469682013-04-19 02:04:27 +0000816 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
Jiri Pirko5622e402011-07-21 03:26:31 +0000817 e1000_vlan_mode(netdev, features);
818
Ben Greeare825b732012-04-04 06:01:29 +0000819 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
Michał Mirosławe97d3202011-06-08 08:36:42 +0000820 return 0;
821
Ben Greeare825b732012-04-04 06:01:29 +0000822 netdev->features = features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000823 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
824
825 if (netif_running(netdev))
826 e1000_reinit_locked(adapter);
827 else
828 e1000_reset(adapter);
829
830 return 0;
831}
832
/* net_device callbacks for the e1000 driver; installed on the netdev
 * in e1000_probe(). No ndo_get_stats64 here — stats go through the
 * legacy ndo_get_stats path.
 */
static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	/* netconsole/netpoll support */
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};
852
Taku Izumi81250292008-07-11 15:17:44 -0700853/**
Jesse Brandeburge508be12010-09-07 21:01:12 +0000854 * e1000_init_hw_struct - initialize members of hw struct
855 * @adapter: board private struct
856 * @hw: structure used by e1000_hw.c
857 *
858 * Factors out initialization of the e1000_hw struct to its own function
859 * that can be called very early at init (just after struct allocation).
860 * Fields are initialized based on PCI device information and
861 * OS network device settings (MTU size).
862 * Returns negative error codes if MAC type setup fails.
863 */
864static int e1000_init_hw_struct(struct e1000_adapter *adapter,
865 struct e1000_hw *hw)
866{
867 struct pci_dev *pdev = adapter->pdev;
868
869 /* PCI config space info */
870 hw->vendor_id = pdev->vendor;
871 hw->device_id = pdev->device;
872 hw->subsystem_vendor_id = pdev->subsystem_vendor;
873 hw->subsystem_id = pdev->subsystem_device;
874 hw->revision_id = pdev->revision;
875
876 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
877
878 hw->max_frame_size = adapter->netdev->mtu +
879 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
880 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
881
882 /* identify the MAC */
883 if (e1000_set_mac_type(hw)) {
884 e_err(probe, "Unknown MAC Type\n");
885 return -EIO;
886 }
887
888 switch (hw->mac_type) {
889 default:
890 break;
891 case e1000_82541:
892 case e1000_82547:
893 case e1000_82541_rev_2:
894 case e1000_82547_rev_2:
895 hw->phy_init_script = 1;
896 break;
897 }
898
899 e1000_set_media_type(hw);
900 e1000_get_bus_info(hw);
901
902 hw->wait_autoneg_complete = false;
903 hw->tbi_compatibility_en = true;
904 hw->adaptive_ifs = true;
905
906 /* Copper options */
907
908 if (hw->media_type == e1000_media_type_copper) {
909 hw->mdix = AUTO_ALL_MODES;
910 hw->disable_polarity_correction = false;
911 hw->master_slave = E1000_MASTER_SLAVE;
912 }
913
914 return 0;
915}
916
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	/* saved state is restored on resume / error recovery paths */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;

	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	/* map the device registers (BAR 0) */
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	/* on legacy parts, find the first I/O BAR for port-based access */
	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/* there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware. There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
	}

	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	/* temporary name until register_netdev() assigns eth%d below */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */

	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	/* CE4100 exposes its MDIO interface through a second BAR */
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
					ioremap(pci_resource_start(pdev, BAR_1),
						pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	/* offload capabilities depend on MAC generation */
	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	/* Rx checksum/RXALL/RXFCS are user-toggleable but not enabled
	 * by default (added to hw_features only, after the |= above).
	 */
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state
	 */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/* set MAC address to all zeroes to invalidate and temporary
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initalization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr))
		e_err(probe, "Invalid MAC Address\n");


	/* deferred work: watchdog, 82547 Tx FIFO workaround, PHY info,
	 * and the reset task used from error/timeout paths
	 */
	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);

	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		/* dual-port parts read port B's control word on function 1 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1){
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		/* probe addresses 0..31; PHY_ID2 of 0x0000 or 0x00FF
		 * means no PHY responded at that address
		 */
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;

/* error unwind: labels fall through so each releases everything
 * acquired after the one below it
 */
err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	/* NOTE(review): this iounmap runs even when the CE4100 MDIO BAR
	 * was never mapped (non-CE4100 parts, or err_sw_init taken before
	 * the ioremap); it relies on the pointer being NULL from
	 * alloc_etherdev's zeroed private area — confirm iounmap(NULL)
	 * is tolerated on all supported arches.
	 */
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}
1245
1246/**
1247 * e1000_remove - Device Removal Routine
1248 * @pdev: PCI device information struct
1249 *
1250 * e1000_remove is called by the PCI subsystem to alert the driver
1251 * that it should release a PCI device. The could be caused by a
1252 * Hot-Plug event, or because the driver is going to be removed from
1253 * memory.
1254 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Stop the watchdog/reset work and bring the interface down before
	 * any of the resources they use are released below.
	 */
	e1000_down_and_stop(adapter);
	/* hand manageability features back to firmware control */
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	/* quiesce the PHY now that the stack can no longer reach us */
	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	/* only CE4100 parts have the separately mapped MDIO BAR to unmap */
	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	/* flash BAR is optional; only unmap it if probe mapped it */
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
1282
1283/**
1284 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1285 * @adapter: board private structure to initialize
1286 *
1287 * e1000_sw_init initializes the Adapter private data structure.
Jesse Brandeburge508be12010-09-07 21:01:12 +00001288 * e1000_init_hw_struct MUST be called before this function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05001290static int e1000_sw_init(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291{
Auke Kokeb0f8052006-07-14 16:14:48 -07001292 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001294 adapter->num_tx_queues = 1;
1295 adapter->num_rx_queues = 1;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001296
1297 if (e1000_alloc_queues(adapter)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001298 e_err(probe, "Unable to allocate memory for queues\n");
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001299 return -ENOMEM;
1300 }
1301
Herbert Xu47313052007-05-29 15:07:31 -07001302 /* Explicitly disable IRQ since the NIC can be in any state. */
Herbert Xu47313052007-05-29 15:07:31 -07001303 e1000_irq_disable(adapter);
1304
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 spin_lock_init(&adapter->stats_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306
Auke Kok1314bbf2006-09-27 12:54:02 -07001307 set_bit(__E1000_DOWN, &adapter->flags);
1308
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 return 0;
1310}
1311
1312/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001313 * e1000_alloc_queues - Allocate memory for all rings
1314 * @adapter: board private structure to initialize
1315 *
1316 * We allocate one ring per queue at run-time since we don't know the
Wang Chen3e1d7cd2008-12-03 22:07:10 -08001317 * number of queues at compile-time.
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001318 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05001319static int e1000_alloc_queues(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001320{
Yan Burman1c7e5b12007-03-06 08:58:04 -08001321 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1322 sizeof(struct e1000_tx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001323 if (!adapter->tx_ring)
1324 return -ENOMEM;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001325
Yan Burman1c7e5b12007-03-06 08:58:04 -08001326 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1327 sizeof(struct e1000_rx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001328 if (!adapter->rx_ring) {
1329 kfree(adapter->tx_ring);
1330 return -ENOMEM;
1331 }
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001332
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001333 return E1000_SUCCESS;
1334}
1335
1336/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337 * e1000_open - Called when a network interface is made active
1338 * @netdev: network interface device structure
1339 *
1340 * Returns 0 on success, negative value on failure
1341 *
1342 * The open entry point is called when a network interface is made
1343 * active by the system (IFF_UP). At this point all resources needed
1344 * for transmit and receive operations are allocated, the interrupt
Jesse Brandeburga4010af2011-10-05 07:24:41 +00001345 * handler is registered with the OS, the watchdog task is started,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 * and the stack is notified that the interface is ready.
1347 **/
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	/* report carrier down until the link interrupt says otherwise */
	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	/* pick up the manageability VLAN if the firmware cookie asks for one */
	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

	/* unwind in strict reverse order of acquisition */
err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
1413
1414/**
1415 * e1000_close - Disables a network interface
1416 * @netdev: network interface device structure
1417 *
1418 * Returns 0, this is not allowed to fail
1419 *
1420 * The close entry point is called when an interface is de-activated
1421 * by the OS. The hardware is still under the drivers control, but
1422 * needs to be disabled. A global MAC reset is issued to stop the
1423 * hardware, and all transmit and receive resources are freed.
1424 **/
static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

	/* give an in-flight reset a bounded amount of time to finish
	 * (10-20 ms per poll) before tearing the interface down
	 */
	while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	/* if the reset flag is still set we timed out waiting; warn loudly */
	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}
1454
1455/**
1456 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1457 * @adapter: address of board private structure
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001458 * @start: address of beginning of memory
1459 * @len: length of memory
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 **/
Joe Perches64798842008-07-11 15:17:02 -07001461static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1462 unsigned long len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463{
Joe Perches1dc32912008-07-11 15:17:08 -07001464 struct e1000_hw *hw = &adapter->hw;
Joe Perchese982f172008-07-11 15:17:18 -07001465 unsigned long begin = (unsigned long)start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466 unsigned long end = begin + len;
1467
Malli Chilakala26483452005-04-28 19:44:46 -07001468 /* First rev 82545 and 82546 need to not allow any memory
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001469 * write location to cross 64k boundary due to errata 23
1470 */
Joe Perches1dc32912008-07-11 15:17:08 -07001471 if (hw->mac_type == e1000_82545 ||
Dirk Brandewie5377a412011-01-06 14:29:54 +00001472 hw->mac_type == e1000_ce4100 ||
Joe Perches1dc32912008-07-11 15:17:08 -07001473 hw->mac_type == e1000_82546) {
Joe Perchesc3033b02008-03-21 11:06:25 -07001474 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475 }
1476
Joe Perchesc3033b02008-03-21 11:06:25 -07001477 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478}
1479
1480/**
1481 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1482 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001483 * @txdr: tx descriptor ring (for a specific queue) to setup
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 *
1485 * Return 0 on success, negative on failure
1486 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	/* per-descriptor software state (skb, dma handle, ...) */
	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
/* label sits inside the if so the later errata failure paths can jump
 * back here and reuse the common buffer_info cleanup
 */
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous: keeping the old
		 * allocation alive forces the retry to land elsewhere
		 */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up: both attempts straddle a 64kB boundary */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	/* hardware reads the descriptors; start from a known-zero state */
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
1550
1551/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001552 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1553 * (Descriptors) for all queues
1554 * @adapter: board private structure
1555 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001556 * Return 0 on success, negative on failure
1557 **/
Joe Perches64798842008-07-11 15:17:02 -07001558int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001559{
1560 int i, err = 0;
1561
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001562 for (i = 0; i < adapter->num_tx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001563 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1564 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001565 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001566 for (i-- ; i >= 0; i--)
1567 e1000_free_tx_resources(adapter,
1568 &adapter->tx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001569 break;
1570 }
1571 }
1572
1573 return err;
1574}
1575
1576/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1578 * @adapter: board private structure
1579 *
1580 * Configure the Tx unit of the MAC after a reset.
1581 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		/* point the hardware at the single ring and reset
		 * head/tail so the ring starts out empty
		 */
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		/* 82542 uses different register offsets than later MACs */
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	/* IPG receive-side fields depend on MAC generation; 82542 also
	 * overrides the transmit-side value chosen above
	 */
	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	/* the absolute-delay register only exists on 82540 and newer */
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* status reporting bit differs by MAC generation: RPS before
	 * 82543, RS from 82543 onward
	 */
	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path.
	 */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	/* commit TCTL last, after everything else is configured */
	ew32(TCTL, tctl);

}
1669
1670/**
1671 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1672 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001673 * @rxdr: rx descriptor ring (for a specific queue) to setup
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 *
1675 * Returns 0 on success, negative on failure
1676 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	/* per-descriptor software state (skb, dma handle, ...) */
	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
/* label sits inside the if so the later errata failure paths can jump
 * back here and reuse the common buffer_info cleanup
 */
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous: keeping the old
		 * allocation alive forces the retry to land elsewhere
		 */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up: both attempts straddle a 64kB boundary */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	/* hardware writes completions into these; start from zero state */
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	/* no partially assembled jumbo frame is pending yet */
	rxdr->rx_skb_top = NULL;

	return 0;
}
1742
1743/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001744 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1745 * (Descriptors) for all queues
1746 * @adapter: board private structure
1747 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001748 * Return 0 on success, negative on failure
1749 **/
Joe Perches64798842008-07-11 15:17:02 -07001750int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001751{
1752 int i, err = 0;
1753
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001754 for (i = 0; i < adapter->num_rx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001755 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1756 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001757 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001758 for (i-- ; i >= 0; i--)
1759 e1000_free_rx_resources(adapter,
1760 &adapter->rx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001761 break;
1762 }
1763 }
1764
1765 return err;
1766}
1767
1768/**
Malli Chilakala26483452005-04-28 19:44:46 -07001769 * e1000_setup_rctl - configure the receive control registers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 * @adapter: Board private structure
1771 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	/* clear the multicast-offset field before OR-ing in the new value */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* store bad packets only while TBI compatibility is active */
	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* long-packet enable tracks whether the MTU exceeds standard size */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes: assume the extended (BSEX) encoding and
	 * back it out for the one size that does not use it
	 */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		/* 2048 is encoded without the buffer-size extension bit */
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
1834
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	/* Jumbo MTUs use page-based buffers and a dedicated clean/alloc
	 * path; standard MTUs use the normal skb-based path.
	 */
	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		/* 82542 uses different head/tail register offsets than
		 * 82543 and later parts
		 */
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl | E1000_RCTL_EN);
}
1905
1906/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001907 * e1000_free_tx_resources - Free Tx Resources per Queue
1908 * @adapter: board private structure
1909 * @tx_ring: Tx descriptor ring for a specific queue
1910 *
1911 * Free all transmit software resources
1912 **/
Joe Perches64798842008-07-11 15:17:02 -07001913static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1914 struct e1000_tx_ring *tx_ring)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001915{
1916 struct pci_dev *pdev = adapter->pdev;
1917
1918 e1000_clean_tx_ring(adapter, tx_ring);
1919
1920 vfree(tx_ring->buffer_info);
1921 tx_ring->buffer_info = NULL;
1922
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001923 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1924 tx_ring->dma);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001925
1926 tx_ring->desc = NULL;
1927}
1928
1929/**
1930 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 * @adapter: board private structure
1932 *
1933 * Free all transmit software resources
1934 **/
Joe Perches64798842008-07-11 15:17:02 -07001935void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936{
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001937 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001939 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001940 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941}
1942
/* Release the DMA mapping and sk_buff (if any) associated with one
 * transmit buffer_info slot.  Page fragments and linear data were mapped
 * with different DMA APIs, so they must be unmapped with the matching one.
 */
static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
					     struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}
1963
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	/* reset BQL state to match the now-empty ring */
	netdev_reset_queue(adapter->netdev);
	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = false;

	/* reset the hardware head and tail pointers for this ring */
	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
}
1999
2000/**
2001 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2002 * @adapter: board private structure
2003 **/
Joe Perches64798842008-07-11 15:17:02 -07002004static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002005{
2006 int i;
2007
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002008 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002009 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010}
2011
/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	/* unmap and free any buffers still held by the ring first */
	e1000_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	/* release the coherent DMA region holding the descriptors */
	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
2034
2035/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002036 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002038 *
2039 * Free all receive software resources
2040 **/
Joe Perches64798842008-07-11 15:17:02 -07002041void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002042{
2043 int i;
2044
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002045 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002046 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2047}
2048
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs.  The active clean_rx handler
	 * tells us how the buffer was mapped: the normal path maps
	 * linear skb data (dma_unmap_single), the jumbo path maps
	 * pages (dma_unmap_page).
	 */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma &&
		    adapter->clean_rx == e1000_clean_rx_irq) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_FROM_DEVICE);
		} else if (buffer_info->dma &&
		           adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
			dma_unmap_page(&pdev->dev, buffer_info->dma,
				       buffer_info->length,
				       DMA_FROM_DEVICE);
		}

		buffer_info->dma = 0;
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* reset the hardware head and tail pointers for this ring */
	writel(0, hw->hw_addr + rx_ring->rdh);
	writel(0, hw->hw_addr + rx_ring->rdt);
}
2107
2108/**
2109 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2110 * @adapter: board private structure
2111 **/
Joe Perches64798842008-07-11 15:17:02 -07002112static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002113{
2114 int i;
2115
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002116 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002117 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118}
2119
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* disable PCI memory-write-and-invalidate while in receive reset */
	e1000_pci_clear_mwi(hw);

	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	/* give the hardware time to complete the receive-unit reset */
	mdelay(5);

	/* the reset invalidates any buffered receives; drop them */
	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
2140
/* Take the 82542 2.0 receive unit back out of reset and restore the
 * receive configuration that e1000_enter_82542_rst() tore down.
 */
static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	/* give the hardware time to come out of receive reset */
	mdelay(5);

	/* re-enable memory-write-and-invalidate if the PCI config had it */
	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
2163
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);

	/* program the new station address into receive address register 0 */
	e1000_rar_set(hw, hw->mac_addr, 0);

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}
2195
2196/**
Patrick McHardydb0ce502007-11-13 20:54:59 -08002197 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 * @netdev: network interface device structure
2199 *
Patrick McHardydb0ce502007-11-13 20:54:59 -08002200 * The set_rx_mode entry point is called whenever the unicast or multicast
2201 * address lists or the network interface flags are updated. This routine is
2202 * responsible for configuring the hardware for proper unicast, multicast,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 * promiscuous mode, and all-multi behavior.
2204 **/
Joe Perches64798842008-07-11 15:17:02 -07002205static void e1000_set_rx_mode(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206{
Malli Chilakala60490fe2005-06-17 17:41:45 -07002207 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 struct e1000_hw *hw = &adapter->hw;
Jiri Pirkoccffad252009-05-22 23:22:17 +00002209 struct netdev_hw_addr *ha;
2210 bool use_uc = false;
Joe Perches406874a2008-04-03 10:06:32 -07002211 u32 rctl;
2212 u32 hash_value;
Mallikarjuna R Chilakala868d5302005-10-04 06:58:59 -04002213 int i, rar_entries = E1000_RAR_ENTRIES;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002214 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002215 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2216
Joe Perches14f8dc42013-02-07 11:46:27 +00002217 if (!mcarray)
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002218 return;
Auke Kokcd94dd02006-06-27 09:08:22 -07002219
Malli Chilakala26483452005-04-28 19:44:46 -07002220 /* Check for Promiscuous and All Multicast modes */
2221
Joe Perches1dc32912008-07-11 15:17:08 -07002222 rctl = er32(RCTL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002224 if (netdev->flags & IFF_PROMISC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Patrick McHardy746b9f02008-07-16 20:15:45 -07002226 rctl &= ~E1000_RCTL_VFE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 } else {
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002228 if (netdev->flags & IFF_ALLMULTI)
Patrick McHardy746b9f02008-07-16 20:15:45 -07002229 rctl |= E1000_RCTL_MPE;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002230 else
Patrick McHardy746b9f02008-07-16 20:15:45 -07002231 rctl &= ~E1000_RCTL_MPE;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002232 /* Enable VLAN filter if there is a VLAN */
Jiri Pirko5622e402011-07-21 03:26:31 +00002233 if (e1000_vlan_used(adapter))
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002234 rctl |= E1000_RCTL_VFE;
Patrick McHardydb0ce502007-11-13 20:54:59 -08002235 }
2236
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002237 if (netdev_uc_count(netdev) > rar_entries - 1) {
Patrick McHardydb0ce502007-11-13 20:54:59 -08002238 rctl |= E1000_RCTL_UPE;
2239 } else if (!(netdev->flags & IFF_PROMISC)) {
2240 rctl &= ~E1000_RCTL_UPE;
Jiri Pirkoccffad252009-05-22 23:22:17 +00002241 use_uc = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242 }
2243
Joe Perches1dc32912008-07-11 15:17:08 -07002244 ew32(RCTL, rctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245
2246 /* 82542 2.0 needs to be in reset to write receive address registers */
2247
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002248 if (hw->mac_type == e1000_82542_rev2_0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 e1000_enter_82542_rst(adapter);
2250
Patrick McHardydb0ce502007-11-13 20:54:59 -08002251 /* load the first 14 addresses into the exact filters 1-14. Unicast
2252 * addresses take precedence to avoid disabling unicast filtering
2253 * when possible.
2254 *
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04002255 * RAR 0 is used for the station MAC address
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 * if there are not 14 addresses, go ahead and clear the filters
2257 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00002258 i = 1;
2259 if (use_uc)
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002260 netdev_for_each_uc_addr(ha, netdev) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00002261 if (i == rar_entries)
2262 break;
2263 e1000_rar_set(hw, ha->addr, i++);
2264 }
2265
Jiri Pirko22bedad32010-04-01 21:22:57 +00002266 netdev_for_each_mc_addr(ha, netdev) {
Jiri Pirko7a81e9f2010-02-22 09:10:44 +00002267 if (i == rar_entries) {
2268 /* load any remaining addresses into the hash table */
2269 u32 hash_reg, hash_bit, mta;
Jiri Pirko22bedad32010-04-01 21:22:57 +00002270 hash_value = e1000_hash_mc_addr(hw, ha->addr);
Jiri Pirko7a81e9f2010-02-22 09:10:44 +00002271 hash_reg = (hash_value >> 5) & 0x7F;
2272 hash_bit = hash_value & 0x1F;
2273 mta = (1 << hash_bit);
2274 mcarray[hash_reg] |= mta;
Jiri Pirko10886af2010-02-23 01:19:22 -08002275 } else {
Jiri Pirko22bedad32010-04-01 21:22:57 +00002276 e1000_rar_set(hw, ha->addr, i++);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 }
2278 }
2279
Jiri Pirko7a81e9f2010-02-22 09:10:44 +00002280 for (; i < rar_entries; i++) {
2281 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2282 E1000_WRITE_FLUSH();
2283 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2284 E1000_WRITE_FLUSH();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 }
2286
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002287 /* write the hash table completely, write from bottom to avoid
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002288 * both stupid write combining chipsets, and flushing each write
2289 */
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002290 for (i = mta_reg_count - 1; i >= 0 ; i--) {
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002291 /* If we are on an 82544 has an errata where writing odd
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002292 * offsets overwrites the previous even offset, but writing
2293 * backwards over the range solves the issue by always
2294 * writing the odd offset first
2295 */
2296 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2297 }
2298 E1000_WRITE_FLUSH();
2299
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002300 if (hw->mac_type == e1000_82542_rev2_0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 e1000_leave_82542_rst(adapter);
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002302
2303 kfree(mcarray);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304}
2305
/**
 * e1000_update_phy_info_task - get phy info
 * @work: work struct contained inside adapter struct
 *
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void e1000_update_phy_info_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     phy_info_task.work);

	/* refresh the cached PHY diagnostic snapshot */
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
}
2321
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	if (atomic_read(&adapter->tx_fifo_stall)) {
		/* only reset the FIFO once the descriptor ring and the
		 * transmit FIFO head/tail pointers have fully drained
		 */
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			/* disable transmits, rewind the FIFO pointers to
			 * the head address, then re-enable transmits
			 */
			tctl = er32(TCTL);
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			/* not drained yet; retry on the next jiffy */
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
}
2356
/**
 * e1000_has_link - query current link state from the hardware
 * @adapter: board private structure
 *
 * Returns true if link is up, false otherwise.  The check performed
 * depends on the media type (copper, fiber, or internal serdes).
 */
bool e1000_has_link(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or rx
	 * sequence error interrupt (except on intel ce4100).
	 * get_link_status will stay false until the
	 * e1000_check_for_link establishes link for copper adapters
	 * ONLY
	 */
	switch (hw->media_type) {
	case e1000_media_type_copper:
		if (hw->mac_type == e1000_ce4100)
			hw->get_link_status = 1;
		if (hw->get_link_status) {
			e1000_check_for_link(hw);
			link_active = !hw->get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		/* read link-up directly from the STATUS register */
		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_active = hw->serdes_has_link;
		break;
	default:
		break;
	}

	return link_active;
}
2393
Linus Torvalds1da177e2005-04-16 15:20:36 -07002394/**
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002395 * e1000_watchdog - work function
2396 * @work: work struct contained inside adapter struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397 **/
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002398static void e1000_watchdog(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399{
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002400 struct e1000_adapter *adapter = container_of(work,
2401 struct e1000_adapter,
2402 watchdog_task.work);
Joe Perches1dc32912008-07-11 15:17:08 -07002403 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 struct net_device *netdev = adapter->netdev;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002405 struct e1000_tx_ring *txdr = adapter->tx_ring;
Joe Perches406874a2008-04-03 10:06:32 -07002406 u32 link, tctl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002408 link = e1000_has_link(adapter);
2409 if ((netif_carrier_ok(netdev)) && link)
2410 goto link_up;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002412 if (link) {
2413 if (!netif_carrier_ok(netdev)) {
Joe Perches406874a2008-04-03 10:06:32 -07002414 u32 ctrl;
Joe Perchesc3033b02008-03-21 11:06:25 -07002415 bool txb2b = true;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002416 /* update snapshot of PHY registers on LSC */
Joe Perches1dc32912008-07-11 15:17:08 -07002417 e1000_get_speed_and_duplex(hw,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002418 &adapter->link_speed,
2419 &adapter->link_duplex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420
Joe Perches1dc32912008-07-11 15:17:08 -07002421 ctrl = er32(CTRL);
Emil Tantilov675ad472010-04-27 14:02:58 +00002422 pr_info("%s NIC Link is Up %d Mbps %s, "
2423 "Flow Control: %s\n",
2424 netdev->name,
2425 adapter->link_speed,
2426 adapter->link_duplex == FULL_DUPLEX ?
2427 "Full Duplex" : "Half Duplex",
2428 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2429 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2430 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2431 E1000_CTRL_TFCE) ? "TX" : "None")));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432
Emil Tantilov39ca5f02010-03-26 11:25:58 +00002433 /* adjust timeout factor according to speed/duplex */
Jeff Kirsher66a2b0a2006-01-12 16:50:53 -08002434 adapter->tx_timeout_factor = 1;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002435 switch (adapter->link_speed) {
2436 case SPEED_10:
Joe Perchesc3033b02008-03-21 11:06:25 -07002437 txb2b = false;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002438 adapter->tx_timeout_factor = 16;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002439 break;
2440 case SPEED_100:
Joe Perchesc3033b02008-03-21 11:06:25 -07002441 txb2b = false;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002442 /* maybe add some timeout factor ? */
2443 break;
2444 }
2445
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002446 /* enable transmits in the hardware */
Joe Perches1dc32912008-07-11 15:17:08 -07002447 tctl = er32(TCTL);
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002448 tctl |= E1000_TCTL_EN;
Joe Perches1dc32912008-07-11 15:17:08 -07002449 ew32(TCTL, tctl);
Jeff Kirsher66a2b0a2006-01-12 16:50:53 -08002450
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 netif_carrier_on(netdev);
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002452 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002453 schedule_delayed_work(&adapter->phy_info_task,
2454 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 adapter->smartspeed = 0;
2456 }
2457 } else {
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002458 if (netif_carrier_ok(netdev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 adapter->link_speed = 0;
2460 adapter->link_duplex = 0;
Emil Tantilov675ad472010-04-27 14:02:58 +00002461 pr_info("%s NIC Link is Down\n",
2462 netdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002463 netif_carrier_off(netdev);
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002464
2465 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002466 schedule_delayed_work(&adapter->phy_info_task,
2467 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 }
2469
2470 e1000_smartspeed(adapter);
2471 }
2472
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002473link_up:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 e1000_update_stats(adapter);
2475
Joe Perches1dc32912008-07-11 15:17:08 -07002476 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 adapter->tpt_old = adapter->stats.tpt;
Joe Perches1dc32912008-07-11 15:17:08 -07002478 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479 adapter->colc_old = adapter->stats.colc;
2480
2481 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2482 adapter->gorcl_old = adapter->stats.gorcl;
2483 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2484 adapter->gotcl_old = adapter->stats.gotcl;
2485
Joe Perches1dc32912008-07-11 15:17:08 -07002486 e1000_update_adaptive(hw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002488 if (!netif_carrier_ok(netdev)) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002489 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490 /* We've lost link, so the controller stops DMA,
2491 * but we've got queued Tx work that's never going
2492 * to get done, so reset controller to flush Tx.
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002493 * (Do the reset outside of interrupt context).
2494 */
Jeff Kirsher87041632006-03-02 18:21:24 -08002495 adapter->tx_timeout_count++;
2496 schedule_work(&adapter->reset_task);
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002497 /* exit immediately since reset is imminent */
Vladimir Davydovb2f963bf2013-11-23 07:17:56 +00002498 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499 }
2500 }
2501
Jesse Brandeburgeab2abf2010-05-04 22:26:03 +00002502 /* Simple mode for Interrupt Throttle Rate (ITR) */
2503 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002504 /* Symmetric Tx/Rx gets a reduced ITR=2000;
Jesse Brandeburgeab2abf2010-05-04 22:26:03 +00002505 * Total asymmetrical Tx or Rx gets ITR=8000;
2506 * everyone else is between 2000-8000.
2507 */
2508 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2509 u32 dif = (adapter->gotcl > adapter->gorcl ?
2510 adapter->gotcl - adapter->gorcl :
2511 adapter->gorcl - adapter->gotcl) / 10000;
2512 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2513
2514 ew32(ITR, 1000000000 / (itr * 256));
2515 }
2516
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517 /* Cause software interrupt to ensure rx ring is cleaned */
Joe Perches1dc32912008-07-11 15:17:08 -07002518 ew32(ICS, E1000_ICS_RXDMT0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519
Malli Chilakala26483452005-04-28 19:44:46 -07002520 /* Force detection of hung controller every watchdog period */
Joe Perchesc3033b02008-03-21 11:06:25 -07002521 adapter->detect_tx_hung = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002523 /* Reschedule the task */
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002524 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002525 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526}
2527
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002528enum latency_range {
2529 lowest_latency = 0,
2530 low_latency = 1,
2531 bulk_latency = 2,
2532 latency_invalid = 255
2533};
2534
2535/**
2536 * e1000_update_itr - update the dynamic ITR value based on statistics
Jesse Brandeburg8fce4732009-09-25 12:18:41 +00002537 * @adapter: pointer to adapter
2538 * @itr_setting: current adapter->itr
2539 * @packets: the number of packets during this measurement interval
2540 * @bytes: the number of bytes during this measurement interval
2541 *
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002542 * Stores a new ITR value based on packets and byte
2543 * counts during the last interrupt. The advantage of per interrupt
2544 * computation is faster updates and more accurate ITR for the current
2545 * traffic pattern. Constants in this function were computed
2546 * based on theoretical maximum wire speed and thresholds were set based
2547 * on testing data as well as attempting to minimize response time
2548 * while increasing bulk throughput.
2549 * this functionality is controlled by the InterruptThrottleRate module
2550 * parameter (see e1000_param.c)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002551 **/
2552static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
Joe Perches64798842008-07-11 15:17:02 -07002553 u16 itr_setting, int packets, int bytes)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002554{
2555 unsigned int retval = itr_setting;
2556 struct e1000_hw *hw = &adapter->hw;
2557
2558 if (unlikely(hw->mac_type < e1000_82540))
2559 goto update_itr_done;
2560
2561 if (packets == 0)
2562 goto update_itr_done;
2563
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002564 switch (itr_setting) {
2565 case lowest_latency:
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002566 /* jumbo frames get bulk treatment*/
2567 if (bytes/packets > 8000)
2568 retval = bulk_latency;
2569 else if ((packets < 5) && (bytes > 512))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002570 retval = low_latency;
2571 break;
2572 case low_latency: /* 50 usec aka 20000 ints/s */
2573 if (bytes > 10000) {
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002574 /* jumbo frames need bulk latency setting */
2575 if (bytes/packets > 8000)
2576 retval = bulk_latency;
2577 else if ((packets < 10) || ((bytes/packets) > 1200))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002578 retval = bulk_latency;
2579 else if ((packets > 35))
2580 retval = lowest_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002581 } else if (bytes/packets > 2000)
2582 retval = bulk_latency;
2583 else if (packets <= 2 && bytes < 512)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002584 retval = lowest_latency;
2585 break;
2586 case bulk_latency: /* 250 usec aka 4000 ints/s */
2587 if (bytes > 25000) {
2588 if (packets > 35)
2589 retval = low_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002590 } else if (bytes < 6000) {
2591 retval = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002592 }
2593 break;
2594 }
2595
2596update_itr_done:
2597 return retval;
2598}
2599
2600static void e1000_set_itr(struct e1000_adapter *adapter)
2601{
2602 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07002603 u16 current_itr;
2604 u32 new_itr = adapter->itr;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002605
2606 if (unlikely(hw->mac_type < e1000_82540))
2607 return;
2608
2609 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2610 if (unlikely(adapter->link_speed != SPEED_1000)) {
2611 current_itr = 0;
2612 new_itr = 4000;
2613 goto set_itr_now;
2614 }
2615
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002616 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2617 adapter->total_tx_packets,
2618 adapter->total_tx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002619 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2620 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2621 adapter->tx_itr = low_latency;
2622
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002623 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2624 adapter->total_rx_packets,
2625 adapter->total_rx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002626 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2627 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2628 adapter->rx_itr = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002629
2630 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2631
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002632 switch (current_itr) {
2633 /* counts and packets in update_itr are dependent on these numbers */
2634 case lowest_latency:
2635 new_itr = 70000;
2636 break;
2637 case low_latency:
2638 new_itr = 20000; /* aka hwitr = ~200 */
2639 break;
2640 case bulk_latency:
2641 new_itr = 4000;
2642 break;
2643 default:
2644 break;
2645 }
2646
2647set_itr_now:
2648 if (new_itr != adapter->itr) {
2649 /* this attempts to bias the interrupt rate towards Bulk
2650 * by adding intermediate steps when interrupt rate is
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002651 * increasing
2652 */
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002653 new_itr = new_itr > adapter->itr ?
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002654 min(adapter->itr + (new_itr >> 2), new_itr) :
2655 new_itr;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002656 adapter->itr = new_itr;
Joe Perches1dc32912008-07-11 15:17:08 -07002657 ew32(ITR, 1000000000 / (new_itr * 256));
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002658 }
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002659}
2660
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661#define E1000_TX_FLAGS_CSUM 0x00000001
2662#define E1000_TX_FLAGS_VLAN 0x00000002
2663#define E1000_TX_FLAGS_TSO 0x00000004
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002664#define E1000_TX_FLAGS_IPV4 0x00000008
Ben Greear11a78dc2012-02-11 15:40:01 +00002665#define E1000_TX_FLAGS_NO_FCS 0x00000010
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2667#define E1000_TX_FLAGS_VLAN_SHIFT 16
2668
Joe Perches64798842008-07-11 15:17:02 -07002669static int e1000_tso(struct e1000_adapter *adapter,
2670 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 struct e1000_context_desc *context_desc;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002673 struct e1000_buffer *buffer_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 unsigned int i;
Joe Perches406874a2008-04-03 10:06:32 -07002675 u32 cmd_length = 0;
2676 u16 ipcse = 0, tucse, mss;
2677 u8 ipcss, ipcso, tucss, tucso, hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678 int err;
2679
Herbert Xu89114af2006-07-08 13:34:32 -07002680 if (skb_is_gso(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 if (skb_header_cloned(skb)) {
2682 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2683 if (err)
2684 return err;
2685 }
2686
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07002687 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
Herbert Xu79671682006-06-22 02:40:14 -07002688 mss = skb_shinfo(skb)->gso_size;
Alexey Dobriyan60828232006-05-23 14:52:21 -07002689 if (skb->protocol == htons(ETH_P_IP)) {
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002690 struct iphdr *iph = ip_hdr(skb);
2691 iph->tot_len = 0;
2692 iph->check = 0;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002693 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2694 iph->daddr, 0,
2695 IPPROTO_TCP,
2696 0);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002697 cmd_length = E1000_TXD_CMD_IP;
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07002698 ipcse = skb_transport_offset(skb) - 1;
Auke Koke15fdd02006-08-16 11:28:45 -07002699 } else if (skb->protocol == htons(ETH_P_IPV6)) {
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07002700 ipv6_hdr(skb)->payload_len = 0;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002701 tcp_hdr(skb)->check =
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07002702 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2703 &ipv6_hdr(skb)->daddr,
2704 0, IPPROTO_TCP, 0);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002705 ipcse = 0;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002706 }
Arnaldo Carvalho de Melobbe735e2007-03-10 22:16:10 -03002707 ipcss = skb_network_offset(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002708 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
Arnaldo Carvalho de Meloea2ae172007-04-25 17:55:53 -07002709 tucss = skb_transport_offset(skb);
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002710 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 tucse = 0;
2712
2713 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002714 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002716 i = tx_ring->next_to_use;
2717 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002718 buffer_info = &tx_ring->buffer_info[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719
2720 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2721 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2722 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2723 context_desc->upper_setup.tcp_fields.tucss = tucss;
2724 context_desc->upper_setup.tcp_fields.tucso = tucso;
2725 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2726 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2727 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2728 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2729
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002730 buffer_info->time_stamp = jiffies;
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08002731 buffer_info->next_to_watch = i;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002732
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002733 if (++i == tx_ring->count) i = 0;
2734 tx_ring->next_to_use = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735
Joe Perchesc3033b02008-03-21 11:06:25 -07002736 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 }
Joe Perchesc3033b02008-03-21 11:06:25 -07002738 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739}
2740
Joe Perches64798842008-07-11 15:17:02 -07002741static bool e1000_tx_csum(struct e1000_adapter *adapter,
2742 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743{
2744 struct e1000_context_desc *context_desc;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002745 struct e1000_buffer *buffer_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 unsigned int i;
Joe Perches406874a2008-04-03 10:06:32 -07002747 u8 css;
Dave Graham3ed30672008-10-09 14:29:26 -07002748 u32 cmd_len = E1000_TXD_CMD_DEXT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749
Dave Graham3ed30672008-10-09 14:29:26 -07002750 if (skb->ip_summed != CHECKSUM_PARTIAL)
2751 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752
Dave Graham3ed30672008-10-09 14:29:26 -07002753 switch (skb->protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08002754 case cpu_to_be16(ETH_P_IP):
Dave Graham3ed30672008-10-09 14:29:26 -07002755 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2756 cmd_len |= E1000_TXD_CMD_TCP;
2757 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08002758 case cpu_to_be16(ETH_P_IPV6):
Dave Graham3ed30672008-10-09 14:29:26 -07002759 /* XXX not handling all IPV6 headers */
2760 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2761 cmd_len |= E1000_TXD_CMD_TCP;
2762 break;
2763 default:
2764 if (unlikely(net_ratelimit()))
Emil Tantilovfeb8f472010-07-26 23:37:21 -07002765 e_warn(drv, "checksum_partial proto=%x!\n",
2766 skb->protocol);
Dave Graham3ed30672008-10-09 14:29:26 -07002767 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 }
2769
Michał Mirosław0d0b1672010-12-14 15:24:08 +00002770 css = skb_checksum_start_offset(skb);
Dave Graham3ed30672008-10-09 14:29:26 -07002771
2772 i = tx_ring->next_to_use;
2773 buffer_info = &tx_ring->buffer_info[i];
2774 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2775
2776 context_desc->lower_setup.ip_config = 0;
2777 context_desc->upper_setup.tcp_fields.tucss = css;
2778 context_desc->upper_setup.tcp_fields.tucso =
2779 css + skb->csum_offset;
2780 context_desc->upper_setup.tcp_fields.tucse = 0;
2781 context_desc->tcp_seg_setup.data = 0;
2782 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2783
2784 buffer_info->time_stamp = jiffies;
2785 buffer_info->next_to_watch = i;
2786
2787 if (unlikely(++i == tx_ring->count)) i = 0;
2788 tx_ring->next_to_use = i;
2789
2790 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791}
2792
2793#define E1000_MAX_TXD_PWR 12
2794#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2795
Joe Perches64798842008-07-11 15:17:02 -07002796static int e1000_tx_map(struct e1000_adapter *adapter,
2797 struct e1000_tx_ring *tx_ring,
2798 struct sk_buff *skb, unsigned int first,
2799 unsigned int max_per_txd, unsigned int nr_frags,
2800 unsigned int mss)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801{
Joe Perches1dc32912008-07-11 15:17:08 -07002802 struct e1000_hw *hw = &adapter->hw;
Alexander Duyck602c0552009-12-02 16:46:00 +00002803 struct pci_dev *pdev = adapter->pdev;
Alexander Duyck37e73df2009-03-25 21:58:45 +00002804 struct e1000_buffer *buffer_info;
Jesse Brandeburgd20b6062009-03-02 16:03:21 -08002805 unsigned int len = skb_headlen(skb);
Alexander Duyck602c0552009-12-02 16:46:00 +00002806 unsigned int offset = 0, size, count = 0, i;
Dean Nelson31c15a22011-08-25 14:39:24 +00002807 unsigned int f, bytecount, segs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808
2809 i = tx_ring->next_to_use;
2810
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002811 while (len) {
Alexander Duyck37e73df2009-03-25 21:58:45 +00002812 buffer_info = &tx_ring->buffer_info[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 size = min(len, max_per_txd);
Jeff Kirsherfd803242005-12-13 00:06:22 -05002814 /* Workaround for Controller erratum --
2815 * descriptor for non-tso packet in a linear SKB that follows a
2816 * tso gets written back prematurely before the data is fully
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002817 * DMA'd to the controller
2818 */
Jeff Kirsherfd803242005-12-13 00:06:22 -05002819 if (!skb->data_len && tx_ring->last_tx_tso &&
Herbert Xu89114af2006-07-08 13:34:32 -07002820 !skb_is_gso(skb)) {
Rusty Russell3db1cd52011-12-19 13:56:45 +00002821 tx_ring->last_tx_tso = false;
Jeff Kirsherfd803242005-12-13 00:06:22 -05002822 size -= 4;
2823 }
2824
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 /* Workaround for premature desc write-backs
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002826 * in TSO mode. Append 4-byte sentinel desc
2827 */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002828 if (unlikely(mss && !nr_frags && size == len && size > 8))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 size -= 4;
Malli Chilakala97338bd2005-04-28 19:41:46 -07002830 /* work-around for errata 10 and it applies
2831 * to all controllers in PCI-X mode
2832 * The fix is to make sure that the first descriptor of a
2833 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2834 */
Joe Perches1dc32912008-07-11 15:17:08 -07002835 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
Malli Chilakala97338bd2005-04-28 19:41:46 -07002836 (size > 2015) && count == 0))
2837 size = 2015;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002838
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 /* Workaround for potential 82544 hang in PCI-X. Avoid
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002840 * terminating buffers within evenly-aligned dwords.
2841 */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002842 if (unlikely(adapter->pcix_82544 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2844 size > 4))
2845 size -= 4;
2846
2847 buffer_info->length = size;
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00002848 /* set time_stamp *before* dma to help avoid a possible race */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849 buffer_info->time_stamp = jiffies;
Alexander Duyck602c0552009-12-02 16:46:00 +00002850 buffer_info->mapped_as_page = false;
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002851 buffer_info->dma = dma_map_single(&pdev->dev,
2852 skb->data + offset,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002853 size, DMA_TO_DEVICE);
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002854 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
Alexander Duyck602c0552009-12-02 16:46:00 +00002855 goto dma_error;
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08002856 buffer_info->next_to_watch = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857
2858 len -= size;
2859 offset += size;
2860 count++;
Alexander Duyck37e73df2009-03-25 21:58:45 +00002861 if (len) {
2862 i++;
2863 if (unlikely(i == tx_ring->count))
2864 i = 0;
2865 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866 }
2867
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002868 for (f = 0; f < nr_frags; f++) {
Eric Dumazet9e903e02011-10-18 21:00:24 +00002869 const struct skb_frag_struct *frag;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870
2871 frag = &skb_shinfo(skb)->frags[f];
Eric Dumazet9e903e02011-10-18 21:00:24 +00002872 len = skb_frag_size(frag);
Ian Campbell877749b2011-08-29 23:18:26 +00002873 offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002875 while (len) {
Ian Campbell877749b2011-08-29 23:18:26 +00002876 unsigned long bufend;
Alexander Duyck37e73df2009-03-25 21:58:45 +00002877 i++;
2878 if (unlikely(i == tx_ring->count))
2879 i = 0;
2880
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881 buffer_info = &tx_ring->buffer_info[i];
2882 size = min(len, max_per_txd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 /* Workaround for premature desc write-backs
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002884 * in TSO mode. Append 4-byte sentinel desc
2885 */
2886 if (unlikely(mss && f == (nr_frags-1) &&
2887 size == len && size > 8))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002888 size -= 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889 /* Workaround for potential 82544 hang in PCI-X.
2890 * Avoid terminating buffers within evenly-aligned
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002891 * dwords.
2892 */
Ian Campbell877749b2011-08-29 23:18:26 +00002893 bufend = (unsigned long)
2894 page_to_phys(skb_frag_page(frag));
2895 bufend += offset + size - 1;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002896 if (unlikely(adapter->pcix_82544 &&
Ian Campbell877749b2011-08-29 23:18:26 +00002897 !(bufend & 4) &&
2898 size > 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002899 size -= 4;
2900
2901 buffer_info->length = size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902 buffer_info->time_stamp = jiffies;
Alexander Duyck602c0552009-12-02 16:46:00 +00002903 buffer_info->mapped_as_page = true;
Ian Campbell877749b2011-08-29 23:18:26 +00002904 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2905 offset, size, DMA_TO_DEVICE);
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002906 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
Alexander Duyck602c0552009-12-02 16:46:00 +00002907 goto dma_error;
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08002908 buffer_info->next_to_watch = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909
2910 len -= size;
2911 offset += size;
2912 count++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913 }
2914 }
2915
Dean Nelson31c15a22011-08-25 14:39:24 +00002916 segs = skb_shinfo(skb)->gso_segs ?: 1;
2917 /* multiply data chunks by size of headers */
2918 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2919
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920 tx_ring->buffer_info[i].skb = skb;
Dean Nelson31c15a22011-08-25 14:39:24 +00002921 tx_ring->buffer_info[i].segs = segs;
2922 tx_ring->buffer_info[i].bytecount = bytecount;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002923 tx_ring->buffer_info[first].next_to_watch = i;
2924
2925 return count;
Alexander Duyck602c0552009-12-02 16:46:00 +00002926
2927dma_error:
2928 dev_err(&pdev->dev, "TX DMA map failed\n");
2929 buffer_info->dma = 0;
Roel Kluinc1fa3472010-01-19 14:21:45 +00002930 if (count)
Alexander Duyck602c0552009-12-02 16:46:00 +00002931 count--;
Roel Kluinc1fa3472010-01-19 14:21:45 +00002932
2933 while (count--) {
2934 if (i==0)
Alexander Duyck602c0552009-12-02 16:46:00 +00002935 i += tx_ring->count;
Roel Kluinc1fa3472010-01-19 14:21:45 +00002936 i--;
Alexander Duyck602c0552009-12-02 16:46:00 +00002937 buffer_info = &tx_ring->buffer_info[i];
2938 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2939 }
2940
2941 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942}
2943
Joe Perches64798842008-07-11 15:17:02 -07002944static void e1000_tx_queue(struct e1000_adapter *adapter,
2945 struct e1000_tx_ring *tx_ring, int tx_flags,
2946 int count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947{
Joe Perches1dc32912008-07-11 15:17:08 -07002948 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949 struct e1000_tx_desc *tx_desc = NULL;
2950 struct e1000_buffer *buffer_info;
Joe Perches406874a2008-04-03 10:06:32 -07002951 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952 unsigned int i;
2953
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002954 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002956 E1000_TXD_CMD_TSE;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002957 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2958
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002959 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002960 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961 }
2962
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002963 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2965 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2966 }
2967
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002968 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969 txd_lower |= E1000_TXD_CMD_VLE;
2970 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2971 }
2972
Ben Greear11a78dc2012-02-11 15:40:01 +00002973 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2974 txd_lower &= ~(E1000_TXD_CMD_IFCS);
2975
Linus Torvalds1da177e2005-04-16 15:20:36 -07002976 i = tx_ring->next_to_use;
2977
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002978 while (count--) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 buffer_info = &tx_ring->buffer_info[i];
2980 tx_desc = E1000_TX_DESC(*tx_ring, i);
2981 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2982 tx_desc->lower.data =
2983 cpu_to_le32(txd_lower | buffer_info->length);
2984 tx_desc->upper.data = cpu_to_le32(txd_upper);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002985 if (unlikely(++i == tx_ring->count)) i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986 }
2987
2988 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
2989
Ben Greear11a78dc2012-02-11 15:40:01 +00002990 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
2991 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2992 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
2993
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 /* Force memory writes to complete before letting h/w
2995 * know there are new descriptors to fetch. (Only
2996 * applicable for weak-ordered memory model archs,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002997 * such as IA-64).
2998 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999 wmb();
3000
3001 tx_ring->next_to_use = i;
Joe Perches1dc32912008-07-11 15:17:08 -07003002 writel(i, hw->hw_addr + tx_ring->tdt);
Jesse Brandeburg2ce90472006-11-01 08:47:42 -08003003 /* we need this if more than one processor can write to our tail
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003004 * at a time, it synchronizes IO on IA64/Altix systems
3005 */
Jesse Brandeburg2ce90472006-11-01 08:47:42 -08003006 mmiowb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003007}
3008
Ben Hutchings1aa8b472012-07-10 10:56:59 +00003009/* 82547 workaround to avoid controller hang in half-duplex environment.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010 * The workaround is to avoid queuing a large packet that would span
3011 * the internal Tx FIFO ring boundary by notifying the stack to resend
3012 * the packet at a later time. This gives the Tx FIFO an opportunity to
3013 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3014 * to the beginning of the Tx FIFO.
Ben Hutchings1aa8b472012-07-10 10:56:59 +00003015 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016
3017#define E1000_FIFO_HDR 0x10
3018#define E1000_82547_PAD_LEN 0x3E0
3019
Joe Perches64798842008-07-11 15:17:02 -07003020static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3021 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022{
Joe Perches406874a2008-04-03 10:06:32 -07003023 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3024 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003025
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -07003026 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003027
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003028 if (adapter->link_duplex != HALF_DUPLEX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029 goto no_fifo_stall_required;
3030
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003031 if (atomic_read(&adapter->tx_fifo_stall))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003032 return 1;
3033
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003034 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003035 atomic_set(&adapter->tx_fifo_stall, 1);
3036 return 1;
3037 }
3038
3039no_fifo_stall_required:
3040 adapter->tx_fifo_head += skb_fifo_len;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003041 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003042 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3043 return 0;
3044}
3045
Jesse Brandeburg65c79732006-09-27 12:53:48 -07003046static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3047{
3048 struct e1000_adapter *adapter = netdev_priv(netdev);
3049 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3050
3051 netif_stop_queue(netdev);
3052 /* Herbert's original patch had:
3053 * smp_mb__after_netif_stop_queue();
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003054 * but since that doesn't exist yet, just open code it.
3055 */
Jesse Brandeburg65c79732006-09-27 12:53:48 -07003056 smp_mb();
3057
3058 /* We need to check again in a case another CPU has just
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003059 * made room available.
3060 */
Jesse Brandeburg65c79732006-09-27 12:53:48 -07003061 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3062 return -EBUSY;
3063
3064 /* A reprieve! */
3065 netif_start_queue(netdev);
Jesse Brandeburgfcfb1222006-11-01 08:47:59 -08003066 ++adapter->restart_queue;
Jesse Brandeburg65c79732006-09-27 12:53:48 -07003067 return 0;
3068}
3069
/* Ensure @size descriptors are free before transmit; stop the queue via the
 * slow path when they are not.  Returns 0 when it is OK to transmit.
 */
static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	if (unlikely(E1000_DESC_UNUSED(tx_ring) < size))
		return __e1000_maybe_stop_tx(netdev, size);

	/* Fast path: plenty of room in the ring. */
	return 0;
}
3077
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
Stephen Hemminger3b29a562009-08-31 19:50:55 +00003079static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3080 struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081{
Malli Chilakala60490fe2005-06-17 17:41:45 -07003082 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07003083 struct e1000_hw *hw = &adapter->hw;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003084 struct e1000_tx_ring *tx_ring;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003085 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3086 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3087 unsigned int tx_flags = 0;
Eric Dumazete743d312010-04-14 15:59:40 -07003088 unsigned int len = skb_headlen(skb);
Krishna Kumar6d1e3aa2007-10-05 14:15:16 -07003089 unsigned int nr_frags;
3090 unsigned int mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003091 int count = 0;
Auke Kok76c224b2006-05-23 13:36:06 -07003092 int tso;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093 unsigned int f;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003094
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003095 /* This goes back to the question of how to logically map a Tx queue
Jesse Brandeburg65c79732006-09-27 12:53:48 -07003096 * to a flow. Right now, performance is impacted slightly negatively
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003097 * if using multiple Tx queues. If the stack breaks away from a
3098 * single qdisc implementation, we can look at this again.
3099 */
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003100 tx_ring = adapter->tx_ring;
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -04003101
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003102 if (unlikely(skb->len <= 0)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003103 dev_kfree_skb_any(skb);
3104 return NETDEV_TX_OK;
3105 }
3106
Tushar Dave59d86c72012-09-15 10:16:57 +00003107 /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3108 * packets may get corrupted during padding by HW.
3109 * To WA this issue, pad all small packets manually.
3110 */
3111 if (skb->len < ETH_ZLEN) {
3112 if (skb_pad(skb, ETH_ZLEN - skb->len))
3113 return NETDEV_TX_OK;
3114 skb->len = ETH_ZLEN;
3115 skb_set_tail_pointer(skb, ETH_ZLEN);
3116 }
3117
Herbert Xu79671682006-06-22 02:40:14 -07003118 mss = skb_shinfo(skb)->gso_size;
Auke Kok76c224b2006-05-23 13:36:06 -07003119 /* The controller does a simple calculation to
Linus Torvalds1da177e2005-04-16 15:20:36 -07003120 * make sure there is enough room in the FIFO before
3121 * initiating the DMA for each buffer. The calc is:
3122 * 4 = ceil(buffer len/mss). To make sure we don't
3123 * overrun the FIFO, adjust the max buffer len if mss
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003124 * drops.
3125 */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003126 if (mss) {
Joe Perches406874a2008-04-03 10:06:32 -07003127 u8 hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128 max_per_txd = min(mss << 2, max_per_txd);
3129 max_txd_pwr = fls(max_per_txd) - 1;
Jeff Kirsher9a3056d2006-01-12 16:50:23 -08003130
Arnaldo Carvalho de Meloab6a5bb2007-03-18 17:43:48 -07003131 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
Krishna Kumar6d1e3aa2007-10-05 14:15:16 -07003132 if (skb->data_len && hdr_len == len) {
Joe Perches1dc32912008-07-11 15:17:08 -07003133 switch (hw->mac_type) {
Jeff Kirsher9f687882006-03-02 18:20:17 -08003134 unsigned int pull_size;
Herbert Xu683a2aa2006-12-16 12:04:33 +11003135 case e1000_82544:
3136 /* Make sure we have room to chop off 4 bytes,
3137 * and that the end alignment will work out to
3138 * this hardware's requirements
3139 * NOTE: this is a TSO only workaround
3140 * if end byte alignment not correct move us
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003141 * into the next dword
3142 */
3143 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3144 & 4)
Herbert Xu683a2aa2006-12-16 12:04:33 +11003145 break;
3146 /* fall through */
Jeff Kirsher9f687882006-03-02 18:20:17 -08003147 pull_size = min((unsigned int)4, skb->data_len);
3148 if (!__pskb_pull_tail(skb, pull_size)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07003149 e_err(drv, "__pskb_pull_tail "
3150 "failed.\n");
Jeff Kirsher9f687882006-03-02 18:20:17 -08003151 dev_kfree_skb_any(skb);
Jeff Garzik749dfc702006-03-11 13:35:31 -05003152 return NETDEV_TX_OK;
Jeff Kirsher9f687882006-03-02 18:20:17 -08003153 }
Eric Dumazete743d312010-04-14 15:59:40 -07003154 len = skb_headlen(skb);
Jeff Kirsher9f687882006-03-02 18:20:17 -08003155 break;
3156 default:
3157 /* do nothing */
3158 break;
Jeff Kirsherd74bbd32006-01-12 16:51:07 -08003159 }
Jeff Kirsher9a3056d2006-01-12 16:50:23 -08003160 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161 }
3162
Jeff Kirsher9a3056d2006-01-12 16:50:23 -08003163 /* reserve a descriptor for the offload context */
Patrick McHardy84fa7932006-08-29 16:44:56 -07003164 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003165 count++;
Malli Chilakala26483452005-04-28 19:44:46 -07003166 count++;
Jeff Kirsherfd803242005-12-13 00:06:22 -05003167
Jeff Kirsherfd803242005-12-13 00:06:22 -05003168 /* Controller Erratum workaround */
Herbert Xu89114af2006-07-08 13:34:32 -07003169 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
Jeff Kirsherfd803242005-12-13 00:06:22 -05003170 count++;
Jeff Kirsherfd803242005-12-13 00:06:22 -05003171
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172 count += TXD_USE_COUNT(len, max_txd_pwr);
3173
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003174 if (adapter->pcix_82544)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003175 count++;
3176
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003177 /* work-around for errata 10 and it applies to all controllers
Malli Chilakala97338bd2005-04-28 19:41:46 -07003178 * in PCI-X mode, so add one more descriptor to the count
3179 */
Joe Perches1dc32912008-07-11 15:17:08 -07003180 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
Malli Chilakala97338bd2005-04-28 19:41:46 -07003181 (len > 2015)))
3182 count++;
3183
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184 nr_frags = skb_shinfo(skb)->nr_frags;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003185 for (f = 0; f < nr_frags; f++)
Eric Dumazet9e903e02011-10-18 21:00:24 +00003186 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
Linus Torvalds1da177e2005-04-16 15:20:36 -07003187 max_txd_pwr);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003188 if (adapter->pcix_82544)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003189 count += nr_frags;
3190
Linus Torvalds1da177e2005-04-16 15:20:36 -07003191 /* need: count + 2 desc gap to keep tail from touching
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003192 * head, otherwise try next time
3193 */
Alexander Duyck80179432009-01-21 14:42:47 -08003194 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195 return NETDEV_TX_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003196
Jesse Brandeburga4010af2011-10-05 07:24:41 +00003197 if (unlikely((hw->mac_type == e1000_82547) &&
3198 (e1000_82547_fifo_workaround(adapter, skb)))) {
3199 netif_stop_queue(netdev);
3200 if (!test_bit(__E1000_DOWN, &adapter->flags))
3201 schedule_delayed_work(&adapter->fifo_stall_task, 1);
3202 return NETDEV_TX_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 }
3204
Jiri Pirko5622e402011-07-21 03:26:31 +00003205 if (vlan_tx_tag_present(skb)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003206 tx_flags |= E1000_TX_FLAGS_VLAN;
3207 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3208 }
3209
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003210 first = tx_ring->next_to_use;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003211
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003212 tso = e1000_tso(adapter, tx_ring, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213 if (tso < 0) {
3214 dev_kfree_skb_any(skb);
3215 return NETDEV_TX_OK;
3216 }
3217
Jeff Kirsherfd803242005-12-13 00:06:22 -05003218 if (likely(tso)) {
Jesse Brandeburg8fce4732009-09-25 12:18:41 +00003219 if (likely(hw->mac_type != e1000_82544))
Rusty Russell3db1cd52011-12-19 13:56:45 +00003220 tx_ring->last_tx_tso = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003221 tx_flags |= E1000_TX_FLAGS_TSO;
Jeff Kirsherfd803242005-12-13 00:06:22 -05003222 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003223 tx_flags |= E1000_TX_FLAGS_CSUM;
3224
Alexey Dobriyan60828232006-05-23 14:52:21 -07003225 if (likely(skb->protocol == htons(ETH_P_IP)))
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003226 tx_flags |= E1000_TX_FLAGS_IPV4;
3227
Ben Greear11a78dc2012-02-11 15:40:01 +00003228 if (unlikely(skb->no_fcs))
3229 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3230
Alexander Duyck37e73df2009-03-25 21:58:45 +00003231 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003232 nr_frags, mss);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003233
Alexander Duyck37e73df2009-03-25 21:58:45 +00003234 if (count) {
Otto Estuardo Solares Cabrera2f66fd32012-08-24 08:04:58 +00003235 netdev_sent_queue(netdev, skb->len);
Willem de Bruijneab467f2012-04-27 09:04:04 +00003236 skb_tx_timestamp(skb);
3237
Alexander Duyck37e73df2009-03-25 21:58:45 +00003238 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
Alexander Duyck37e73df2009-03-25 21:58:45 +00003239 /* Make sure there is space in the ring for the next send. */
3240 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003241
Alexander Duyck37e73df2009-03-25 21:58:45 +00003242 } else {
3243 dev_kfree_skb_any(skb);
3244 tx_ring->buffer_info[first].time_stamp = 0;
3245 tx_ring->next_to_use = first;
3246 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248 return NETDEV_TX_OK;
3249}
3250
Tushar Daveb04e36b2012-01-27 09:00:46 +00003251#define NUM_REGS 38 /* 1 based count */
3252static void e1000_regdump(struct e1000_adapter *adapter)
3253{
3254 struct e1000_hw *hw = &adapter->hw;
3255 u32 regs[NUM_REGS];
3256 u32 *regs_buff = regs;
3257 int i = 0;
3258
Tushar Davee29b5d82012-02-10 08:06:36 +00003259 static const char * const reg_name[] = {
3260 "CTRL", "STATUS",
3261 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3262 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3263 "TIDV", "TXDCTL", "TADV", "TARC0",
3264 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3265 "TXDCTL1", "TARC1",
3266 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3267 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3268 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
Tushar Daveb04e36b2012-01-27 09:00:46 +00003269 };
3270
3271 regs_buff[0] = er32(CTRL);
3272 regs_buff[1] = er32(STATUS);
3273
3274 regs_buff[2] = er32(RCTL);
3275 regs_buff[3] = er32(RDLEN);
3276 regs_buff[4] = er32(RDH);
3277 regs_buff[5] = er32(RDT);
3278 regs_buff[6] = er32(RDTR);
3279
3280 regs_buff[7] = er32(TCTL);
3281 regs_buff[8] = er32(TDBAL);
3282 regs_buff[9] = er32(TDBAH);
3283 regs_buff[10] = er32(TDLEN);
3284 regs_buff[11] = er32(TDH);
3285 regs_buff[12] = er32(TDT);
3286 regs_buff[13] = er32(TIDV);
3287 regs_buff[14] = er32(TXDCTL);
3288 regs_buff[15] = er32(TADV);
3289 regs_buff[16] = er32(TARC0);
3290
3291 regs_buff[17] = er32(TDBAL1);
3292 regs_buff[18] = er32(TDBAH1);
3293 regs_buff[19] = er32(TDLEN1);
3294 regs_buff[20] = er32(TDH1);
3295 regs_buff[21] = er32(TDT1);
3296 regs_buff[22] = er32(TXDCTL1);
3297 regs_buff[23] = er32(TARC1);
3298 regs_buff[24] = er32(CTRL_EXT);
3299 regs_buff[25] = er32(ERT);
3300 regs_buff[26] = er32(RDBAL0);
3301 regs_buff[27] = er32(RDBAH0);
3302 regs_buff[28] = er32(TDFH);
3303 regs_buff[29] = er32(TDFT);
3304 regs_buff[30] = er32(TDFHS);
3305 regs_buff[31] = er32(TDFTS);
3306 regs_buff[32] = er32(TDFPC);
3307 regs_buff[33] = er32(RDFH);
3308 regs_buff[34] = er32(RDFT);
3309 regs_buff[35] = er32(RDFHS);
3310 regs_buff[36] = er32(RDFTS);
3311 regs_buff[37] = er32(RDFPC);
3312
3313 pr_info("Register dump\n");
Tushar Davee29b5d82012-02-10 08:06:36 +00003314 for (i = 0; i < NUM_REGS; i++)
3315 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003316}
3317
3318/*
3319 * e1000_dump: Print registers, tx ring and rx ring
3320 */
3321static void e1000_dump(struct e1000_adapter *adapter)
3322{
3323 /* this code doesn't handle multiple rings */
3324 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3325 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3326 int i;
3327
3328 if (!netif_msg_hw(adapter))
3329 return;
3330
3331 /* Print Registers */
3332 e1000_regdump(adapter);
3333
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003334 /* transmit dump */
Tushar Daveb04e36b2012-01-27 09:00:46 +00003335 pr_info("TX Desc ring0 dump\n");
3336
3337 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3338 *
3339 * Legacy Transmit Descriptor
3340 * +--------------------------------------------------------------+
3341 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
3342 * +--------------------------------------------------------------+
3343 * 8 | Special | CSS | Status | CMD | CSO | Length |
3344 * +--------------------------------------------------------------+
3345 * 63 48 47 36 35 32 31 24 23 16 15 0
3346 *
3347 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3348 * 63 48 47 40 39 32 31 16 15 8 7 0
3349 * +----------------------------------------------------------------+
3350 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
3351 * +----------------------------------------------------------------+
3352 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
3353 * +----------------------------------------------------------------+
3354 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3355 *
3356 * Extended Data Descriptor (DTYP=0x1)
3357 * +----------------------------------------------------------------+
3358 * 0 | Buffer Address [63:0] |
3359 * +----------------------------------------------------------------+
3360 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
3361 * +----------------------------------------------------------------+
3362 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3363 */
Tushar Davee29b5d82012-02-10 08:06:36 +00003364 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3365 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
Tushar Daveb04e36b2012-01-27 09:00:46 +00003366
3367 if (!netif_msg_tx_done(adapter))
3368 goto rx_ring_summary;
3369
3370 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3371 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3372 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
Andrei Emeltchenkodd7f5c92012-03-25 17:49:25 +00003373 struct my_u { __le64 a; __le64 b; };
Tushar Daveb04e36b2012-01-27 09:00:46 +00003374 struct my_u *u = (struct my_u *)tx_desc;
Tushar Davee29b5d82012-02-10 08:06:36 +00003375 const char *type;
3376
Tushar Daveb04e36b2012-01-27 09:00:46 +00003377 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
Tushar Davee29b5d82012-02-10 08:06:36 +00003378 type = "NTC/U";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003379 else if (i == tx_ring->next_to_use)
Tushar Davee29b5d82012-02-10 08:06:36 +00003380 type = "NTU";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003381 else if (i == tx_ring->next_to_clean)
Tushar Davee29b5d82012-02-10 08:06:36 +00003382 type = "NTC";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003383 else
Tushar Davee29b5d82012-02-10 08:06:36 +00003384 type = "";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003385
Tushar Davee29b5d82012-02-10 08:06:36 +00003386 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3387 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3388 le64_to_cpu(u->a), le64_to_cpu(u->b),
3389 (u64)buffer_info->dma, buffer_info->length,
3390 buffer_info->next_to_watch,
3391 (u64)buffer_info->time_stamp, buffer_info->skb, type);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003392 }
3393
3394rx_ring_summary:
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003395 /* receive dump */
Tushar Daveb04e36b2012-01-27 09:00:46 +00003396 pr_info("\nRX Desc ring dump\n");
3397
3398 /* Legacy Receive Descriptor Format
3399 *
3400 * +-----------------------------------------------------+
3401 * | Buffer Address [63:0] |
3402 * +-----------------------------------------------------+
3403 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3404 * +-----------------------------------------------------+
3405 * 63 48 47 40 39 32 31 16 15 0
3406 */
Tushar Davee29b5d82012-02-10 08:06:36 +00003407 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
Tushar Daveb04e36b2012-01-27 09:00:46 +00003408
3409 if (!netif_msg_rx_status(adapter))
3410 goto exit;
3411
3412 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3413 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3414 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
Andrei Emeltchenkodd7f5c92012-03-25 17:49:25 +00003415 struct my_u { __le64 a; __le64 b; };
Tushar Daveb04e36b2012-01-27 09:00:46 +00003416 struct my_u *u = (struct my_u *)rx_desc;
Tushar Davee29b5d82012-02-10 08:06:36 +00003417 const char *type;
3418
Tushar Daveb04e36b2012-01-27 09:00:46 +00003419 if (i == rx_ring->next_to_use)
Tushar Davee29b5d82012-02-10 08:06:36 +00003420 type = "NTU";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003421 else if (i == rx_ring->next_to_clean)
Tushar Davee29b5d82012-02-10 08:06:36 +00003422 type = "NTC";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003423 else
Tushar Davee29b5d82012-02-10 08:06:36 +00003424 type = "";
Tushar Daveb04e36b2012-01-27 09:00:46 +00003425
Tushar Davee29b5d82012-02-10 08:06:36 +00003426 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3427 i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3428 (u64)buffer_info->dma, buffer_info->skb, type);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003429 } /* for */
3430
3431 /* dump the descriptor caches */
3432 /* rx */
Tushar Davee29b5d82012-02-10 08:06:36 +00003433 pr_info("Rx descriptor cache in 64bit format\n");
Tushar Daveb04e36b2012-01-27 09:00:46 +00003434 for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
Tushar Davee29b5d82012-02-10 08:06:36 +00003435 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3436 i,
3437 readl(adapter->hw.hw_addr + i+4),
3438 readl(adapter->hw.hw_addr + i),
3439 readl(adapter->hw.hw_addr + i+12),
3440 readl(adapter->hw.hw_addr + i+8));
Tushar Daveb04e36b2012-01-27 09:00:46 +00003441 }
3442 /* tx */
Tushar Davee29b5d82012-02-10 08:06:36 +00003443 pr_info("Tx descriptor cache in 64bit format\n");
Tushar Daveb04e36b2012-01-27 09:00:46 +00003444 for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
Tushar Davee29b5d82012-02-10 08:06:36 +00003445 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3446 i,
3447 readl(adapter->hw.hw_addr + i+4),
3448 readl(adapter->hw.hw_addr + i),
3449 readl(adapter->hw.hw_addr + i+12),
3450 readl(adapter->hw.hw_addr + i+8));
Tushar Daveb04e36b2012-01-27 09:00:46 +00003451 }
3452exit:
3453 return;
3454}
3455
Linus Torvalds1da177e2005-04-16 15:20:36 -07003456/**
3457 * e1000_tx_timeout - Respond to a Tx Hang
3458 * @netdev: network interface device structure
3459 **/
Joe Perches64798842008-07-11 15:17:02 -07003460static void e1000_tx_timeout(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461{
Malli Chilakala60490fe2005-06-17 17:41:45 -07003462 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463
3464 /* Do the reset outside of interrupt context */
Jeff Kirsher87041632006-03-02 18:21:24 -08003465 adapter->tx_timeout_count++;
3466 schedule_work(&adapter->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467}
3468
Joe Perches64798842008-07-11 15:17:02 -07003469static void e1000_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470{
David Howells65f27f32006-11-22 14:55:48 +00003471 struct e1000_adapter *adapter =
3472 container_of(work, struct e1000_adapter, reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473
Tushar Daveb04e36b2012-01-27 09:00:46 +00003474 e_err(drv, "Reset adapter\n");
Vladimir Davydovb2f963bf2013-11-23 07:17:56 +00003475 e1000_reinit_locked(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476}
3477
3478/**
3479 * e1000_get_stats - Get System Network Statistics
3480 * @netdev: network interface device structure
3481 *
3482 * Returns the address of the device statistics structure.
Jesse Brandeburga4010af2011-10-05 07:24:41 +00003483 * The statistics are actually updated from the watchdog.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484 **/
Joe Perches64798842008-07-11 15:17:02 -07003485static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486{
Jeff Kirsher6b7660c2006-01-12 16:50:35 -08003487 /* only return the current stats */
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003488 return &netdev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003489}
3490
3491/**
3492 * e1000_change_mtu - Change the Maximum Transfer Unit
3493 * @netdev: network interface device structure
3494 * @new_mtu: new value for maximum frame size
3495 *
3496 * Returns 0 on success, negative on failure
3497 **/
Joe Perches64798842008-07-11 15:17:02 -07003498static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003499{
Malli Chilakala60490fe2005-06-17 17:41:45 -07003500 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07003501 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3503
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003504 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3505 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07003506 e_err(probe, "Invalid MTU setting\n");
Mallikarjuna R Chilakala868d5302005-10-04 06:58:59 -04003507 return -EINVAL;
3508 }
3509
Jeff Kirsher997f5cb2006-01-12 16:50:55 -08003510 /* Adapter-specific max frame size limits. */
Joe Perches1dc32912008-07-11 15:17:08 -07003511 switch (hw->mac_type) {
Auke Kok9e2feac2006-04-14 19:05:18 -07003512 case e1000_undefined ... e1000_82542_rev2_1:
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +00003513 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07003514 e_err(probe, "Jumbo Frames not supported.\n");
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003515 return -EINVAL;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003516 }
Jeff Kirsher997f5cb2006-01-12 16:50:55 -08003517 break;
Jeff Kirsher997f5cb2006-01-12 16:50:55 -08003518 default:
3519 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3520 break;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003521 }
3522
Jesse Brandeburg3d6114e2009-09-25 12:19:02 +00003523 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3524 msleep(1);
3525 /* e1000_down has a dependency on max_frame_size */
3526 hw->max_frame_size = max_frame;
3527 if (netif_running(netdev))
3528 e1000_down(adapter);
3529
David S. Miller87f50322006-07-31 22:39:40 -07003530 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
Auke Kok9e2feac2006-04-14 19:05:18 -07003531 * means we reserve 2 more, this pushes us to allocate from the next
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003532 * larger slab size.
3533 * i.e. RXBUFFER_2048 --> size-4096 slab
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003534 * however with the new *_jumbo_rx* routines, jumbo receives will use
3535 * fragmented skbs
3536 */
Jeff Kirsher997f5cb2006-01-12 16:50:55 -08003537
Jesse Brandeburg99261462010-01-22 22:56:16 +00003538 if (max_frame <= E1000_RXBUFFER_2048)
Auke Kok9e2feac2006-04-14 19:05:18 -07003539 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003540 else
3541#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
Auke Kok9e2feac2006-04-14 19:05:18 -07003542 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003543#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3544 adapter->rx_buffer_len = PAGE_SIZE;
3545#endif
Auke Kok9e2feac2006-04-14 19:05:18 -07003546
3547 /* adjust allocation if LPE protects us, and we aren't using SBP */
Joe Perches1dc32912008-07-11 15:17:08 -07003548 if (!hw->tbi_compatibility_on &&
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +00003549 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
Auke Kok9e2feac2006-04-14 19:05:18 -07003550 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3551 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Jeff Kirsher997f5cb2006-01-12 16:50:55 -08003552
Emil Tantilov675ad472010-04-27 14:02:58 +00003553 pr_info("%s changing MTU from %d to %d\n",
3554 netdev->name, netdev->mtu, new_mtu);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003555 netdev->mtu = new_mtu;
3556
Auke Kok2db10a02006-06-27 09:06:28 -07003557 if (netif_running(netdev))
Jesse Brandeburg3d6114e2009-09-25 12:19:02 +00003558 e1000_up(adapter);
3559 else
3560 e1000_reset(adapter);
3561
3562 clear_bit(__E1000_RESETTING, &adapter->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564 return 0;
3565}
3566
3567/**
3568 * e1000_update_stats - Update the board statistics counters
3569 * @adapter: board private structure
3570 **/
Joe Perches64798842008-07-11 15:17:02 -07003571void e1000_update_stats(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572{
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003573 struct net_device *netdev = adapter->netdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003574 struct e1000_hw *hw = &adapter->hw;
Linas Vepstas282f33c2006-06-08 22:19:44 -07003575 struct pci_dev *pdev = adapter->pdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576 unsigned long flags;
Joe Perches406874a2008-04-03 10:06:32 -07003577 u16 phy_tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578
3579#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3580
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003581 /* Prevent stats update while adapter is being reset, or if the pci
Linas Vepstas282f33c2006-06-08 22:19:44 -07003582 * connection is down.
3583 */
Auke Kok90267292006-06-08 09:30:24 -07003584 if (adapter->link_speed == 0)
3585 return;
Linas Vepstas81b19552006-12-12 18:29:15 -06003586 if (pci_channel_offline(pdev))
Linas Vepstas282f33c2006-06-08 22:19:44 -07003587 return;
Auke Kok90267292006-06-08 09:30:24 -07003588
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589 spin_lock_irqsave(&adapter->stats_lock, flags);
3590
Masatake YAMATO828d0552007-10-20 03:06:37 +02003591 /* these counters are modified from e1000_tbi_adjust_stats,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003592 * called from the interrupt context, so they must only
3593 * be written while holding adapter->stats_lock
3594 */
3595
Joe Perches1dc32912008-07-11 15:17:08 -07003596 adapter->stats.crcerrs += er32(CRCERRS);
3597 adapter->stats.gprc += er32(GPRC);
3598 adapter->stats.gorcl += er32(GORCL);
3599 adapter->stats.gorch += er32(GORCH);
3600 adapter->stats.bprc += er32(BPRC);
3601 adapter->stats.mprc += er32(MPRC);
3602 adapter->stats.roc += er32(ROC);
Auke Kokcd94dd02006-06-27 09:08:22 -07003603
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003604 adapter->stats.prc64 += er32(PRC64);
3605 adapter->stats.prc127 += er32(PRC127);
3606 adapter->stats.prc255 += er32(PRC255);
3607 adapter->stats.prc511 += er32(PRC511);
3608 adapter->stats.prc1023 += er32(PRC1023);
3609 adapter->stats.prc1522 += er32(PRC1522);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610
Joe Perches1dc32912008-07-11 15:17:08 -07003611 adapter->stats.symerrs += er32(SYMERRS);
3612 adapter->stats.mpc += er32(MPC);
3613 adapter->stats.scc += er32(SCC);
3614 adapter->stats.ecol += er32(ECOL);
3615 adapter->stats.mcc += er32(MCC);
3616 adapter->stats.latecol += er32(LATECOL);
3617 adapter->stats.dc += er32(DC);
3618 adapter->stats.sec += er32(SEC);
3619 adapter->stats.rlec += er32(RLEC);
3620 adapter->stats.xonrxc += er32(XONRXC);
3621 adapter->stats.xontxc += er32(XONTXC);
3622 adapter->stats.xoffrxc += er32(XOFFRXC);
3623 adapter->stats.xofftxc += er32(XOFFTXC);
3624 adapter->stats.fcruc += er32(FCRUC);
3625 adapter->stats.gptc += er32(GPTC);
3626 adapter->stats.gotcl += er32(GOTCL);
3627 adapter->stats.gotch += er32(GOTCH);
3628 adapter->stats.rnbc += er32(RNBC);
3629 adapter->stats.ruc += er32(RUC);
3630 adapter->stats.rfc += er32(RFC);
3631 adapter->stats.rjc += er32(RJC);
3632 adapter->stats.torl += er32(TORL);
3633 adapter->stats.torh += er32(TORH);
3634 adapter->stats.totl += er32(TOTL);
3635 adapter->stats.toth += er32(TOTH);
3636 adapter->stats.tpr += er32(TPR);
Auke Kokcd94dd02006-06-27 09:08:22 -07003637
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003638 adapter->stats.ptc64 += er32(PTC64);
3639 adapter->stats.ptc127 += er32(PTC127);
3640 adapter->stats.ptc255 += er32(PTC255);
3641 adapter->stats.ptc511 += er32(PTC511);
3642 adapter->stats.ptc1023 += er32(PTC1023);
3643 adapter->stats.ptc1522 += er32(PTC1522);
Auke Kokcd94dd02006-06-27 09:08:22 -07003644
Joe Perches1dc32912008-07-11 15:17:08 -07003645 adapter->stats.mptc += er32(MPTC);
3646 adapter->stats.bptc += er32(BPTC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003647
3648 /* used for adaptive IFS */
3649
Joe Perches1dc32912008-07-11 15:17:08 -07003650 hw->tx_packet_delta = er32(TPT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 adapter->stats.tpt += hw->tx_packet_delta;
Joe Perches1dc32912008-07-11 15:17:08 -07003652 hw->collision_delta = er32(COLC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003653 adapter->stats.colc += hw->collision_delta;
3654
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003655 if (hw->mac_type >= e1000_82543) {
Joe Perches1dc32912008-07-11 15:17:08 -07003656 adapter->stats.algnerrc += er32(ALGNERRC);
3657 adapter->stats.rxerrc += er32(RXERRC);
3658 adapter->stats.tncrs += er32(TNCRS);
3659 adapter->stats.cexterr += er32(CEXTERR);
3660 adapter->stats.tsctc += er32(TSCTC);
3661 adapter->stats.tsctfc += er32(TSCTFC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003662 }
3663
3664 /* Fill out the OS statistics structure */
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003665 netdev->stats.multicast = adapter->stats.mprc;
3666 netdev->stats.collisions = adapter->stats.colc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003667
3668 /* Rx Errors */
3669
Jeff Kirsher87041632006-03-02 18:21:24 -08003670 /* RLEC on some newer hardware can be incorrect so build
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003671 * our own version based on RUC and ROC
3672 */
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003673 netdev->stats.rx_errors = adapter->stats.rxerrc +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003674 adapter->stats.crcerrs + adapter->stats.algnerrc +
Jeff Kirsher87041632006-03-02 18:21:24 -08003675 adapter->stats.ruc + adapter->stats.roc +
3676 adapter->stats.cexterr;
Mitch Williams49559852006-09-27 12:53:37 -07003677 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003678 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3679 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3680 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3681 netdev->stats.rx_missed_errors = adapter->stats.mpc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682
3683 /* Tx Errors */
Mitch Williams49559852006-09-27 12:53:37 -07003684 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003685 netdev->stats.tx_errors = adapter->stats.txerrc;
3686 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3687 netdev->stats.tx_window_errors = adapter->stats.latecol;
3688 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
Joe Perches1dc32912008-07-11 15:17:08 -07003689 if (hw->bad_tx_carr_stats_fd &&
Jeff Garzik167fb282006-12-15 10:41:15 -05003690 adapter->link_duplex == FULL_DUPLEX) {
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003691 netdev->stats.tx_carrier_errors = 0;
Jeff Garzik167fb282006-12-15 10:41:15 -05003692 adapter->stats.tncrs = 0;
3693 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694
3695 /* Tx Dropped needs to be maintained elsewhere */
3696
3697 /* Phy Stats */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003698 if (hw->media_type == e1000_media_type_copper) {
3699 if ((adapter->link_speed == SPEED_1000) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003700 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3701 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3702 adapter->phy_stats.idle_errors += phy_tmp;
3703 }
3704
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003705 if ((hw->mac_type <= e1000_82546) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706 (hw->phy_type == e1000_phy_m88) &&
3707 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3708 adapter->phy_stats.receive_errors += phy_tmp;
3709 }
3710
Jeff Garzik15e376b2006-12-15 11:16:33 -05003711 /* Management Stats */
Joe Perches1dc32912008-07-11 15:17:08 -07003712 if (hw->has_smbus) {
3713 adapter->stats.mgptc += er32(MGTPTC);
3714 adapter->stats.mgprc += er32(MGTPRC);
3715 adapter->stats.mgpdc += er32(MGTPDC);
Jeff Garzik15e376b2006-12-15 11:16:33 -05003716 }
3717
Linus Torvalds1da177e2005-04-16 15:20:36 -07003718 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3719}
Jesse Brandeburg9ac98282006-11-01 08:48:10 -08003720
3721/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003722 * e1000_intr - Interrupt Handler
3723 * @irq: interrupt number
3724 * @data: pointer to a network interface device structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07003725 **/
Joe Perches64798842008-07-11 15:17:02 -07003726static irqreturn_t e1000_intr(int irq, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727{
3728 struct net_device *netdev = data;
Malli Chilakala60490fe2005-06-17 17:41:45 -07003729 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730 struct e1000_hw *hw = &adapter->hw;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003731 u32 icr = er32(ICR);
Francois Romieuc3570ac2008-07-11 15:17:38 -07003732
Jesse Brandeburg4c11b8a2011-01-13 07:48:13 +00003733 if (unlikely((!icr)))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003734 return IRQ_NONE; /* Not our interrupt */
3735
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003736 /* we might have caused the interrupt, but the above
Jesse Brandeburg4c11b8a2011-01-13 07:48:13 +00003737 * read cleared it, and just in case the driver is
3738 * down there is nothing to do so return handled
3739 */
3740 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3741 return IRQ_HANDLED;
3742
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003743 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744 hw->get_link_status = 1;
Auke Kok1314bbf2006-09-27 12:54:02 -07003745 /* guard against interrupt when we're going down */
3746 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00003747 schedule_delayed_work(&adapter->watchdog_task, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003748 }
3749
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003750 /* disable interrupts, without the synchronize_irq bit */
3751 ew32(IMC, ~0);
3752 E1000_WRITE_FLUSH();
3753
Ben Hutchings288379f2009-01-19 16:43:59 -08003754 if (likely(napi_schedule_prep(&adapter->napi))) {
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003755 adapter->total_tx_bytes = 0;
3756 adapter->total_tx_packets = 0;
3757 adapter->total_rx_bytes = 0;
3758 adapter->total_rx_packets = 0;
Ben Hutchings288379f2009-01-19 16:43:59 -08003759 __napi_schedule(&adapter->napi);
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003760 } else {
Auke Kok90fb5132006-11-01 08:47:30 -08003761 /* this really should not happen! if it does it is basically a
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003762 * bug, but not a hard error, so enable ints and continue
3763 */
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003764 if (!test_bit(__E1000_DOWN, &adapter->flags))
3765 e1000_irq_enable(adapter);
3766 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003767
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768 return IRQ_HANDLED;
3769}
3770
Linus Torvalds1da177e2005-04-16 15:20:36 -07003771/**
3772 * e1000_clean - NAPI Rx polling callback
3773 * @adapter: board private structure
3774 **/
Joe Perches64798842008-07-11 15:17:02 -07003775static int e1000_clean(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003776{
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003777 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3778 napi);
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003779 int tx_clean_complete = 0, work_done = 0;
Malli Chilakala26483452005-04-28 19:44:46 -07003780
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003781 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003782
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003783 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003784
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003785 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08003786 work_done = budget;
3787
David S. Miller53e52c72008-01-07 21:06:12 -08003788 /* If budget not fully consumed, exit the polling mode */
3789 if (work_done < budget) {
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003790 if (likely(adapter->itr_setting & 3))
3791 e1000_set_itr(adapter);
Ben Hutchings288379f2009-01-19 16:43:59 -08003792 napi_complete(napi);
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003793 if (!test_bit(__E1000_DOWN, &adapter->flags))
3794 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795 }
3796
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003797 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003798}
3799
Linus Torvalds1da177e2005-04-16 15:20:36 -07003800/**
3801 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3802 * @adapter: board private structure
3803 **/
Joe Perches64798842008-07-11 15:17:02 -07003804static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3805 struct e1000_tx_ring *tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003806{
Joe Perches1dc32912008-07-11 15:17:08 -07003807 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003808 struct net_device *netdev = adapter->netdev;
3809 struct e1000_tx_desc *tx_desc, *eop_desc;
3810 struct e1000_buffer *buffer_info;
3811 unsigned int i, eop;
Jeff Kirsher2a1af5d2006-03-02 18:20:43 -08003812 unsigned int count = 0;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003813 unsigned int total_tx_bytes=0, total_tx_packets=0;
Otto Estuardo Solares Cabrera2f66fd32012-08-24 08:04:58 +00003814 unsigned int bytes_compl = 0, pkts_compl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003815
3816 i = tx_ring->next_to_clean;
3817 eop = tx_ring->buffer_info[i].next_to_watch;
3818 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3819
Alexander Duyckccfb3422009-03-25 21:59:04 +00003820 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3821 (count < tx_ring->count)) {
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003822 bool cleaned = false;
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +00003823 rmb(); /* read buffer_info after eop_desc */
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003824 for ( ; !cleaned; count++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825 tx_desc = E1000_TX_DESC(*tx_ring, i);
3826 buffer_info = &tx_ring->buffer_info[i];
3827 cleaned = (i == eop);
3828
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003829 if (cleaned) {
Dean Nelson31c15a22011-08-25 14:39:24 +00003830 total_tx_packets += buffer_info->segs;
3831 total_tx_bytes += buffer_info->bytecount;
Otto Estuardo Solares Cabrera2f66fd32012-08-24 08:04:58 +00003832 if (buffer_info->skb) {
3833 bytes_compl += buffer_info->skb->len;
3834 pkts_compl++;
3835 }
3836
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003837 }
Jeff Kirsherfd803242005-12-13 00:06:22 -05003838 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08003839 tx_desc->upper.data = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003841 if (unlikely(++i == tx_ring->count)) i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842 }
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003843
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844 eop = tx_ring->buffer_info[i].next_to_watch;
3845 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3846 }
3847
3848 tx_ring->next_to_clean = i;
3849
Otto Estuardo Solares Cabrera2f66fd32012-08-24 08:04:58 +00003850 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3851
Auke Kok77b2aad2006-04-14 19:05:25 -07003852#define TX_WAKE_THRESHOLD 32
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003853 if (unlikely(count && netif_carrier_ok(netdev) &&
Jesse Brandeburg65c79732006-09-27 12:53:48 -07003854 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3855 /* Make sure that anybody stopping the queue after this
3856 * sees the new next_to_clean.
3857 */
3858 smp_mb();
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003859
3860 if (netif_queue_stopped(netdev) &&
3861 !(test_bit(__E1000_DOWN, &adapter->flags))) {
Auke Kok77b2aad2006-04-14 19:05:25 -07003862 netif_wake_queue(netdev);
Jesse Brandeburgfcfb1222006-11-01 08:47:59 -08003863 ++adapter->restart_queue;
3864 }
Auke Kok77b2aad2006-04-14 19:05:25 -07003865 }
Malli Chilakala26483452005-04-28 19:44:46 -07003866
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003867 if (adapter->detect_tx_hung) {
Malli Chilakala26483452005-04-28 19:44:46 -07003868 /* Detect a transmit hang in hardware, this serializes the
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003869 * check with the clearing of time_stamp and movement of i
3870 */
Joe Perchesc3033b02008-03-21 11:06:25 -07003871 adapter->detect_tx_hung = false;
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003872 if (tx_ring->buffer_info[eop].time_stamp &&
3873 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003874 (adapter->tx_timeout_factor * HZ)) &&
Joe Perches8e95a202009-12-03 07:58:21 +00003875 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003876
3877 /* detected Tx unit hang */
Emil Tantilovfeb8f472010-07-26 23:37:21 -07003878 e_err(drv, "Detected Tx Unit Hang\n"
Emil Tantilov675ad472010-04-27 14:02:58 +00003879 " Tx Queue <%lu>\n"
3880 " TDH <%x>\n"
3881 " TDT <%x>\n"
3882 " next_to_use <%x>\n"
3883 " next_to_clean <%x>\n"
3884 "buffer_info[next_to_clean]\n"
3885 " time_stamp <%lx>\n"
3886 " next_to_watch <%x>\n"
3887 " jiffies <%lx>\n"
3888 " next_to_watch.status <%x>\n",
Hong Zhiguo49a45a02013-10-22 18:32:56 +00003889 (unsigned long)(tx_ring - adapter->tx_ring),
Joe Perches1dc32912008-07-11 15:17:08 -07003890 readl(hw->hw_addr + tx_ring->tdh),
3891 readl(hw->hw_addr + tx_ring->tdt),
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003892 tx_ring->next_to_use,
Jeff Kirsher392137f2006-01-12 16:50:57 -08003893 tx_ring->next_to_clean,
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003894 tx_ring->buffer_info[eop].time_stamp,
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003895 eop,
3896 jiffies,
3897 eop_desc->upper.fields.status);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003898 e1000_dump(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899 netif_stop_queue(netdev);
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003900 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003901 }
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003902 adapter->total_tx_bytes += total_tx_bytes;
3903 adapter->total_tx_packets += total_tx_packets;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003904 netdev->stats.tx_bytes += total_tx_bytes;
3905 netdev->stats.tx_packets += total_tx_packets;
Eric Dumazet807540b2010-09-23 05:40:09 +00003906 return count < tx_ring->count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003907}
3908
3909/**
3910 * e1000_rx_checksum - Receive Checksum Offload for 82543
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003911 * @adapter: board private structure
3912 * @status_err: receive descriptor status and error fields
3913 * @csum: receive descriptor csum field
3914 * @sk_buff: socket buffer with received data
Linus Torvalds1da177e2005-04-16 15:20:36 -07003915 **/
Joe Perches64798842008-07-11 15:17:02 -07003916static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3917 u32 csum, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003918{
Joe Perches1dc32912008-07-11 15:17:08 -07003919 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07003920 u16 status = (u16)status_err;
3921 u8 errors = (u8)(status_err >> 24);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07003922
3923 skb_checksum_none_assert(skb);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003924
Linus Torvalds1da177e2005-04-16 15:20:36 -07003925 /* 82543 or newer only */
Joe Perches1dc32912008-07-11 15:17:08 -07003926 if (unlikely(hw->mac_type < e1000_82543)) return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003927 /* Ignore Checksum bit is set */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003928 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003929 /* TCP/UDP checksum error bit is set */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003930 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003931 /* let the stack verify checksum errors */
3932 adapter->hw_csum_err++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933 return;
3934 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003935 /* TCP/UDP Checksum has not been calculated */
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003936 if (!(status & E1000_RXD_STAT_TCPCS))
3937 return;
3938
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003939 /* It must be a TCP or UDP packet with a valid checksum */
3940 if (likely(status & E1000_RXD_STAT_TCPCS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941 /* TCP checksum is good */
3942 skb->ip_summed = CHECKSUM_UNNECESSARY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003943 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003944 adapter->hw_csum_good++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945}
3946
3947/**
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003948 * e1000_consume_page - helper function
3949 **/
3950static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003951 u16 length)
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003952{
3953 bi->page = NULL;
3954 skb->len += length;
3955 skb->data_len += length;
Eric Dumazeted64b3c2011-10-13 07:53:42 +00003956 skb->truesize += PAGE_SIZE;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003957}
3958
3959/**
3960 * e1000_receive_skb - helper function to handle rx indications
3961 * @adapter: board private structure
3962 * @status: descriptor status field as written by hardware
3963 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3964 * @skb: pointer to sk_buff to be indicated to stack
3965 */
3966static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3967 __le16 vlan, struct sk_buff *skb)
3968{
Jesse Brandeburg6a08d192010-09-22 18:23:05 +00003969 skb->protocol = eth_type_trans(skb, adapter->netdev);
3970
Jiri Pirko5622e402011-07-21 03:26:31 +00003971 if (status & E1000_RXD_STAT_VP) {
3972 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3973
Patrick McHardy86a9bad2013-04-19 02:04:30 +00003974 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
Jiri Pirko5622e402011-07-21 03:26:31 +00003975 }
3976 napi_gro_receive(&adapter->napi, skb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00003977}
3978
3979/**
3980 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
3981 * @adapter: board private structure
3982 * @rx_ring: ring to clean
3983 * @work_done: amount of napi work completed this call
3984 * @work_to_do: max amount of work allowed for this call to do
3985 *
3986 * the return value indicates whether actual cleaning was done, there
3987 * is no guarantee that everything was cleaned
3988 */
3989static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3990 struct e1000_rx_ring *rx_ring,
3991 int *work_done, int work_to_do)
3992{
3993 struct e1000_hw *hw = &adapter->hw;
3994 struct net_device *netdev = adapter->netdev;
3995 struct pci_dev *pdev = adapter->pdev;
3996 struct e1000_rx_desc *rx_desc, *next_rxd;
3997 struct e1000_buffer *buffer_info, *next_buffer;
3998 unsigned long irq_flags;
3999 u32 length;
4000 unsigned int i;
4001 int cleaned_count = 0;
4002 bool cleaned = false;
4003 unsigned int total_rx_bytes=0, total_rx_packets=0;
4004
4005 i = rx_ring->next_to_clean;
4006 rx_desc = E1000_RX_DESC(*rx_ring, i);
4007 buffer_info = &rx_ring->buffer_info[i];
4008
4009 while (rx_desc->status & E1000_RXD_STAT_DD) {
4010 struct sk_buff *skb;
4011 u8 status;
4012
4013 if (*work_done >= work_to_do)
4014 break;
4015 (*work_done)++;
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +00004016 rmb(); /* read descriptor and rx_buffer_info after status DD */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004017
4018 status = rx_desc->status;
4019 skb = buffer_info->skb;
4020 buffer_info->skb = NULL;
4021
4022 if (++i == rx_ring->count) i = 0;
4023 next_rxd = E1000_RX_DESC(*rx_ring, i);
4024 prefetch(next_rxd);
4025
4026 next_buffer = &rx_ring->buffer_info[i];
4027
4028 cleaned = true;
4029 cleaned_count++;
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004030 dma_unmap_page(&pdev->dev, buffer_info->dma,
4031 buffer_info->length, DMA_FROM_DEVICE);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004032 buffer_info->dma = 0;
4033
4034 length = le16_to_cpu(rx_desc->length);
4035
4036 /* errors is only valid for DD + EOP descriptors */
4037 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4038 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
Sebastian Andrzej Siewiora3060852012-05-11 16:30:46 +00004039 u8 *mapped;
4040 u8 last_byte;
4041
4042 mapped = page_address(buffer_info->page);
4043 last_byte = *(mapped + length - 1);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004044 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4045 last_byte)) {
4046 spin_lock_irqsave(&adapter->stats_lock,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004047 irq_flags);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004048 e1000_tbi_adjust_stats(hw, &adapter->stats,
Sebastian Andrzej Siewior281a8f22012-05-15 09:18:55 +00004049 length, mapped);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004050 spin_unlock_irqrestore(&adapter->stats_lock,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004051 irq_flags);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004052 length--;
4053 } else {
Ben Greeare825b732012-04-04 06:01:29 +00004054 if (netdev->features & NETIF_F_RXALL)
4055 goto process_skb;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004056 /* recycle both page and skb */
4057 buffer_info->skb = skb;
4058 /* an error means any chain goes out the window
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004059 * too
4060 */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004061 if (rx_ring->rx_skb_top)
4062 dev_kfree_skb(rx_ring->rx_skb_top);
4063 rx_ring->rx_skb_top = NULL;
4064 goto next_desc;
4065 }
4066 }
4067
4068#define rxtop rx_ring->rx_skb_top
Ben Greeare825b732012-04-04 06:01:29 +00004069process_skb:
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004070 if (!(status & E1000_RXD_STAT_EOP)) {
4071 /* this descriptor is only the beginning (or middle) */
4072 if (!rxtop) {
4073 /* this is the beginning of a chain */
4074 rxtop = skb;
4075 skb_fill_page_desc(rxtop, 0, buffer_info->page,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004076 0, length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004077 } else {
4078 /* this is the middle of a chain */
4079 skb_fill_page_desc(rxtop,
4080 skb_shinfo(rxtop)->nr_frags,
4081 buffer_info->page, 0, length);
4082 /* re-use the skb, only consumed the page */
4083 buffer_info->skb = skb;
4084 }
4085 e1000_consume_page(buffer_info, rxtop, length);
4086 goto next_desc;
4087 } else {
4088 if (rxtop) {
4089 /* end of the chain */
4090 skb_fill_page_desc(rxtop,
4091 skb_shinfo(rxtop)->nr_frags,
4092 buffer_info->page, 0, length);
4093 /* re-use the current skb, we only consumed the
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004094 * page
4095 */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004096 buffer_info->skb = skb;
4097 skb = rxtop;
4098 rxtop = NULL;
4099 e1000_consume_page(buffer_info, skb, length);
4100 } else {
4101 /* no chain, got EOP, this buf is the packet
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004102 * copybreak to save the put_page/alloc_page
4103 */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004104 if (length <= copybreak &&
4105 skb_tailroom(skb) >= length) {
4106 u8 *vaddr;
Cong Wang46790262011-11-25 23:14:23 +08004107 vaddr = kmap_atomic(buffer_info->page);
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004108 memcpy(skb_tail_pointer(skb), vaddr,
4109 length);
Cong Wang46790262011-11-25 23:14:23 +08004110 kunmap_atomic(vaddr);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004111 /* re-use the page, so don't erase
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004112 * buffer_info->page
4113 */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004114 skb_put(skb, length);
4115 } else {
4116 skb_fill_page_desc(skb, 0,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004117 buffer_info->page, 0,
4118 length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004119 e1000_consume_page(buffer_info, skb,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004120 length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004121 }
4122 }
4123 }
4124
4125 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4126 e1000_rx_checksum(adapter,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004127 (u32)(status) |
4128 ((u32)(rx_desc->errors) << 24),
4129 le16_to_cpu(rx_desc->csum), skb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004130
Ben Greearb0d15622012-02-11 15:40:11 +00004131 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4132 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4133 pskb_trim(skb, skb->len - 4);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004134 total_rx_packets++;
4135
4136 /* eth type trans needs skb->data to point to something */
4137 if (!pskb_may_pull(skb, ETH_HLEN)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004138 e_err(drv, "pskb_may_pull failed.\n");
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004139 dev_kfree_skb(skb);
4140 goto next_desc;
4141 }
4142
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004143 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4144
4145next_desc:
4146 rx_desc->status = 0;
4147
4148 /* return some buffers to hardware, one at a time is too slow */
4149 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4150 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4151 cleaned_count = 0;
4152 }
4153
4154 /* use prefetched values */
4155 rx_desc = next_rxd;
4156 buffer_info = next_buffer;
4157 }
4158 rx_ring->next_to_clean = i;
4159
4160 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4161 if (cleaned_count)
4162 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4163
4164 adapter->total_rx_packets += total_rx_packets;
4165 adapter->total_rx_bytes += total_rx_bytes;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00004166 netdev->stats.rx_bytes += total_rx_bytes;
4167 netdev->stats.rx_packets += total_rx_packets;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004168 return cleaned;
4169}
4170
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004171/* this should improve performance for small packets with large amounts
Joe Perches57bf6ee2010-05-13 15:26:17 +00004172 * of reassembly being done in the stack
4173 */
4174static void e1000_check_copybreak(struct net_device *netdev,
4175 struct e1000_buffer *buffer_info,
4176 u32 length, struct sk_buff **skb)
4177{
4178 struct sk_buff *new_skb;
4179
4180 if (length > copybreak)
4181 return;
4182
4183 new_skb = netdev_alloc_skb_ip_align(netdev, length);
4184 if (!new_skb)
4185 return;
4186
4187 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4188 (*skb)->data - NET_IP_ALIGN,
4189 length + NET_IP_ALIGN);
4190 /* save the skb in buffer_info as good */
4191 buffer_info->skb = *skb;
4192 *skb = new_skb;
4193}
4194
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004195/**
Malli Chilakala2d7edb92005-04-28 19:43:52 -07004196 * e1000_clean_rx_irq - Send received data up the network stack; legacy
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197 * @adapter: board private structure
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004198 * @rx_ring: ring to clean
4199 * @work_done: amount of napi work completed this call
4200 * @work_to_do: max amount of work allowed for this call to do
4201 */
Joe Perches64798842008-07-11 15:17:02 -07004202static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4203 struct e1000_rx_ring *rx_ring,
4204 int *work_done, int work_to_do)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205{
Joe Perches1dc32912008-07-11 15:17:08 -07004206 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004207 struct net_device *netdev = adapter->netdev;
4208 struct pci_dev *pdev = adapter->pdev;
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004209 struct e1000_rx_desc *rx_desc, *next_rxd;
4210 struct e1000_buffer *buffer_info, *next_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004211 unsigned long flags;
Joe Perches406874a2008-04-03 10:06:32 -07004212 u32 length;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213 unsigned int i;
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004214 int cleaned_count = 0;
Joe Perchesc3033b02008-03-21 11:06:25 -07004215 bool cleaned = false;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08004216 unsigned int total_rx_bytes=0, total_rx_packets=0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217
4218 i = rx_ring->next_to_clean;
4219 rx_desc = E1000_RX_DESC(*rx_ring, i);
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004220 buffer_info = &rx_ring->buffer_info[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004222 while (rx_desc->status & E1000_RXD_STAT_DD) {
Auke Kok24f476e2006-06-08 09:28:47 -07004223 struct sk_buff *skb;
Jeff Kirshera292ca62006-01-12 16:51:30 -08004224 u8 status;
Auke Kok90fb5132006-11-01 08:47:30 -08004225
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004226 if (*work_done >= work_to_do)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004227 break;
4228 (*work_done)++;
Jeff Kirsher2d0bb1c2010-08-08 16:02:31 +00004229 rmb(); /* read descriptor and rx_buffer_info after status DD */
Francois Romieuc3570ac2008-07-11 15:17:38 -07004230
Jeff Kirshera292ca62006-01-12 16:51:30 -08004231 status = rx_desc->status;
Jesse Brandeburgb92ff8e2006-01-18 13:01:32 -08004232 skb = buffer_info->skb;
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004233 buffer_info->skb = NULL;
4234
Jeff Kirsher30320be2006-03-02 18:21:57 -08004235 prefetch(skb->data - NET_IP_ALIGN);
4236
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004237 if (++i == rx_ring->count) i = 0;
4238 next_rxd = E1000_RX_DESC(*rx_ring, i);
Jeff Kirsher30320be2006-03-02 18:21:57 -08004239 prefetch(next_rxd);
4240
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004241 next_buffer = &rx_ring->buffer_info[i];
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004242
Joe Perchesc3033b02008-03-21 11:06:25 -07004243 cleaned = true;
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004244 cleaned_count++;
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004245 dma_unmap_single(&pdev->dev, buffer_info->dma,
4246 buffer_info->length, DMA_FROM_DEVICE);
Jesse Brandeburg679be3b2009-06-30 12:45:34 +00004247 buffer_info->dma = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249 length = le16_to_cpu(rx_desc->length);
Neil Hormanea30e112009-06-02 01:29:58 -07004250 /* !EOP means multiple descriptors were used to store a single
Jesse Brandeburg40a14de2010-01-19 14:15:38 +00004251 * packet, if thats the case we need to toss it. In fact, we
4252 * to toss every packet with the EOP bit clear and the next
4253 * frame that _does_ have the EOP bit set, as it is by
4254 * definition only a frame fragment
4255 */
4256 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4257 adapter->discarding = true;
4258
4259 if (adapter->discarding) {
Jeff Kirshera1415ee2006-02-28 20:24:07 -08004260 /* All receives must fit into a single buffer */
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004261 e_dbg("Receive packet consumed multiple buffers\n");
Auke Kok864c4e42006-06-27 09:06:53 -07004262 /* recycle */
Auke Kok8fc897b2006-08-28 14:56:16 -07004263 buffer_info->skb = skb;
Jesse Brandeburg40a14de2010-01-19 14:15:38 +00004264 if (status & E1000_RXD_STAT_EOP)
4265 adapter->discarding = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266 goto next_desc;
4267 }
4268
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004269 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004270 u8 last_byte = *(skb->data + length - 1);
Joe Perches1dc32912008-07-11 15:17:08 -07004271 if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4272 last_byte)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273 spin_lock_irqsave(&adapter->stats_lock, flags);
Joe Perches1dc32912008-07-11 15:17:08 -07004274 e1000_tbi_adjust_stats(hw, &adapter->stats,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004275 length, skb->data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276 spin_unlock_irqrestore(&adapter->stats_lock,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004277 flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004278 length--;
4279 } else {
Ben Greeare825b732012-04-04 06:01:29 +00004280 if (netdev->features & NETIF_F_RXALL)
4281 goto process_skb;
Auke Kok9e2feac2006-04-14 19:05:18 -07004282 /* recycle */
4283 buffer_info->skb = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284 goto next_desc;
4285 }
Auke Kok1cb58212006-04-18 12:31:04 -07004286 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004287
Ben Greeare825b732012-04-04 06:01:29 +00004288process_skb:
Ben Greearb0d15622012-02-11 15:40:11 +00004289 total_rx_bytes += (length - 4); /* don't count FCS */
Jesse Brandeburg835bb122006-11-01 08:48:13 -08004290 total_rx_packets++;
4291
Ben Greearb0d15622012-02-11 15:40:11 +00004292 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4293 /* adjust length to remove Ethernet CRC, this must be
4294 * done after the TBI_ACCEPT workaround above
4295 */
4296 length -= 4;
4297
Joe Perches57bf6ee2010-05-13 15:26:17 +00004298 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4299
Auke Kok996695d2006-11-01 08:47:50 -08004300 skb_put(skb, length);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004301
4302 /* Receive Checksum Offload */
Jeff Kirshera292ca62006-01-12 16:51:30 -08004303 e1000_rx_checksum(adapter,
Joe Perches406874a2008-04-03 10:06:32 -07004304 (u32)(status) |
4305 ((u32)(rx_desc->errors) << 24),
David S. Millerc3d7a3a2006-03-15 14:26:28 -08004306 le16_to_cpu(rx_desc->csum), skb);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004307
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004308 e1000_receive_skb(adapter, status, rx_desc->special, skb);
Francois Romieuc3570ac2008-07-11 15:17:38 -07004309
Linus Torvalds1da177e2005-04-16 15:20:36 -07004310next_desc:
4311 rx_desc->status = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004313 /* return some buffers to hardware, one at a time is too slow */
4314 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4315 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4316 cleaned_count = 0;
4317 }
4318
Jeff Kirsher30320be2006-03-02 18:21:57 -08004319 /* use prefetched values */
Jesse Brandeburg86c3d592006-01-18 13:01:43 -08004320 rx_desc = next_rxd;
4321 buffer_info = next_buffer;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323 rx_ring->next_to_clean = i;
Jeff Kirsher72d64a42006-01-12 16:51:19 -08004324
4325 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4326 if (cleaned_count)
4327 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328
Jesse Brandeburg835bb122006-11-01 08:48:13 -08004329 adapter->total_rx_packets += total_rx_packets;
4330 adapter->total_rx_bytes += total_rx_bytes;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00004331 netdev->stats.rx_bytes += total_rx_bytes;
4332 netdev->stats.rx_packets += total_rx_packets;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004333 return cleaned;
4334}
4335
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 *
 * Refills up to @cleaned_count descriptors, each backed by a small skb
 * (for the packet header copy) plus a DMA-mapped page for the packet
 * data, then bumps the ring tail register so hardware can fetch the
 * new descriptors.  Stops early on any allocation/mapping failure and
 * accounts it in alloc_rx_buff_failed.
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* reuse a recycled skb if the clean path left one behind */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		/* map the page only once; a recycled buffer keeps its
		 * existing DMA mapping (dma != 0)
		 */
		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							buffer_info->length,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				/* unwind everything set up for this slot */
				put_page(buffer_info->page);
				dev_kfree_skb(skb);
				buffer_info->page = NULL;
				buffer_info->skb = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* tail register points at the last written descriptor,
		 * i.e. one before next_to_use, wrapping at the ring end
		 */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
4421
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 *
 * Refills up to @cleaned_count descriptors with freshly allocated,
 * DMA-mapped skbs, working around hardware errata 23 (a receive buffer
 * must not cross a 64 kB boundary), then advances the ring tail so the
 * hardware sees the new descriptors.
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* reuse a recycled skb if the clean path left one behind */
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			/* the retry is only freed after it too has been
			 * checked; holding oldskb forces the allocator
			 * to hand back a different region
			 */
			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}

		/* XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					(void *)(unsigned long)buffer_info->dma,
					adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* tail register points at the last written descriptor,
		 * i.e. one before next_to_use, wrapping at the ring end
		 */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
4540
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 *
 * Only acts on IGP PHYs autonegotiating with 1000-full advertised.
 * First pass: if the Master/Slave configuration fault is asserted on
 * two consecutive reads, disable manual Master/Slave and restart
 * autonegotiation.  After E1000_SMARTSPEED_DOWNSHIFT further calls with
 * no link, re-enable Master/Slave and restart autoneg again (the link
 * partner may be on a 2/3-pair cable).  The counter resets after
 * E1000_SMARTSPEED_MAX iterations so the whole cycle can repeat.
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			/* drop manual Master/Slave and renegotiate */
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
4595
4596/**
4597 * e1000_ioctl -
4598 * @netdev:
4599 * @ifreq:
4600 * @cmd:
4601 **/
Joe Perches64798842008-07-11 15:17:02 -07004602static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004603{
4604 switch (cmd) {
4605 case SIOCGMIIPHY:
4606 case SIOCGMIIREG:
4607 case SIOCSMIIREG:
4608 return e1000_mii_ioctl(netdev, ifr, cmd);
4609 default:
4610 return -EOPNOTSUPP;
4611 }
4612}
4613
/**
 * e1000_mii_ioctl - handle MII PHY register ioctls (copper media only)
 * @netdev: network interface device structure
 * @ifr: ioctl request data, interpreted as struct mii_ioctl_data
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 *
 * Reads/writes PHY registers under stats_lock.  A write to PHY_CTRL
 * additionally mirrors the new autoneg/speed/duplex setting into the
 * driver state and resets (or reinitializes) the device; a write to
 * the M88E1000 specific-control registers triggers a PHY reset.
 * Returns E1000_SUCCESS, or a negative errno on failure.
 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		/* PHY register space is 5 bits wide, hence the & 0x1F */
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					/* advertise all speed/duplex modes */
					hw->autoneg_advertised = 0x2F;
				} else {
					/* decode forced speed from the MII
					 * control register speed bits
					 */
					u32 speed;
					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				/* apply the new link configuration */
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
4710
Joe Perches64798842008-07-11 15:17:02 -07004711void e1000_pci_set_mwi(struct e1000_hw *hw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712{
4713 struct e1000_adapter *adapter = hw->back;
Malli Chilakala26483452005-04-28 19:44:46 -07004714 int ret_val = pci_set_mwi(adapter->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004716 if (ret_val)
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004717 e_err(probe, "Error in setting MWI\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004718}
4719
/* e1000_pci_clear_mwi - disable PCI Memory-Write-Invalidate for the device */
void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}
4726
/* e1000_pcix_get_mmrbc - return the PCI-X maximum memory read byte count */
int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return pcix_get_mmrbc(adapter->pdev);
}
4732
/* e1000_pcix_set_mmrbc - set the PCI-X maximum memory read byte count */
void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;
	pcix_set_mmrbc(adapter->pdev, mmrbc);
}
4738
/* e1000_io_write - write a 32-bit value to an I/O port.
 * @hw is unused here; the parameter keeps the shared-code callback
 * signature uniform.
 */
void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}
4743
/* e1000_vlan_used - true if any VLAN id is active on this adapter.
 * The loop body executes only for set bits, so returning from the
 * first iteration implements "any bit set" without a full scan.
 */
static bool e1000_vlan_used(struct e1000_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		return true;
	return false;
}
4752
Jiri Pirko52f55092012-03-20 18:10:01 +00004753static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4754 netdev_features_t features)
4755{
4756 struct e1000_hw *hw = &adapter->hw;
4757 u32 ctrl;
4758
4759 ctrl = er32(CTRL);
Patrick McHardyf6469682013-04-19 02:04:27 +00004760 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
Jiri Pirko52f55092012-03-20 18:10:01 +00004761 /* enable VLAN tag insert/strip */
4762 ctrl |= E1000_CTRL_VME;
4763 } else {
4764 /* disable VLAN tag insert/strip */
4765 ctrl &= ~E1000_CTRL_VME;
4766 }
4767 ew32(CTRL, ctrl);
4768}
/* e1000_vlan_filter_on_off - enable/disable hardware VLAN receive filtering.
 * Also re-syncs the tag strip/insert mode (CTRL.VME) from the current
 * netdev features.  When enabling, VFE is only set outside promiscuous
 * mode and the management VLAN is refreshed.  Interrupts are masked for
 * the duration unless the interface is already down.
 */
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
4797
/* e1000_vlan_mode - ndo callback: apply new VLAN offload feature flags.
 * Thin wrapper that masks interrupts (unless the interface is down)
 * around the actual CTRL.VME update in __e1000_vlan_mode().
 */
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
4811
/* e1000_vlan_rx_add_vid - ndo callback: register VLAN @vid with hardware.
 * The management VLAN is skipped when the firmware cookie claims it.
 * Adding the first VLAN turns hardware filtering on.  The VFTA is an
 * array of 128 32-bit words: bits 5-11 of the vid select the word,
 * bits 0-4 select the bit within it.  Always returns 0.
 */
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}
4837
/* e1000_vlan_rx_kill_vid - ndo callback: unregister VLAN @vid.
 * Clears the VFTA bit for @vid (same word/bit layout as in
 * e1000_vlan_rx_add_vid) and disables hardware filtering once the
 * last VLAN is gone.  Always returns 0.
 */
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* NOTE(review): this disable/enable pair brackets no work at
	 * all — it looks like a leftover from an older version where
	 * state was updated in between; confirm against git history
	 * before removing.
	 */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}
4863
/* e1000_restore_vlan - re-program all active VLANs into the hardware,
 * e.g. after a device reset wiped the filter table.  No-op when no
 * VLANs are in use.
 */
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
4875
David Decotigny14ad2512011-04-27 18:32:43 +00004876int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004877{
Joe Perches1dc32912008-07-11 15:17:08 -07004878 struct e1000_hw *hw = &adapter->hw;
4879
4880 hw->autoneg = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004881
David Decotigny14ad2512011-04-27 18:32:43 +00004882 /* Make sure dplx is at most 1 bit and lsb of speed is not set
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004883 * for the switch() below to work
4884 */
David Decotigny14ad2512011-04-27 18:32:43 +00004885 if ((spd & 1) || (dplx & ~1))
4886 goto err_inval;
4887
Malli Chilakala69213682005-06-17 17:44:20 -07004888 /* Fiber NICs only allow 1000 gbps Full duplex */
Joe Perches1dc32912008-07-11 15:17:08 -07004889 if ((hw->media_type == e1000_media_type_fiber) &&
David Decotigny14ad2512011-04-27 18:32:43 +00004890 spd != SPEED_1000 &&
4891 dplx != DUPLEX_FULL)
4892 goto err_inval;
Malli Chilakala69213682005-06-17 17:44:20 -07004893
David Decotigny14ad2512011-04-27 18:32:43 +00004894 switch (spd + dplx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895 case SPEED_10 + DUPLEX_HALF:
Joe Perches1dc32912008-07-11 15:17:08 -07004896 hw->forced_speed_duplex = e1000_10_half;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004897 break;
4898 case SPEED_10 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004899 hw->forced_speed_duplex = e1000_10_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004900 break;
4901 case SPEED_100 + DUPLEX_HALF:
Joe Perches1dc32912008-07-11 15:17:08 -07004902 hw->forced_speed_duplex = e1000_100_half;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004903 break;
4904 case SPEED_100 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004905 hw->forced_speed_duplex = e1000_100_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004906 break;
4907 case SPEED_1000 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07004908 hw->autoneg = 1;
4909 hw->autoneg_advertised = ADVERTISE_1000_FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004910 break;
4911 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4912 default:
David Decotigny14ad2512011-04-27 18:32:43 +00004913 goto err_inval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004914 }
Jesse Brandeburgc819bbd52012-07-26 02:31:09 +00004915
4916 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
4917 hw->mdix = AUTO_ALL_MODES;
4918
Linus Torvalds1da177e2005-04-16 15:20:36 -07004919 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00004920
4921err_inval:
4922 e_err(probe, "Unsupported Speed/Duplex configuration\n");
4923 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924}
4925
/**
 * __e1000_shutdown - common teardown for suspend and shutdown paths
 * @pdev: PCI device being suspended / shut down
 * @enable_wake: out parameter; set true when the caller should leave the
 *	device armed as a wake source (any WoL filter requested, or
 *	manageability is enabled), false otherwise
 *
 * Detaches and downs the interface, saves PCI state when CONFIG_PM is set,
 * and programs the wake-up control/filter registers (WUC/WUFC) from
 * adapter->wol before disabling the PCI device.
 *
 * Returns 0 on success, or the error from pci_save_state().
 */
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;	/* requested Wake-on-LAN filter bits */
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		int count = E1000_CHECK_RESET_COUNT;

		/* wait (bounded) for any in-progress reset to finish before
		 * taking the interface down
		 */
		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
			usleep_range(10000, 20000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	/* link is already up, so don't bother waking on link-status change */
	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		/* receive path must be configured so wake packets are seen */
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		/* arm PME and program the wake-up filters */
		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		/* no wake sources requested: disarm wake-up entirely */
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}
5013
Jesse Brandeburg2f826652006-01-18 13:01:34 -08005014#ifdef CONFIG_PM
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005015static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5016{
5017 int retval;
5018 bool wake;
5019
5020 retval = __e1000_shutdown(pdev, &wake);
5021 if (retval)
5022 return retval;
5023
5024 if (wake) {
5025 pci_prepare_to_sleep(pdev);
5026 } else {
5027 pci_wake_from_d3(pdev, false);
5028 pci_set_power_state(pdev, PCI_D3hot);
5029 }
5030
5031 return 0;
5032}
5033
Joe Perches64798842008-07-11 15:17:02 -07005034static int e1000_resume(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005035{
5036 struct net_device *netdev = pci_get_drvdata(pdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -07005037 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07005038 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07005039 u32 err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005040
Auke Kokd0e027d2006-04-14 19:04:40 -07005041 pci_set_power_state(pdev, PCI_D0);
Kok, Auke1d33e9c2007-02-16 14:39:28 -08005042 pci_restore_state(pdev);
Nick Nunleydbb5aae2010-02-03 14:49:48 +00005043 pci_save_state(pdev);
Taku Izumi81250292008-07-11 15:17:44 -07005044
5045 if (adapter->need_ioport)
5046 err = pci_enable_device(pdev);
5047 else
5048 err = pci_enable_device_mem(pdev);
Joe Perchesc7be73b2008-07-11 15:17:28 -07005049 if (err) {
Emil Tantilov675ad472010-04-27 14:02:58 +00005050 pr_err("Cannot enable PCI device from suspend\n");
Auke Kok3d1dd8c2006-08-28 14:56:27 -07005051 return err;
5052 }
Malli Chilakalaa4cb8472005-04-28 19:41:28 -07005053 pci_set_master(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005054
Auke Kokd0e027d2006-04-14 19:04:40 -07005055 pci_enable_wake(pdev, PCI_D3hot, 0);
5056 pci_enable_wake(pdev, PCI_D3cold, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057
Joe Perchesc7be73b2008-07-11 15:17:28 -07005058 if (netif_running(netdev)) {
5059 err = e1000_request_irq(adapter);
5060 if (err)
5061 return err;
5062 }
Auke Kokedd106f2006-11-06 08:57:12 -08005063
5064 e1000_power_up_phy(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005065 e1000_reset(adapter);
Joe Perches1dc32912008-07-11 15:17:08 -07005066 ew32(WUS, ~0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005067
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005068 e1000_init_manageability(adapter);
5069
Jesse Brandeburg96838a42006-01-18 13:01:39 -08005070 if (netif_running(netdev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005071 e1000_up(adapter);
5072
5073 netif_device_attach(netdev);
5074
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075 return 0;
5076}
5077#endif
Auke Kokc653e632006-05-23 13:35:57 -07005078
5079static void e1000_shutdown(struct pci_dev *pdev)
5080{
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005081 bool wake;
5082
5083 __e1000_shutdown(pdev, &wake);
5084
5085 if (system_state == SYSTEM_POWER_OFF) {
5086 pci_wake_from_d3(pdev, wake);
5087 pci_set_power_state(pdev, PCI_D3hot);
5088 }
Auke Kokc653e632006-05-23 13:35:57 -07005089}
5090
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091#ifdef CONFIG_NET_POLL_CONTROLLER
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00005092/* Polling 'interrupt' - used by things like netconsole to send skbs
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093 * without having to re-enable interrupts. It's not called while
5094 * the interrupt routine is executing.
5095 */
Joe Perches64798842008-07-11 15:17:02 -07005096static void e1000_netpoll(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005097{
Malli Chilakala60490fe2005-06-17 17:41:45 -07005098 struct e1000_adapter *adapter = netdev_priv(netdev);
Auke Kokd3d9e482006-07-14 16:14:23 -07005099
Linus Torvalds1da177e2005-04-16 15:20:36 -07005100 disable_irq(adapter->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01005101 e1000_intr(adapter->pdev->irq, netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005102 enable_irq(adapter->pdev->irq);
5103}
5104#endif
5105
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  It detaches the interface, and unless
 * the failure is permanent, downs the adapter and asks the PCI core for
 * a slot reset.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	/* permanent failure: no recovery possible, tell the core to
	 * disconnect the device
	 */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
5132
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine: re-enable the
 * device, disable D3 wake, reset the hardware and clear wake-up status.
 *
 * Returns PCI_ERS_RESULT_RECOVERED on success, or
 * PCI_ERS_RESULT_DISCONNECT if the device could not be re-enabled.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* adapters needing legacy I/O space must enable all BARs */
	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	/* wake is not wanted while recovering */
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	/* clear any pending wake-up status bits */
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}
5165
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine: re-init manageability, bring
 * the interface back up if it was running, and re-attach it.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	/* if the interface was running and cannot come back up, leave it
	 * detached rather than re-attaching a dead device
	 */
	if (netif_running(netdev) && e1000_up(adapter)) {
		pr_info("can't bring device back up after reset\n");
		return;
	}

	netif_device_attach(netdev);
}
5190
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191/* e1000_main.c */