blob: 3dd4aeb2706d393cd8fbf4998a7582d33b9bafcd [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*******************************************************************************
2
Auke Kok0abb6eb2006-09-27 12:53:14 -07003 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 more details.
Auke Kok0abb6eb2006-09-27 12:53:14 -070014
Linus Torvalds1da177e2005-04-16 15:20:36 -070015 You should have received a copy of the GNU General Public License along with
Auke Kok0abb6eb2006-09-27 12:53:14 -070016 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 Contact Information:
23 Linux NICS <linux.nics@intel.com>
Auke Kok3d41e302006-04-14 19:05:31 -070024 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "e1000.h"
Andrew Mortond0bb53e2006-11-14 10:35:03 -050030#include <net/ip6_checksum.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000031#include <linux/io.h>
Paul Gortmaker70c71602011-05-22 16:47:17 -040032#include <linux/prefetch.h>
Jiri Pirko5622e402011-07-21 03:26:31 +000033#include <linux/bitops.h>
34#include <linux/if_vlan.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000035
/* Driver identification strings; e1000_driver_name and
 * e1000_driver_version are non-static because they are referenced from
 * other files of the driver (e.g. ethtool support).
 */
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

/* Export the device table so userspace tooling (modpost/udev) can map
 * PCI IDs to this module for autoloading.
 */
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
/* Non-static helpers below are shared with other files of this driver
 * (ethtool/param support); everything declared static is local to this
 * translation unit.
 */
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
int e1000_open(struct net_device *netdev);
int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
/* Intentionally-empty .alloc_rx_buf implementation: a no-op buffer
 * allocator that can be installed so the RX refill hook is safe to call
 * while real allocation must not happen.
 */
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll (struct net_device *netdev);
#endif
189
/* copybreak: packets at or below this size are copied into a freshly
 * allocated skb on receive instead of handing up the original buffer.
 * Tunable at module load and at runtime via sysfs (mode 0644).
 */
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
195
/* PCI Error Recovery (AER) callbacks, wired into the pci_driver below. */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};

/* Top-level PCI driver descriptor registered in e1000_init_module(). */
static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
220
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Default netif message level; debug = -1 means "use DEFAULT_MSG_ENABLE". */
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
230
231/**
Emil Tantilov675ad472010-04-27 14:02:58 +0000232 * e1000_get_hw_dev - return device
233 * used by hardware layer to print debugging information
234 *
235 **/
236struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
237{
238 struct e1000_adapter *adapter = hw->back;
239 return adapter->netdev;
240}
241
242/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700243 * e1000_init_module - Driver Registration Routine
244 *
245 * e1000_init_module is the first routine called when the driver is
246 * loaded. All it does is register with the PCI subsystem.
247 **/
Joe Perches64798842008-07-11 15:17:02 -0700248static int __init e1000_init_module(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249{
250 int ret;
Emil Tantilov675ad472010-04-27 14:02:58 +0000251 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252
Emil Tantilov675ad472010-04-27 14:02:58 +0000253 pr_info("%s\n", e1000_copyright);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254
Jeff Garzik29917622006-08-19 17:48:59 -0400255 ret = pci_register_driver(&e1000_driver);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100256 if (copybreak != COPYBREAK_DEFAULT) {
257 if (copybreak == 0)
Emil Tantilov675ad472010-04-27 14:02:58 +0000258 pr_info("copybreak disabled\n");
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100259 else
Emil Tantilov675ad472010-04-27 14:02:58 +0000260 pr_info("copybreak enabled for "
261 "packets <= %u bytes\n", copybreak);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100262 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700263 return ret;
264}
265
266module_init(e1000_init_module);
267
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	/* Unregistering triggers ->remove() for every bound device */
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
280
Auke Kok2db10a02006-06-27 09:06:28 -0700281static int e1000_request_irq(struct e1000_adapter *adapter)
282{
283 struct net_device *netdev = adapter->netdev;
Al Viro3e188262007-12-11 19:49:39 +0000284 irq_handler_t handler = e1000_intr;
Auke Koke94bd232007-05-16 01:49:46 -0700285 int irq_flags = IRQF_SHARED;
286 int err;
Auke Kok2db10a02006-06-27 09:06:28 -0700287
Auke Koke94bd232007-05-16 01:49:46 -0700288 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
Janusz Wolaka48954c2015-09-17 23:34:29 +0200289 netdev);
Auke Koke94bd232007-05-16 01:49:46 -0700290 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -0700291 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
Auke Koke94bd232007-05-16 01:49:46 -0700292 }
Auke Kok2db10a02006-06-27 09:06:28 -0700293
294 return err;
295}
296
297static void e1000_free_irq(struct e1000_adapter *adapter)
298{
299 struct net_device *netdev = adapter->netdev;
300
301 free_irq(adapter->pdev->irq, netdev);
Auke Kok2db10a02006-06-27 09:06:28 -0700302}
303
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Mask every interrupt cause, then flush the posted write so the
	 * mask takes effect before we wait out any in-flight handler.
	 */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}
316
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Unmask the driver's standard interrupt causes and flush the
	 * posted write so the hardware sees it immediately.
	 */
	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100328
/* Keep the management (firmware) VLAN id registered with the stack in
 * sync with the VLAN id found in the manageability cookie, adding the
 * new id and killing the stale one as needed. No-op when VLANs are not
 * in use.
 */
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		/* cookie VLAN not registered yet: add it if firmware says
		 * VLAN support is present, else record "none"
		 */
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		/* drop the previous management vid unless it is still an
		 * active VLAN in its own right
		 */
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
Jeff Kirsherb55ccb32006-01-12 16:50:30 -0800356
Joe Perches64798842008-07-11 15:17:02 -0700357static void e1000_init_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500358{
Joe Perches1dc32912008-07-11 15:17:08 -0700359 struct e1000_hw *hw = &adapter->hw;
360
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500361 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700362 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500363
364 /* disable hardware interception of ARP */
365 manc &= ~(E1000_MANC_ARP_EN);
366
Joe Perches1dc32912008-07-11 15:17:08 -0700367 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500368 }
369}
370
Joe Perches64798842008-07-11 15:17:02 -0700371static void e1000_release_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500372{
Joe Perches1dc32912008-07-11 15:17:08 -0700373 struct e1000_hw *hw = &adapter->hw;
374
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500375 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700376 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500377
378 /* re-enable hardware interception of ARP */
379 manc |= E1000_MANC_ARP_EN;
380
Joe Perches1dc32912008-07-11 15:17:08 -0700381 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500382 }
383}
384
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter = private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	/* program filters/VLANs/manageability before enabling the rings */
	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}
Jeff Kirsher7bfa4812006-01-12 16:50:41 -0800412
/* Bring the interface up after a (re)configuration or reset: program
 * the hardware, clear the DOWN flag, enable NAPI and interrupts, then
 * kick the watchdog via a self-induced link-change interrupt.
 * Always returns 0.
 */
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
432
Auke Kok79f05bf2006-06-27 09:06:32 -0700433/**
434 * e1000_power_up_phy - restore link in case the phy was powered down
435 * @adapter: address of board private structure
436 *
437 * The phy may be powered down to save power and turn off link when the
438 * driver is unloaded and wake on lan is not enabled (among others)
439 * *** this routine MUST be followed by a call to e1000_reset ***
Auke Kok79f05bf2006-06-27 09:06:32 -0700440 **/
Jesse Brandeburgd6582662006-08-16 13:31:33 -0700441void e1000_power_up_phy(struct e1000_adapter *adapter)
Auke Kok79f05bf2006-06-27 09:06:32 -0700442{
Joe Perches1dc32912008-07-11 15:17:08 -0700443 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -0700444 u16 mii_reg = 0;
Auke Kok79f05bf2006-06-27 09:06:32 -0700445
446 /* Just clear the power down bit to wake the phy back up */
Joe Perches1dc32912008-07-11 15:17:08 -0700447 if (hw->media_type == e1000_media_type_copper) {
Auke Kok79f05bf2006-06-27 09:06:32 -0700448 /* according to the manual, the phy will retain its
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000449 * settings across a power-down/up cycle
450 */
Joe Perches1dc32912008-07-11 15:17:08 -0700451 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700452 mii_reg &= ~MII_CR_POWER_DOWN;
Joe Perches1dc32912008-07-11 15:17:08 -0700453 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700454 }
455}
456
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active
	 */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		/* Only these MAC generations may power the PHY down, and
		 * only when manageability is not using the SMBus path.
		 */
		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		/* set the PHY power-down bit and give it time to settle */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}
496
/* Mark the adapter DOWN and synchronously cancel all deferred work so
 * nothing runs against a torn-down device.
 */
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	/*
	 * Since the watchdog task can reschedule other tasks, we should cancel
	 * it first, otherwise we can run into the situation when a work is
	 * still running after the adapter has been turned down.
	 */

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);
}
516
/* Quiesce the interface: disable RX/TX in hardware, stop the stack's
 * queues, drop the carrier, disable NAPI and interrupts, cancel all
 * deferred work, then reset and drain the rings. The ordering of these
 * steps is deliberate -- see the inline comments.
 */
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Set the carrier off after transmits have been disabled in the
	 * hardware, to avoid race conditions with e1000_watchdog() (which
	 * may be running concurrently to us, checking for the carrier
	 * bit to decide whether it should enable transmits again). Such
	 * a race condition would result into transmission being disabled
	 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
	 */
	netif_carrier_off(netdev);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564
/* Restart the interface (down + up) while holding the RESETTING bit as
 * a mutex; spins (sleeping) until any concurrent reset finishes first.
 * Must be called from process context (sleeps).
 */
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
574
/**
 * e1000_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the on-chip packet buffer (PBA) between Rx and Tx FIFOs
 * based on MAC type and current max frame size, derives the flow-control
 * watermarks from the resulting Rx FIFO size, then resets and
 * re-initializes the MAC.  Called at probe time and whenever settings
 * that affect the FIFO layout (e.g. MTU) change.
 */
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		/* older MACs use the fixed "subtract 8KB for jumbo" rule */
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		/* 82547 works around a Tx FIFO hang by tracking the FIFO
		 * head/size in software; seed that state here
		 */
		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation
		 */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment or use Early Receive if available
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/* flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));

	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload
		 */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
729
Ben Hutchings1aa8b472012-07-10 10:56:59 +0000730/* Dump the eeprom for users having checksum issues */
Adrian Bunkb4ea8952008-02-01 08:21:28 -0800731static void e1000_dump_eeprom(struct e1000_adapter *adapter)
Auke Kok67b3c272007-12-17 13:50:23 -0800732{
733 struct net_device *netdev = adapter->netdev;
734 struct ethtool_eeprom eeprom;
735 const struct ethtool_ops *ops = netdev->ethtool_ops;
736 u8 *data;
737 int i;
738 u16 csum_old, csum_new = 0;
739
740 eeprom.len = ops->get_eeprom_len(netdev);
741 eeprom.offset = 0;
742
743 data = kmalloc(eeprom.len, GFP_KERNEL);
Joe Perchese404dec2012-01-29 12:56:23 +0000744 if (!data)
Auke Kok67b3c272007-12-17 13:50:23 -0800745 return;
Auke Kok67b3c272007-12-17 13:50:23 -0800746
747 ops->get_eeprom(netdev, &eeprom, data);
748
749 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
750 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
751 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
752 csum_new += data[i] + (data[i + 1] << 8);
753 csum_new = EEPROM_SUM - csum_new;
754
Emil Tantilov675ad472010-04-27 14:02:58 +0000755 pr_err("/*********************/\n");
756 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
757 pr_err("Calculated : 0x%04x\n", csum_new);
Auke Kok67b3c272007-12-17 13:50:23 -0800758
Emil Tantilov675ad472010-04-27 14:02:58 +0000759 pr_err("Offset Values\n");
760 pr_err("======== ======\n");
Auke Kok67b3c272007-12-17 13:50:23 -0800761 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
762
Emil Tantilov675ad472010-04-27 14:02:58 +0000763 pr_err("Include this output when contacting your support provider.\n");
764 pr_err("This is not a software error! Something bad happened to\n");
765 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
766 pr_err("result in further problems, possibly loss of data,\n");
767 pr_err("corruption or system hangs!\n");
768 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
769 pr_err("which is invalid and requires you to set the proper MAC\n");
770 pr_err("address manually before continuing to enable this network\n");
771 pr_err("device. Please inspect the EEPROM dump and report the\n");
772 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
773 pr_err("/*********************/\n");
Auke Kok67b3c272007-12-17 13:50:23 -0800774
775 kfree(data);
776}
777
778/**
Taku Izumi81250292008-07-11 15:17:44 -0700779 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
780 * @pdev: PCI device information struct
781 *
782 * Return true if an adapter needs ioport resources
783 **/
784static int e1000_is_need_ioport(struct pci_dev *pdev)
785{
786 switch (pdev->device) {
787 case E1000_DEV_ID_82540EM:
788 case E1000_DEV_ID_82540EM_LOM:
789 case E1000_DEV_ID_82540EP:
790 case E1000_DEV_ID_82540EP_LOM:
791 case E1000_DEV_ID_82540EP_LP:
792 case E1000_DEV_ID_82541EI:
793 case E1000_DEV_ID_82541EI_MOBILE:
794 case E1000_DEV_ID_82541ER:
795 case E1000_DEV_ID_82541ER_LOM:
796 case E1000_DEV_ID_82541GI:
797 case E1000_DEV_ID_82541GI_LF:
798 case E1000_DEV_ID_82541GI_MOBILE:
799 case E1000_DEV_ID_82544EI_COPPER:
800 case E1000_DEV_ID_82544EI_FIBER:
801 case E1000_DEV_ID_82544GC_COPPER:
802 case E1000_DEV_ID_82544GC_LOM:
803 case E1000_DEV_ID_82545EM_COPPER:
804 case E1000_DEV_ID_82545EM_FIBER:
805 case E1000_DEV_ID_82546EB_COPPER:
806 case E1000_DEV_ID_82546EB_FIBER:
807 case E1000_DEV_ID_82546EB_QUAD_COPPER:
808 return true;
809 default:
810 return false;
811 }
812}
813
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000814static netdev_features_t e1000_fix_features(struct net_device *netdev,
815 netdev_features_t features)
Jiri Pirko5622e402011-07-21 03:26:31 +0000816{
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000817 /* Since there is no support for separate Rx/Tx vlan accel
818 * enable/disable make sure Tx flag is always in same state as Rx.
Jiri Pirko5622e402011-07-21 03:26:31 +0000819 */
Patrick McHardyf6469682013-04-19 02:04:27 +0000820 if (features & NETIF_F_HW_VLAN_CTAG_RX)
821 features |= NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirko5622e402011-07-21 03:26:31 +0000822 else
Patrick McHardyf6469682013-04-19 02:04:27 +0000823 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirko5622e402011-07-21 03:26:31 +0000824
825 return features;
826}
827
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000828static int e1000_set_features(struct net_device *netdev,
829 netdev_features_t features)
Michał Mirosławe97d3202011-06-08 08:36:42 +0000830{
831 struct e1000_adapter *adapter = netdev_priv(netdev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000832 netdev_features_t changed = features ^ netdev->features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000833
Patrick McHardyf6469682013-04-19 02:04:27 +0000834 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
Jiri Pirko5622e402011-07-21 03:26:31 +0000835 e1000_vlan_mode(netdev, features);
836
Ben Greeare825b732012-04-04 06:01:29 +0000837 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
Michał Mirosławe97d3202011-06-08 08:36:42 +0000838 return 0;
839
Ben Greeare825b732012-04-04 06:01:29 +0000840 netdev->features = features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000841 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
842
843 if (netif_running(netdev))
844 e1000_reinit_locked(adapter);
845 else
846 e1000_reset(adapter);
847
848 return 0;
849}
850
/* net_device_ops callbacks wiring the kernel network stack to this driver */
static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};
869
Taku Izumi81250292008-07-11 15:17:44 -0700870/**
Jesse Brandeburge508be12010-09-07 21:01:12 +0000871 * e1000_init_hw_struct - initialize members of hw struct
872 * @adapter: board private struct
873 * @hw: structure used by e1000_hw.c
874 *
875 * Factors out initialization of the e1000_hw struct to its own function
876 * that can be called very early at init (just after struct allocation).
877 * Fields are initialized based on PCI device information and
878 * OS network device settings (MTU size).
879 * Returns negative error codes if MAC type setup fails.
880 */
881static int e1000_init_hw_struct(struct e1000_adapter *adapter,
882 struct e1000_hw *hw)
883{
884 struct pci_dev *pdev = adapter->pdev;
885
886 /* PCI config space info */
887 hw->vendor_id = pdev->vendor;
888 hw->device_id = pdev->device;
889 hw->subsystem_vendor_id = pdev->subsystem_vendor;
890 hw->subsystem_id = pdev->subsystem_device;
891 hw->revision_id = pdev->revision;
892
893 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
894
895 hw->max_frame_size = adapter->netdev->mtu +
896 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
897 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
898
899 /* identify the MAC */
900 if (e1000_set_mac_type(hw)) {
901 e_err(probe, "Unknown MAC Type\n");
902 return -EIO;
903 }
904
905 switch (hw->mac_type) {
906 default:
907 break;
908 case e1000_82541:
909 case e1000_82547:
910 case e1000_82541_rev_2:
911 case e1000_82547_rev_2:
912 hw->phy_init_script = 1;
913 break;
914 }
915
916 e1000_set_media_type(hw);
917 e1000_get_bus_info(hw);
918
919 hw->wait_autoneg_complete = false;
920 hw->tbi_compatibility_en = true;
921 hw->adaptive_ifs = true;
922
923 /* Copper options */
924
925 if (hw->media_type == e1000_media_type_copper) {
926 hw->mdix = AUTO_ALL_MODES;
927 hw->disable_polarity_correction = false;
928 hw->master_slave = E1000_MASTER_SLAVE;
929 }
930
931 return 0;
932}
933
934/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700935 * e1000_probe - Device Initialization Routine
936 * @pdev: PCI device information struct
937 * @ent: entry in e1000_pci_tbl
938 *
939 * Returns 0 on success, negative on failure
940 *
941 * e1000_probe initializes an adapter identified by a pci_dev structure.
942 * The OS initialization, configuring of the adapter private structure,
943 * and a hardware reset occur.
944 **/
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +0000945static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946{
947 struct net_device *netdev;
Tushar Dave0b76aae2017-12-06 02:26:29 +0530948 struct e1000_adapter *adapter = NULL;
Joe Perches1dc32912008-07-11 15:17:08 -0700949 struct e1000_hw *hw;
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700950
Janusz Wolaka48954c2015-09-17 23:34:29 +0200951 static int cards_found;
952 static int global_quad_port_a; /* global ksp3 port a indication */
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700953 int i, err, pci_using_dac;
Joe Perches406874a2008-04-03 10:06:32 -0700954 u16 eeprom_data = 0;
Dirk Brandewie5377a412011-01-06 14:29:54 +0000955 u16 tmp = 0;
Joe Perches406874a2008-04-03 10:06:32 -0700956 u16 eeprom_apme_mask = E1000_EEPROM_APME;
Taku Izumi81250292008-07-11 15:17:44 -0700957 int bars, need_ioport;
Tushar Dave0b76aae2017-12-06 02:26:29 +0530958 bool disable_dev = false;
Joe Perches0795af52007-10-03 17:59:30 -0700959
Taku Izumi81250292008-07-11 15:17:44 -0700960 /* do not allocate ioport bars when not needed */
961 need_ioport = e1000_is_need_ioport(pdev);
962 if (need_ioport) {
963 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
964 err = pci_enable_device(pdev);
965 } else {
966 bars = pci_select_bars(pdev, IORESOURCE_MEM);
Karsten Keil4d7155b2009-02-03 15:18:01 -0800967 err = pci_enable_device_mem(pdev);
Taku Izumi81250292008-07-11 15:17:44 -0700968 }
Joe Perchesc7be73b2008-07-11 15:17:28 -0700969 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700970 return err;
971
Taku Izumi81250292008-07-11 15:17:44 -0700972 err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
Joe Perchesc7be73b2008-07-11 15:17:28 -0700973 if (err)
Vasily Averin6dd62ab2006-08-28 14:56:22 -0700974 goto err_pci_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975
976 pci_set_master(pdev);
Nick Nunleydbb5aae2010-02-03 14:49:48 +0000977 err = pci_save_state(pdev);
978 if (err)
979 goto err_alloc_etherdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980
Vasily Averin6dd62ab2006-08-28 14:56:22 -0700981 err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
Vasily Averin6dd62ab2006-08-28 14:56:22 -0700983 if (!netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 goto err_alloc_etherdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986 SET_NETDEV_DEV(netdev, &pdev->dev);
987
988 pci_set_drvdata(pdev, netdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -0700989 adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990 adapter->netdev = netdev;
991 adapter->pdev = pdev;
stephen hemmingerb3f4d592012-03-13 06:04:20 +0000992 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Taku Izumi81250292008-07-11 15:17:44 -0700993 adapter->bars = bars;
994 adapter->need_ioport = need_ioport;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700995
Joe Perches1dc32912008-07-11 15:17:08 -0700996 hw = &adapter->hw;
997 hw->back = adapter;
998
Vasily Averin6dd62ab2006-08-28 14:56:22 -0700999 err = -EIO;
Arjan van de Ven275f1652008-10-20 21:42:39 -07001000 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
Joe Perches1dc32912008-07-11 15:17:08 -07001001 if (!hw->hw_addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001002 goto err_ioremap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003
Taku Izumi81250292008-07-11 15:17:44 -07001004 if (adapter->need_ioport) {
1005 for (i = BAR_1; i <= BAR_5; i++) {
1006 if (pci_resource_len(pdev, i) == 0)
1007 continue;
1008 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1009 hw->io_base = pci_resource_start(pdev, i);
1010 break;
1011 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012 }
1013 }
1014
Jesse Brandeburge508be12010-09-07 21:01:12 +00001015 /* make ready for any if (hw->...) below */
1016 err = e1000_init_hw_struct(adapter, hw);
1017 if (err)
1018 goto err_sw_init;
1019
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001020 /* there is a workaround being applied below that limits
Jesse Brandeburge508be12010-09-07 21:01:12 +00001021 * 64-bit DMA addresses to 64-bit hardware. There are some
1022 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
1023 */
1024 pci_using_dac = 0;
1025 if ((hw->bus_type == e1000_bus_type_pcix) &&
Russell King9931a262013-06-26 23:49:11 +01001026 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
Jesse Brandeburge508be12010-09-07 21:01:12 +00001027 pci_using_dac = 1;
Jesse Brandeburge508be12010-09-07 21:01:12 +00001028 } else {
Russell King9931a262013-06-26 23:49:11 +01001029 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Dean Nelson19a0b672010-11-11 05:50:25 +00001030 if (err) {
1031 pr_err("No usable DMA config, aborting\n");
1032 goto err_dma;
1033 }
Jesse Brandeburge508be12010-09-07 21:01:12 +00001034 }
1035
Stephen Hemminger0e7614b2008-11-19 22:18:22 -08001036 netdev->netdev_ops = &e1000_netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037 e1000_set_ethtool_ops(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038 netdev->watchdog_timeo = 5 * HZ;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001039 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
Stephen Hemminger0e7614b2008-11-19 22:18:22 -08001040
Auke Kok0eb5a342006-09-27 12:53:17 -07001041 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043 adapter->bd_number = cards_found;
1044
1045 /* setup the private structure */
1046
Joe Perchesc7be73b2008-07-11 15:17:28 -07001047 err = e1000_sw_init(adapter);
1048 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001049 goto err_sw_init;
1050
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001051 err = -EIO;
Dirk Brandewie5377a412011-01-06 14:29:54 +00001052 if (hw->mac_type == e1000_ce4100) {
Florian Fainelli13acde82012-01-04 20:23:35 +00001053 hw->ce4100_gbe_mdio_base_virt =
1054 ioremap(pci_resource_start(pdev, BAR_1),
Janusz Wolaka48954c2015-09-17 23:34:29 +02001055 pci_resource_len(pdev, BAR_1));
Dirk Brandewie5377a412011-01-06 14:29:54 +00001056
Florian Fainelli13acde82012-01-04 20:23:35 +00001057 if (!hw->ce4100_gbe_mdio_base_virt)
Dirk Brandewie5377a412011-01-06 14:29:54 +00001058 goto err_mdio_ioremap;
1059 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001060
Joe Perches1dc32912008-07-11 15:17:08 -07001061 if (hw->mac_type >= e1000_82543) {
Michał Mirosławe97d3202011-06-08 08:36:42 +00001062 netdev->hw_features = NETIF_F_SG |
Jiri Pirko5622e402011-07-21 03:26:31 +00001063 NETIF_F_HW_CSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00001064 NETIF_F_HW_VLAN_CTAG_RX;
1065 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1066 NETIF_F_HW_VLAN_CTAG_FILTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067 }
1068
Joe Perches1dc32912008-07-11 15:17:08 -07001069 if ((hw->mac_type >= e1000_82544) &&
1070 (hw->mac_type != e1000_82547))
Michał Mirosławe97d3202011-06-08 08:36:42 +00001071 netdev->hw_features |= NETIF_F_TSO;
1072
Ben Greear11a78dc2012-02-11 15:40:01 +00001073 netdev->priv_flags |= IFF_SUPP_NOFCS;
1074
Michał Mirosławe97d3202011-06-08 08:36:42 +00001075 netdev->features |= netdev->hw_features;
Tushar Dave75006732012-06-12 13:03:29 +00001076 netdev->hw_features |= (NETIF_F_RXCSUM |
1077 NETIF_F_RXALL |
1078 NETIF_F_RXFCS);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001079
Yi Zou7b872a52010-09-22 17:57:58 +00001080 if (pci_using_dac) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00001082 netdev->vlan_features |= NETIF_F_HIGHDMA;
1083 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084
Tushar Dave75006732012-06-12 13:03:29 +00001085 netdev->vlan_features |= (NETIF_F_TSO |
1086 NETIF_F_HW_CSUM |
1087 NETIF_F_SG);
Patrick McHardy20501a62008-10-11 12:25:59 -07001088
Francesco Ruggeria22bb0b2014-10-22 15:29:24 +00001089 /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1090 if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1091 hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1092 netdev->priv_flags |= IFF_UNICAST_FLT;
Jiri Pirko01789342011-08-16 06:29:00 +00001093
Jarod Wilson91c527a2016-10-17 15:54:05 -04001094 /* MTU range: 46 - 16110 */
1095 netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1096 netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
1097
Joe Perches1dc32912008-07-11 15:17:08 -07001098 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001099
Auke Kokcd94dd02006-06-27 09:08:22 -07001100 /* initialize eeprom parameters */
Joe Perches1dc32912008-07-11 15:17:08 -07001101 if (e1000_init_eeprom_params(hw)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001102 e_err(probe, "EEPROM initialization failed\n");
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001103 goto err_eeprom;
Auke Kokcd94dd02006-06-27 09:08:22 -07001104 }
1105
Jesse Brandeburg96838a42006-01-18 13:01:39 -08001106 /* before reading the EEPROM, reset the controller to
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001107 * put the device in a known good starting state
1108 */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08001109
Joe Perches1dc32912008-07-11 15:17:08 -07001110 e1000_reset_hw(hw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111
1112 /* make sure the EEPROM is good */
Joe Perches1dc32912008-07-11 15:17:08 -07001113 if (e1000_validate_eeprom_checksum(hw) < 0) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001114 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
Auke Kok67b3c272007-12-17 13:50:23 -08001115 e1000_dump_eeprom(adapter);
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001116 /* set MAC address to all zeroes to invalidate and temporary
Auke Kok67b3c272007-12-17 13:50:23 -08001117 * disable this device for the user. This blocks regular
1118 * traffic while still permitting ethtool ioctls from reaching
1119 * the hardware as well as allowing the user to run the
1120 * interface after manually setting a hw addr using
1121 * `ip set address`
1122 */
Joe Perches1dc32912008-07-11 15:17:08 -07001123 memset(hw->mac_addr, 0, netdev->addr_len);
Auke Kok67b3c272007-12-17 13:50:23 -08001124 } else {
1125 /* copy the MAC address out of the EEPROM */
Joe Perches1dc32912008-07-11 15:17:08 -07001126 if (e1000_read_mac_addr(hw))
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001127 e_err(probe, "EEPROM Read Error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 }
Joe Perchesdbedd442015-03-06 20:49:12 -08001129 /* don't block initialization here due to bad MAC address */
Joe Perches1dc32912008-07-11 15:17:08 -07001130 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131
Jiri Pirkoaaeb6cd2013-01-08 01:38:26 +00001132 if (!is_valid_ether_addr(netdev->dev_addr))
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001133 e_err(probe, "Invalid MAC Address\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135
Jesse Brandeburga4010af2011-10-05 07:24:41 +00001136 INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1137 INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1138 e1000_82547_tx_fifo_stall_task);
1139 INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
David Howells65f27f32006-11-22 14:55:48 +00001140 INIT_WORK(&adapter->reset_task, e1000_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141
Linus Torvalds1da177e2005-04-16 15:20:36 -07001142 e1000_check_options(adapter);
1143
1144 /* Initial Wake on LAN setting
1145 * If APM wake is enabled in the EEPROM,
1146 * enable the ACPI Magic Packet filter
1147 */
1148
Joe Perches1dc32912008-07-11 15:17:08 -07001149 switch (hw->mac_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150 case e1000_82542_rev2_0:
1151 case e1000_82542_rev2_1:
1152 case e1000_82543:
1153 break;
1154 case e1000_82544:
Joe Perches1dc32912008-07-11 15:17:08 -07001155 e1000_read_eeprom(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1157 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1158 break;
1159 case e1000_82546:
1160 case e1000_82546_rev_3:
Janusz Wolaka48954c2015-09-17 23:34:29 +02001161 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
Joe Perches1dc32912008-07-11 15:17:08 -07001162 e1000_read_eeprom(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1164 break;
1165 }
1166 /* Fall Through */
1167 default:
Joe Perches1dc32912008-07-11 15:17:08 -07001168 e1000_read_eeprom(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1170 break;
1171 }
Jesse Brandeburg96838a42006-01-18 13:01:39 -08001172 if (eeprom_data & eeprom_apme_mask)
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001173 adapter->eeprom_wol |= E1000_WUFC_MAG;
1174
1175 /* now that we have the eeprom settings, apply the special cases
1176 * where the eeprom may be wrong or the board simply won't support
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001177 * wake on lan on a particular port
1178 */
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001179 switch (pdev->device) {
1180 case E1000_DEV_ID_82546GB_PCIE:
1181 adapter->eeprom_wol = 0;
1182 break;
1183 case E1000_DEV_ID_82546EB_FIBER:
1184 case E1000_DEV_ID_82546GB_FIBER:
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001185 /* Wake events only supported on port A for dual fiber
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001186 * regardless of eeprom setting
1187 */
Joe Perches1dc32912008-07-11 15:17:08 -07001188 if (er32(STATUS) & E1000_STATUS_FUNC_1)
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001189 adapter->eeprom_wol = 0;
1190 break;
1191 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1192 /* if quad port adapter, disable WoL on all but port A */
1193 if (global_quad_port_a != 0)
1194 adapter->eeprom_wol = 0;
1195 else
Rusty Russell3db1cd52011-12-19 13:56:45 +00001196 adapter->quad_port_a = true;
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001197 /* Reset for multiple quad port adapters */
1198 if (++global_quad_port_a == 4)
1199 global_quad_port_a = 0;
1200 break;
1201 }
1202
1203 /* initialize the wol settings based on the eeprom settings */
1204 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\de126482008-11-07 20:30:19 +00001205 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206
Dirk Brandewie5377a412011-01-06 14:29:54 +00001207 /* Auto detect PHY address */
1208 if (hw->mac_type == e1000_ce4100) {
1209 for (i = 0; i < 32; i++) {
1210 hw->phy_addr = i;
1211 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
Jean Sacren4e01f3a2015-09-19 05:08:40 -06001212
1213 if (tmp != 0 && tmp != 0xFF)
Dirk Brandewie5377a412011-01-06 14:29:54 +00001214 break;
1215 }
Jean Sacren4e01f3a2015-09-19 05:08:40 -06001216
1217 if (i >= 32)
1218 goto err_eeprom;
Dirk Brandewie5377a412011-01-06 14:29:54 +00001219 }
1220
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 /* reset the hardware with the new settings */
1222 e1000_reset(adapter);
1223
Auke Kok416b5d12007-06-01 10:22:39 -07001224 strcpy(netdev->name, "eth%d");
Joe Perchesc7be73b2008-07-11 15:17:28 -07001225 err = register_netdev(netdev);
1226 if (err)
Auke Kok416b5d12007-06-01 10:22:39 -07001227 goto err_register;
Auke Kok1314bbf2006-09-27 12:54:02 -07001228
Jiri Pirko52f55092012-03-20 18:10:01 +00001229 e1000_vlan_filter_on_off(adapter, false);
Jiri Pirko5622e402011-07-21 03:26:31 +00001230
Emil Tantilov675ad472010-04-27 14:02:58 +00001231 /* print bus type/speed/width info */
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001232 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
Joe Perches7837e582010-06-11 12:51:49 +00001233 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1234 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1235 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1236 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1237 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1238 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1239 netdev->dev_addr);
Emil Tantilov675ad472010-04-27 14:02:58 +00001240
Jesse Brandeburgeb62efd2009-04-17 20:44:36 +00001241 /* carrier off reporting is important to ethtool even BEFORE open */
1242 netif_carrier_off(netdev);
1243
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001244 e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245
1246 cards_found++;
1247 return 0;
1248
1249err_register:
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001250err_eeprom:
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00001251 e1000_phy_hw_reset(hw);
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001252
Joe Perches1dc32912008-07-11 15:17:08 -07001253 if (hw->flash_address)
1254 iounmap(hw->flash_address);
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001255 kfree(adapter->tx_ring);
1256 kfree(adapter->rx_ring);
Jesse Brandeburge508be12010-09-07 21:01:12 +00001257err_dma:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258err_sw_init:
Dirk Brandewie5377a412011-01-06 14:29:54 +00001259err_mdio_ioremap:
Florian Fainelli13acde82012-01-04 20:23:35 +00001260 iounmap(hw->ce4100_gbe_mdio_base_virt);
Joe Perches1dc32912008-07-11 15:17:08 -07001261 iounmap(hw->hw_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262err_ioremap:
Tushar Dave0b76aae2017-12-06 02:26:29 +05301263 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 free_netdev(netdev);
1265err_alloc_etherdev:
Taku Izumi81250292008-07-11 15:17:44 -07001266 pci_release_selected_regions(pdev, bars);
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001267err_pci_reg:
Tushar Dave0b76aae2017-12-06 02:26:29 +05301268 if (!adapter || disable_dev)
1269 pci_disable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 return err;
1271}
1272
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. That could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	bool disable_dev;

	/* stop the watchdog/reset tasks and bring the interface down
	 * before tearing anything else apart
	 */
	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	/* detach from the network stack; no new opens/xmits after this */
	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	/* ce4100 parts map an extra MDIO region that must be unmapped too */
	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	/* __E1000_DISABLED guards against a double pci_disable_device()
	 * (e.g. if the error-recovery path already disabled the device);
	 * sample it before free_netdev() frees the adapter structure
	 */
	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
	free_netdev(netdev);

	if (disable_dev)
		pci_disable_device(pdev);
}
1312
1313/**
1314 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1315 * @adapter: board private structure to initialize
1316 *
1317 * e1000_sw_init initializes the Adapter private data structure.
Jesse Brandeburge508be12010-09-07 21:01:12 +00001318 * e1000_init_hw_struct MUST be called before this function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05001320static int e1000_sw_init(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321{
Auke Kokeb0f8052006-07-14 16:14:48 -07001322 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001324 adapter->num_tx_queues = 1;
1325 adapter->num_rx_queues = 1;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001326
1327 if (e1000_alloc_queues(adapter)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001328 e_err(probe, "Unable to allocate memory for queues\n");
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001329 return -ENOMEM;
1330 }
1331
Herbert Xu47313052007-05-29 15:07:31 -07001332 /* Explicitly disable IRQ since the NIC can be in any state. */
Herbert Xu47313052007-05-29 15:07:31 -07001333 e1000_irq_disable(adapter);
1334
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 spin_lock_init(&adapter->stats_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001336
Auke Kok1314bbf2006-09-27 12:54:02 -07001337 set_bit(__E1000_DOWN, &adapter->flags);
1338
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 return 0;
1340}
1341
1342/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001343 * e1000_alloc_queues - Allocate memory for all rings
1344 * @adapter: board private structure to initialize
1345 *
1346 * We allocate one ring per queue at run-time since we don't know the
Wang Chen3e1d7cd2008-12-03 22:07:10 -08001347 * number of queues at compile-time.
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001348 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05001349static int e1000_alloc_queues(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001350{
Yan Burman1c7e5b12007-03-06 08:58:04 -08001351 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
Janusz Wolaka48954c2015-09-17 23:34:29 +02001352 sizeof(struct e1000_tx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001353 if (!adapter->tx_ring)
1354 return -ENOMEM;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001355
Yan Burman1c7e5b12007-03-06 08:58:04 -08001356 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
Janusz Wolaka48954c2015-09-17 23:34:29 +02001357 sizeof(struct e1000_rx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001358 if (!adapter->rx_ring) {
1359 kfree(adapter->tx_ring);
1360 return -ENOMEM;
1361 }
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001362
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001363 return E1000_SUCCESS;
1364}
1365
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	/* report no-carrier until the link interrupt says otherwise */
	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

	/* unwind in reverse order of acquisition */
err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
1443
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int count = E1000_CHECK_RESET_COUNT;

	/* wait (bounded, ~10-20ms per try) for an in-flight reset to end
	 * before tearing the interface down
	 */
	while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
		usleep_range(10000, 20000);

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it)
	 */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
				       adapter->mng_vlan_id);
	}

	return 0;
}
1484
1485/**
1486 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1487 * @adapter: address of board private structure
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001488 * @start: address of beginning of memory
1489 * @len: length of memory
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 **/
Joe Perches64798842008-07-11 15:17:02 -07001491static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1492 unsigned long len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493{
Joe Perches1dc32912008-07-11 15:17:08 -07001494 struct e1000_hw *hw = &adapter->hw;
Joe Perchese982f172008-07-11 15:17:18 -07001495 unsigned long begin = (unsigned long)start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 unsigned long end = begin + len;
1497
Malli Chilakala26483452005-04-28 19:44:46 -07001498 /* First rev 82545 and 82546 need to not allow any memory
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001499 * write location to cross 64k boundary due to errata 23
1500 */
Joe Perches1dc32912008-07-11 15:17:08 -07001501 if (hw->mac_type == e1000_82545 ||
Dirk Brandewie5377a412011-01-06 14:29:54 +00001502 hw->mac_type == e1000_ce4100 ||
Joe Perches1dc32912008-07-11 15:17:08 -07001503 hw->mac_type == e1000_82546) {
Joe Perchesc3033b02008-03-21 11:06:25 -07001504 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 }
1506
Joe Perchesc3033b02008-03-21 11:06:25 -07001507 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508}
1509
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Allocates the software buffer_info array plus the DMA-coherent
 * descriptor ring for one Tx queue.  If the first ring allocation
 * crosses a 64 kB boundary (errata 23), one retry is made while the
 * offending buffer is still held, so the allocator cannot hand the
 * same region back.
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_tx_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info)
		return -ENOMEM;

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
		/* shared failure exit: the errata retry path below jumps
		 * back here after releasing its DMA buffers
		 */
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
1580
1581/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001582 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1583 * (Descriptors) for all queues
1584 * @adapter: board private structure
1585 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001586 * Return 0 on success, negative on failure
1587 **/
Joe Perches64798842008-07-11 15:17:02 -07001588int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001589{
1590 int i, err = 0;
1591
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001592 for (i = 0; i < adapter->num_tx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001593 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1594 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001595 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001596 for (i-- ; i >= 0; i--)
1597 e1000_free_tx_resources(adapter,
1598 &adapter->tx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001599 break;
1600 }
1601 }
1602
1603 return err;
1604}
1605
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset: descriptor ring
 * base/length/head/tail registers, inter-packet gap, interrupt delay
 * timers, the per-descriptor command template, and finally TCTL.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		/* older 82542 parts use different register offsets for
		 * the head/tail pointers
		 */
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	/* request descriptor status write-back: RPS on pre-82543,
	 * RS on everything newer
	 */
	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path.
	 */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	/* write TCTL last so the unit starts with everything else set */
	ew32(TCTL, tctl);

}
1699
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Allocates the software buffer_info array plus the DMA-coherent
 * descriptor ring for one Rx queue, with the same errata-23 retry
 * scheme as the Tx path: re-allocate once while still holding the
 * misaligned buffer so the allocator cannot return the same region.
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info)
		return -ENOMEM;

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
		/* shared failure exit, also reached from the retry path */
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}
1772
1773/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001774 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1775 * (Descriptors) for all queues
1776 * @adapter: board private structure
1777 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001778 * Return 0 on success, negative on failure
1779 **/
Joe Perches64798842008-07-11 15:17:02 -07001780int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001781{
1782 int i, err = 0;
1783
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001784 for (i = 0; i < adapter->num_rx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001785 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1786 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001787 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001788 for (i-- ; i >= 0; i--)
1789 e1000_free_rx_resources(adapter,
1790 &adapter->rx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001791 break;
1792 }
1793 }
1794
1795 return err;
1796}
1797
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 *
 * Builds the RCTL value from the current adapter settings (multicast
 * filter mode, TBI compatibility, MTU, Rx buffer size, RXALL feature)
 * and writes it to the hardware.
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	/* clear the multicast-offset field before re-inserting it */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* TBI compatibility requires accepting bad (SBP) packets */
	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* long-packet enable only when running with a jumbo MTU */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		/* 2048 is encoded without the buffer-size-extension bit */
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
1864
1865/**
1866 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1867 * @adapter: board private structure
1868 *
1869 * Configure the Rx unit of the MAC after a reset.
1870 **/
Joe Perches64798842008-07-11 15:17:02 -07001871static void e1000_configure_rx(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872{
Joe Perches406874a2008-04-03 10:06:32 -07001873 u64 rdba;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001874 struct e1000_hw *hw = &adapter->hw;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00001875 u32 rdlen, rctl, rxcsum;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001876
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00001877 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1878 rdlen = adapter->rx_ring[0].count *
Janusz Wolaka48954c2015-09-17 23:34:29 +02001879 sizeof(struct e1000_rx_desc);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00001880 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1881 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1882 } else {
1883 rdlen = adapter->rx_ring[0].count *
Janusz Wolaka48954c2015-09-17 23:34:29 +02001884 sizeof(struct e1000_rx_desc);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00001885 adapter->clean_rx = e1000_clean_rx_irq;
1886 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1887 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888
1889 /* disable receives while setting up the descriptors */
Joe Perches1dc32912008-07-11 15:17:08 -07001890 rctl = er32(RCTL);
1891 ew32(RCTL, rctl & ~E1000_RCTL_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892
1893 /* set the Receive Delay Timer Register */
Joe Perches1dc32912008-07-11 15:17:08 -07001894 ew32(RDTR, adapter->rx_int_delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001896 if (hw->mac_type >= e1000_82540) {
Joe Perches1dc32912008-07-11 15:17:08 -07001897 ew32(RADV, adapter->rx_abs_int_delay);
Jesse Brandeburg835bb122006-11-01 08:48:13 -08001898 if (adapter->itr_setting != 0)
Joe Perches1dc32912008-07-11 15:17:08 -07001899 ew32(ITR, 1000000000 / (adapter->itr * 256));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 }
1901
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001902 /* Setup the HW Rx Head and Tail Descriptor Pointers and
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001903 * the Base and Length of the Rx Descriptor Ring
1904 */
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001905 switch (adapter->num_rx_queues) {
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -04001906 case 1:
1907 default:
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001908 rdba = adapter->rx_ring[0].dma;
Joe Perches1dc32912008-07-11 15:17:08 -07001909 ew32(RDLEN, rdlen);
1910 ew32(RDBAH, (rdba >> 32));
1911 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1912 ew32(RDT, 0);
1913 ew32(RDH, 0);
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001914 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1915 E1000_RDH : E1000_82542_RDH);
1916 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1917 E1000_RDT : E1000_82542_RDT);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001918 break;
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -04001919 }
1920
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001922 if (hw->mac_type >= e1000_82543) {
Joe Perches1dc32912008-07-11 15:17:08 -07001923 rxcsum = er32(RXCSUM);
Brandeburg, Jesse630b25c2008-09-16 13:01:28 -07001924 if (adapter->rx_csum)
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001925 rxcsum |= E1000_RXCSUM_TUOFL;
Brandeburg, Jesse630b25c2008-09-16 13:01:28 -07001926 else
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001927 /* don't need to clear IPPCSE as it defaults to 0 */
Brandeburg, Jesse630b25c2008-09-16 13:01:28 -07001928 rxcsum &= ~E1000_RXCSUM_TUOFL;
Joe Perches1dc32912008-07-11 15:17:08 -07001929 ew32(RXCSUM, rxcsum);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930 }
1931
1932 /* Enable Receives */
Dean Nelsond5bc77a2011-09-16 16:52:54 +00001933 ew32(RCTL, rctl | E1000_RCTL_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934}
1935
1936/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001937 * e1000_free_tx_resources - Free Tx Resources per Queue
1938 * @adapter: board private structure
1939 * @tx_ring: Tx descriptor ring for a specific queue
1940 *
1941 * Free all transmit software resources
1942 **/
Joe Perches64798842008-07-11 15:17:02 -07001943static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1944 struct e1000_tx_ring *tx_ring)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001945{
1946 struct pci_dev *pdev = adapter->pdev;
1947
1948 e1000_clean_tx_ring(adapter, tx_ring);
1949
1950 vfree(tx_ring->buffer_info);
1951 tx_ring->buffer_info = NULL;
1952
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001953 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1954 tx_ring->dma);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001955
1956 tx_ring->desc = NULL;
1957}
1958
1959/**
1960 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 * @adapter: board private structure
1962 *
1963 * Free all transmit software resources
1964 **/
Joe Perches64798842008-07-11 15:17:02 -07001965void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966{
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001967 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001969 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001970 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971}
1972
Florian Westphal580f3212014-09-03 13:34:31 +00001973static void
1974e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1975 struct e1000_tx_buffer *buffer_info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976{
Alexander Duyck602c0552009-12-02 16:46:00 +00001977 if (buffer_info->dma) {
1978 if (buffer_info->mapped_as_page)
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001979 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1980 buffer_info->length, DMA_TO_DEVICE);
Alexander Duyck602c0552009-12-02 16:46:00 +00001981 else
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001982 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
Alexander Duyck602c0552009-12-02 16:46:00 +00001983 buffer_info->length,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001984 DMA_TO_DEVICE);
Alexander Duyck602c0552009-12-02 16:46:00 +00001985 buffer_info->dma = 0;
1986 }
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001987 if (buffer_info->skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 dev_kfree_skb_any(buffer_info->skb);
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001989 buffer_info->skb = NULL;
1990 }
Alexander Duyck37e73df2009-03-25 21:58:45 +00001991 buffer_info->time_stamp = 0;
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001992 /* buffer_info must be completely set up in the transmit path */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993}
1994
1995/**
1996 * e1000_clean_tx_ring - Free Tx Buffers
1997 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001998 * @tx_ring: ring to be cleaned
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	/* reset the queue's byte/packet accounting to match the
	 * now-empty ring
	 */
	netdev_reset_queue(adapter->netdev);
	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = false;

	/* point the hardware head and tail registers back at the
	 * start of the (now empty) ring
	 */
	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
}
2030
2031/**
2032 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2033 * @adapter: board private structure
2034 **/
Joe Perches64798842008-07-11 15:17:02 -07002035static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002036{
2037 int i;
2038
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002039 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002040 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041}
2042
2043/**
2044 * e1000_free_rx_resources - Free Rx Resources
2045 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002046 * @rx_ring: ring to clean the resources from
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047 *
2048 * Free all receive software resources
2049 **/
Joe Perches64798842008-07-11 15:17:02 -07002050static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2051 struct e1000_rx_ring *rx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 struct pci_dev *pdev = adapter->pdev;
2054
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002055 e1000_clean_rx_ring(adapter, rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056
2057 vfree(rx_ring->buffer_info);
2058 rx_ring->buffer_info = NULL;
2059
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002060 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2061 rx_ring->dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062
2063 rx_ring->desc = NULL;
2064}
2065
2066/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002067 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002069 *
2070 * Free all receive software resources
2071 **/
Joe Perches64798842008-07-11 15:17:02 -07002072void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002073{
2074 int i;
2075
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002076 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002077 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2078}
2079
Florian Westphal13809602014-09-03 13:34:36 +00002080#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2081static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2082{
2083 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2084 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2085}
2086
2087static void *e1000_alloc_frag(const struct e1000_adapter *a)
2088{
2089 unsigned int len = e1000_frag_len(a);
2090 u8 *data = netdev_alloc_frag(len);
2091
2092 if (likely(data))
2093 data += E1000_HEADROOM;
2094 return data;
2095}
2096
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002097/**
2098 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2099 * @adapter: board private structure
2100 * @rx_ring: ring to free buffers from
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_rx_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx netfrags; the unmap/free calls must match the
	 * allocation scheme of the active receive path: the standard
	 * path maps page-fragment data with dma_map_single and frees it
	 * with skb_free_frag, the jumbo path maps whole pages
	 */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (adapter->clean_rx == e1000_clean_rx_irq) {
			if (buffer_info->dma)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			if (buffer_info->rxbuf.data) {
				skb_free_frag(buffer_info->rxbuf.data);
				buffer_info->rxbuf.data = NULL;
			}
		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
			if (buffer_info->dma)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       adapter->rx_buffer_len,
					       DMA_FROM_DEVICE);
			if (buffer_info->rxbuf.page) {
				put_page(buffer_info->rxbuf.page);
				buffer_info->rxbuf.page = NULL;
			}
		}

		buffer_info->dma = 0;
	}

	/* there also may be some cached data from a chained receive */
	napi_free_frags(&adapter->napi);
	rx_ring->rx_skb_top = NULL;

	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* point the hardware head and tail registers back at the
	 * start of the (now empty) ring
	 */
	writel(0, hw->hw_addr + rx_ring->rdh);
	writel(0, hw->hw_addr + rx_ring->rdt);
}
2153
2154/**
2155 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2156 * @adapter: board private structure
2157 **/
Joe Perches64798842008-07-11 15:17:02 -07002158static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002159{
2160 int i;
2161
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002162 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002163 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164}
2165
2166/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2167 * and memory write and invalidate disabled for certain operations
2168 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* per the comment above: the 82542 rev2 needs MWI disabled
	 * while the receive unit is held in reset
	 */
	e1000_pci_clear_mwi(hw);

	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	/* flush the write, then give the reset time to take effect */
	E1000_WRITE_FLUSH();
	mdelay(5);

	/* anything on the Rx rings is stale once the receiver is reset */
	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
2186
static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* take the receive unit back out of reset */
	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	/* flush the write, then give the hardware time to settle */
	E1000_WRITE_FLUSH();
	mdelay(5);

	/* restore MWI only if the PCI command word had it enabled */
	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		/* reprogram the Rx unit and refill the ring that was
		 * emptied by e1000_enter_82542_rst
		 */
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
2209
2210/**
2211 * e1000_set_mac - Change the Ethernet Address of the NIC
2212 * @netdev: network interface device structure
2213 * @p: pointer to an address structure
2214 *
2215 * Returns 0 on success, negative on failure
2216 **/
static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	/* reject multicast/zero addresses before touching hardware */
	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* keep the netdev and the hw shadow copy in sync */
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);

	/* RAR slot 0 holds the station address */
	e1000_rar_set(hw, hw->mac_addr, 0);

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}
2241
2242/**
Patrick McHardydb0ce502007-11-13 20:54:59 -08002243 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 * @netdev: network interface device structure
2245 *
Patrick McHardydb0ce502007-11-13 20:54:59 -08002246 * The set_rx_mode entry point is called whenever the unicast or multicast
2247 * address lists or the network interface flags are updated. This routine is
2248 * responsible for configuring the hardware for proper unicast, multicast,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 * promiscuous mode, and all-multi behavior.
2250 **/
static void e1000_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	bool use_uc = false;
	u32 rctl;
	u32 hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
	/* build the multicast hash table in memory first, then write it
	 * out in one pass; GFP_ATOMIC because this can run in atomic
	 * context
	 */
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);

	if (!mcarray)
		return;

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rctl |= E1000_RCTL_MPE;
		else
			rctl &= ~E1000_RCTL_MPE;
		/* Enable VLAN filter if there is a VLAN */
		if (e1000_vlan_used(adapter))
			rctl |= E1000_RCTL_VFE;
	}

	/* fall back to unicast promiscuous mode when the unicast list
	 * cannot fit in the receive address registers (slot 0 is the
	 * station address)
	 */
	if (netdev_uc_count(netdev) > rar_entries - 1) {
		rctl |= E1000_RCTL_UPE;
	} else if (!(netdev->flags & IFF_PROMISC)) {
		rctl &= ~E1000_RCTL_UPE;
		use_uc = true;
	}

	ew32(RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address
	 * if there are not 14 addresses, go ahead and clear the filters
	 */
	i = 1;
	if (use_uc)
		netdev_for_each_uc_addr(ha, netdev) {
			if (i == rar_entries)
				break;
			e1000_rar_set(hw, ha->addr, i++);
		}

	/* multicast addresses fill the remaining RAR slots; overflow
	 * goes into the in-memory hash table
	 */
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == rar_entries) {
			/* load any remaining addresses into the hash table */
			u32 hash_reg, hash_bit, mta;
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
		}
	}

	/* clear any RAR slots left over from a previous configuration;
	 * each slot is a register pair (low/high)
	 */
	for (; i < rar_entries; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
		E1000_WRITE_FLUSH();
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		E1000_WRITE_FLUSH();
	}

	/* write the hash table completely, write from bottom to avoid
	 * both stupid write combining chipsets, and flushing each write
	 */
	for (i = mta_reg_count - 1; i >= 0 ; i--) {
		/* The 82544 has an errata where writing odd offsets
		 * overwrites the previous even offset, but writing
		 * backwards over the range solves the issue by always
		 * writing the odd offset first
		 */
		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
	}
	E1000_WRITE_FLUSH();

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	kfree(mcarray);
}
2351
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002352/**
2353 * e1000_update_phy_info_task - get phy info
2354 * @work: work struct contained inside adapter struct
2355 *
2356 * Need to wait a few seconds after link up to get diagnostic information from
2357 * the phy
2358 */
Jesse Brandeburg5cf42fc2010-09-22 18:22:17 +00002359static void e1000_update_phy_info_task(struct work_struct *work)
2360{
2361 struct e1000_adapter *adapter = container_of(work,
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002362 struct e1000_adapter,
2363 phy_info_task.work);
Vladimir Davydovb2f963bf2013-11-23 07:17:56 +00002364
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002365 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366}
2367
2368/**
Jesse Brandeburg5cf42fc2010-09-22 18:22:17 +00002369 * e1000_82547_tx_fifo_stall_task - task to complete work
2370 * @work: work struct contained inside adapter struct
2371 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	if (atomic_read(&adapter->tx_fifo_stall)) {
		/* the FIFO pointers may only be rewritten once both the
		 * descriptor ring (TDT/TDH) and the on-chip FIFO
		 * (TDFT/TDFH, TDFTS/TDFHS) are fully drained
		 */
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			tctl = er32(TCTL);
			/* disable transmits while resetting the FIFO
			 * head/tail/saved pointers, then restore TCTL
			 */
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			/* not drained yet - retry shortly unless the
			 * interface is going down
			 */
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
}
2402
/* Report whether the adapter currently has link, refreshing the link
 * state from hardware as required by the media type.
 */
bool e1000_has_link(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or rx
	 * sequence error interrupt (except on intel ce4100).
	 * get_link_status will stay false until the
	 * e1000_check_for_link establishes link for copper adapters
	 * ONLY
	 */
	switch (hw->media_type) {
	case e1000_media_type_copper:
		/* ce4100 has no LSC interrupt, so always poll the PHY */
		if (hw->mac_type == e1000_ce4100)
			hw->get_link_status = 1;
		if (hw->get_link_status) {
			e1000_check_for_link(hw);
			link_active = !hw->get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_fiber:
		/* fiber link state comes from the STATUS register */
		e1000_check_for_link(hw);
		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_active = hw->serdes_has_link;
		break;
	default:
		break;
	}

	return link_active;
}
2439
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440/**
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002441 * e1000_watchdog - work function
2442 * @work: work struct contained inside adapter struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 **/
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002444static void e1000_watchdog(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445{
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002446 struct e1000_adapter *adapter = container_of(work,
2447 struct e1000_adapter,
2448 watchdog_task.work);
Joe Perches1dc32912008-07-11 15:17:08 -07002449 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 struct net_device *netdev = adapter->netdev;
Jeff Kirsher545c67c2006-01-12 16:50:25 -08002451 struct e1000_tx_ring *txdr = adapter->tx_ring;
Joe Perches406874a2008-04-03 10:06:32 -07002452 u32 link, tctl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002454 link = e1000_has_link(adapter);
2455 if ((netif_carrier_ok(netdev)) && link)
2456 goto link_up;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002458 if (link) {
2459 if (!netif_carrier_ok(netdev)) {
Joe Perches406874a2008-04-03 10:06:32 -07002460 u32 ctrl;
Joe Perchesc3033b02008-03-21 11:06:25 -07002461 bool txb2b = true;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002462 /* update snapshot of PHY registers on LSC */
Joe Perches1dc32912008-07-11 15:17:08 -07002463 e1000_get_speed_and_duplex(hw,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002464 &adapter->link_speed,
2465 &adapter->link_duplex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466
Joe Perches1dc32912008-07-11 15:17:08 -07002467 ctrl = er32(CTRL);
Emil Tantilov675ad472010-04-27 14:02:58 +00002468 pr_info("%s NIC Link is Up %d Mbps %s, "
2469 "Flow Control: %s\n",
2470 netdev->name,
2471 adapter->link_speed,
2472 adapter->link_duplex == FULL_DUPLEX ?
2473 "Full Duplex" : "Half Duplex",
2474 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2475 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2476 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2477 E1000_CTRL_TFCE) ? "TX" : "None")));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478
Emil Tantilov39ca5f02010-03-26 11:25:58 +00002479 /* adjust timeout factor according to speed/duplex */
Jeff Kirsher66a2b0a2006-01-12 16:50:53 -08002480 adapter->tx_timeout_factor = 1;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002481 switch (adapter->link_speed) {
2482 case SPEED_10:
Joe Perchesc3033b02008-03-21 11:06:25 -07002483 txb2b = false;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002484 adapter->tx_timeout_factor = 16;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002485 break;
2486 case SPEED_100:
Joe Perchesc3033b02008-03-21 11:06:25 -07002487 txb2b = false;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002488 /* maybe add some timeout factor ? */
2489 break;
2490 }
2491
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002492 /* enable transmits in the hardware */
Joe Perches1dc32912008-07-11 15:17:08 -07002493 tctl = er32(TCTL);
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08002494 tctl |= E1000_TCTL_EN;
Joe Perches1dc32912008-07-11 15:17:08 -07002495 ew32(TCTL, tctl);
Jeff Kirsher66a2b0a2006-01-12 16:50:53 -08002496
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 netif_carrier_on(netdev);
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002498 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002499 schedule_delayed_work(&adapter->phy_info_task,
2500 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002501 adapter->smartspeed = 0;
2502 }
2503 } else {
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002504 if (netif_carrier_ok(netdev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505 adapter->link_speed = 0;
2506 adapter->link_duplex = 0;
Emil Tantilov675ad472010-04-27 14:02:58 +00002507 pr_info("%s NIC Link is Down\n",
2508 netdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509 netif_carrier_off(netdev);
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002510
2511 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002512 schedule_delayed_work(&adapter->phy_info_task,
2513 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514 }
2515
2516 e1000_smartspeed(adapter);
2517 }
2518
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002519link_up:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520 e1000_update_stats(adapter);
2521
Joe Perches1dc32912008-07-11 15:17:08 -07002522 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523 adapter->tpt_old = adapter->stats.tpt;
Joe Perches1dc32912008-07-11 15:17:08 -07002524 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525 adapter->colc_old = adapter->stats.colc;
2526
2527 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2528 adapter->gorcl_old = adapter->stats.gorcl;
2529 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2530 adapter->gotcl_old = adapter->stats.gotcl;
2531
Joe Perches1dc32912008-07-11 15:17:08 -07002532 e1000_update_adaptive(hw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002534 if (!netif_carrier_ok(netdev)) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002535 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 /* We've lost link, so the controller stops DMA,
2537 * but we've got queued Tx work that's never going
2538 * to get done, so reset controller to flush Tx.
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002539 * (Do the reset outside of interrupt context).
2540 */
Jeff Kirsher87041632006-03-02 18:21:24 -08002541 adapter->tx_timeout_count++;
2542 schedule_work(&adapter->reset_task);
Jesse Brandeburg0ef4eed2011-10-05 07:24:51 +00002543 /* exit immediately since reset is imminent */
Vladimir Davydovb2f963bf2013-11-23 07:17:56 +00002544 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545 }
2546 }
2547
Jesse Brandeburgeab2abf2010-05-04 22:26:03 +00002548 /* Simple mode for Interrupt Throttle Rate (ITR) */
2549 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002550 /* Symmetric Tx/Rx gets a reduced ITR=2000;
Jesse Brandeburgeab2abf2010-05-04 22:26:03 +00002551 * Total asymmetrical Tx or Rx gets ITR=8000;
2552 * everyone else is between 2000-8000.
2553 */
2554 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2555 u32 dif = (adapter->gotcl > adapter->gorcl ?
2556 adapter->gotcl - adapter->gorcl :
2557 adapter->gorcl - adapter->gotcl) / 10000;
2558 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2559
2560 ew32(ITR, 1000000000 / (itr * 256));
2561 }
2562
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563 /* Cause software interrupt to ensure rx ring is cleaned */
Joe Perches1dc32912008-07-11 15:17:08 -07002564 ew32(ICS, E1000_ICS_RXDMT0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565
Malli Chilakala26483452005-04-28 19:44:46 -07002566 /* Force detection of hung controller every watchdog period */
Joe Perchesc3033b02008-03-21 11:06:25 -07002567 adapter->detect_tx_hung = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002569 /* Reschedule the task */
Jesse Brandeburgbaa34742009-09-25 12:17:23 +00002570 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002571 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572}
2573
/* Traffic classes used by the dynamic ITR logic (e1000_update_itr /
 * e1000_set_itr).  A lower class maps to a higher interrupt rate
 * (lower latency); bulk_latency throttles interrupts for throughput.
 */
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
2580
2581/**
2582 * e1000_update_itr - update the dynamic ITR value based on statistics
Jesse Brandeburg8fce4732009-09-25 12:18:41 +00002583 * @adapter: pointer to adapter
2584 * @itr_setting: current adapter->itr
2585 * @packets: the number of packets during this measurement interval
2586 * @bytes: the number of bytes during this measurement interval
2587 *
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002588 * Stores a new ITR value based on packets and byte
2589 * counts during the last interrupt. The advantage of per interrupt
2590 * computation is faster updates and more accurate ITR for the current
2591 * traffic pattern. Constants in this function were computed
2592 * based on theoretical maximum wire speed and thresholds were set based
2593 * on testing data as well as attempting to minimize response time
2594 * while increasing bulk throughput.
2595 * this functionality is controlled by the InterruptThrottleRate module
2596 * parameter (see e1000_param.c)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002597 **/
2598static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
Joe Perches64798842008-07-11 15:17:02 -07002599 u16 itr_setting, int packets, int bytes)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002600{
2601 unsigned int retval = itr_setting;
2602 struct e1000_hw *hw = &adapter->hw;
2603
2604 if (unlikely(hw->mac_type < e1000_82540))
2605 goto update_itr_done;
2606
2607 if (packets == 0)
2608 goto update_itr_done;
2609
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002610 switch (itr_setting) {
2611 case lowest_latency:
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002612 /* jumbo frames get bulk treatment*/
2613 if (bytes/packets > 8000)
2614 retval = bulk_latency;
2615 else if ((packets < 5) && (bytes > 512))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002616 retval = low_latency;
2617 break;
2618 case low_latency: /* 50 usec aka 20000 ints/s */
2619 if (bytes > 10000) {
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002620 /* jumbo frames need bulk latency setting */
2621 if (bytes/packets > 8000)
2622 retval = bulk_latency;
2623 else if ((packets < 10) || ((bytes/packets) > 1200))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002624 retval = bulk_latency;
2625 else if ((packets > 35))
2626 retval = lowest_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002627 } else if (bytes/packets > 2000)
2628 retval = bulk_latency;
2629 else if (packets <= 2 && bytes < 512)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002630 retval = lowest_latency;
2631 break;
2632 case bulk_latency: /* 250 usec aka 4000 ints/s */
2633 if (bytes > 25000) {
2634 if (packets > 35)
2635 retval = low_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002636 } else if (bytes < 6000) {
2637 retval = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002638 }
2639 break;
2640 }
2641
2642update_itr_done:
2643 return retval;
2644}
2645
2646static void e1000_set_itr(struct e1000_adapter *adapter)
2647{
2648 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07002649 u16 current_itr;
2650 u32 new_itr = adapter->itr;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002651
2652 if (unlikely(hw->mac_type < e1000_82540))
2653 return;
2654
2655 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2656 if (unlikely(adapter->link_speed != SPEED_1000)) {
2657 current_itr = 0;
2658 new_itr = 4000;
2659 goto set_itr_now;
2660 }
2661
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002662 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2663 adapter->total_tx_packets,
2664 adapter->total_tx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002665 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2666 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2667 adapter->tx_itr = low_latency;
2668
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002669 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2670 adapter->total_rx_packets,
2671 adapter->total_rx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002672 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2673 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2674 adapter->rx_itr = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002675
2676 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2677
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002678 switch (current_itr) {
2679 /* counts and packets in update_itr are dependent on these numbers */
2680 case lowest_latency:
2681 new_itr = 70000;
2682 break;
2683 case low_latency:
2684 new_itr = 20000; /* aka hwitr = ~200 */
2685 break;
2686 case bulk_latency:
2687 new_itr = 4000;
2688 break;
2689 default:
2690 break;
2691 }
2692
2693set_itr_now:
2694 if (new_itr != adapter->itr) {
2695 /* this attempts to bias the interrupt rate towards Bulk
2696 * by adding intermediate steps when interrupt rate is
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002697 * increasing
2698 */
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002699 new_itr = new_itr > adapter->itr ?
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002700 min(adapter->itr + (new_itr >> 2), new_itr) :
2701 new_itr;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002702 adapter->itr = new_itr;
Joe Perches1dc32912008-07-11 15:17:08 -07002703 ew32(ITR, 1000000000 / (new_itr * 256));
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002704 }
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002705}
2706
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707#define E1000_TX_FLAGS_CSUM 0x00000001
2708#define E1000_TX_FLAGS_VLAN 0x00000002
2709#define E1000_TX_FLAGS_TSO 0x00000004
Malli Chilakala2d7edb92005-04-28 19:43:52 -07002710#define E1000_TX_FLAGS_IPV4 0x00000008
Ben Greear11a78dc2012-02-11 15:40:01 +00002711#define E1000_TX_FLAGS_NO_FCS 0x00000010
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2713#define E1000_TX_FLAGS_VLAN_SHIFT 16
2714
/* e1000_tso - set up a TSO context descriptor for a GSO skb
 *
 * Returns 1 (true) and queues one context descriptor when @skb is a
 * GSO packet, 0 (false) when it is not, or a negative errno if the
 * header could not be made writable.  The pseudo-header checksum is
 * precomputed here so the hardware can fill in the real checksum per
 * segment.
 */
static int e1000_tso(struct e1000_adapter *adapter,
		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
		     __be16 protocol)
{
	struct e1000_context_desc *context_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;

	if (skb_is_gso(skb)) {
		int err;

		/* headers are modified below, so they must be writable */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			/* zero len/check; hw recomputes them per segment */
			iph->tot_len = 0;
			iph->check = 0;
			/* seed TCP checksum with the pseudo-header only */
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			/* no IP checksum end offset for IPv6 */
			ipcse = 0;
		}
		/* byte offsets of the IP header/checksum and TCP
		 * header/checksum fields, as the context descriptor wants
		 */
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		/* advance ring index with wrap-around */
		if (++i == tx_ring->count)
			i = 0;

		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
2788
Joe Perches64798842008-07-11 15:17:02 -07002789static bool e1000_tx_csum(struct e1000_adapter *adapter,
Vlad Yasevich06f4d032014-08-25 10:34:49 -04002790 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2791 __be16 protocol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792{
2793 struct e1000_context_desc *context_desc;
Florian Westphal580f3212014-09-03 13:34:31 +00002794 struct e1000_tx_buffer *buffer_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 unsigned int i;
Joe Perches406874a2008-04-03 10:06:32 -07002796 u8 css;
Dave Graham3ed30672008-10-09 14:29:26 -07002797 u32 cmd_len = E1000_TXD_CMD_DEXT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798
Dave Graham3ed30672008-10-09 14:29:26 -07002799 if (skb->ip_summed != CHECKSUM_PARTIAL)
2800 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002801
Vlad Yasevich06f4d032014-08-25 10:34:49 -04002802 switch (protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08002803 case cpu_to_be16(ETH_P_IP):
Dave Graham3ed30672008-10-09 14:29:26 -07002804 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2805 cmd_len |= E1000_TXD_CMD_TCP;
2806 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08002807 case cpu_to_be16(ETH_P_IPV6):
Dave Graham3ed30672008-10-09 14:29:26 -07002808 /* XXX not handling all IPV6 headers */
2809 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2810 cmd_len |= E1000_TXD_CMD_TCP;
2811 break;
2812 default:
2813 if (unlikely(net_ratelimit()))
Emil Tantilovfeb8f472010-07-26 23:37:21 -07002814 e_warn(drv, "checksum_partial proto=%x!\n",
2815 skb->protocol);
Dave Graham3ed30672008-10-09 14:29:26 -07002816 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817 }
2818
Michał Mirosław0d0b1672010-12-14 15:24:08 +00002819 css = skb_checksum_start_offset(skb);
Dave Graham3ed30672008-10-09 14:29:26 -07002820
2821 i = tx_ring->next_to_use;
2822 buffer_info = &tx_ring->buffer_info[i];
2823 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2824
2825 context_desc->lower_setup.ip_config = 0;
2826 context_desc->upper_setup.tcp_fields.tucss = css;
2827 context_desc->upper_setup.tcp_fields.tucso =
2828 css + skb->csum_offset;
2829 context_desc->upper_setup.tcp_fields.tucse = 0;
2830 context_desc->tcp_seg_setup.data = 0;
2831 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2832
2833 buffer_info->time_stamp = jiffies;
2834 buffer_info->next_to_watch = i;
2835
Janusz Wolaka48954c2015-09-17 23:34:29 +02002836 if (unlikely(++i == tx_ring->count))
2837 i = 0;
2838
Dave Graham3ed30672008-10-09 14:29:26 -07002839 tx_ring->next_to_use = i;
2840
2841 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842}
2843
2844#define E1000_MAX_TXD_PWR 12
2845#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2846
/* e1000_tx_map - DMA-map an skb into the Tx ring's buffer_info slots
 *
 * Maps the linear part and all page fragments of @skb, splitting them
 * into chunks of at most @max_per_txd bytes while applying several
 * hardware errata workarounds.  Returns the number of descriptors
 * consumed, or 0 on DMA mapping failure (after unwinding any mappings
 * already made).  The caller supplies @first, the ring index of the
 * packet's first descriptor, used to record next_to_watch.
 */
static int e1000_tx_map(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_tx_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	/* map the linear (header) portion of the skb */
	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller
		 */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = false;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc
		 */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords.
		 */
		if (unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		/* only advance the index when more data remains, so i
		 * ends up on the packet's last used descriptor
		 */
		if (len) {
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;
		}
	}

	/* map each page fragment, applying the same chunking/errata rules */
	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			unsigned long bufend;
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc
			 */
			if (unlikely(mss && f == (nr_frags-1) &&
			    size == len && size > 8))
				size -= 4;
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords.
			 */
			bufend = (unsigned long)
				page_to_phys(skb_frag_page(frag));
			bufend += offset + size - 1;
			if (unlikely(adapter->pcix_82544 &&
				     !(bufend & 4) &&
				     size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
						offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
		}
	}

	segs = skb_shinfo(skb)->gso_segs ?: 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

	/* bookkeeping lives on the last descriptor; the first points at it */
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	/* unwind every mapping made so far, walking backwards from i */
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}
2994
/* e1000_tx_queue - write legacy/extended Tx descriptors for a mapped packet
 *
 * Converts @tx_flags into descriptor command/option bits, fills @count
 * descriptors from the previously mapped buffer_info entries, marks the
 * last one with the adapter's end-of-packet command bits, and advances
 * next_to_use.  The caller is responsible for writing the tail register
 * to hand the descriptors to hardware.
 */
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_tx_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		/* extended data descriptor with TCP segmentation */
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		/* VLAN tag travels in the upper 16 bits of tx_flags */
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		txd_lower &= ~(E1000_TXD_CMD_IFCS);

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count))
			i = 0;
	}

	/* last descriptor gets the end-of-packet command bits */
	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
}
3054
Ben Hutchings1aa8b472012-07-10 10:56:59 +00003055/* 82547 workaround to avoid controller hang in half-duplex environment.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003056 * The workaround is to avoid queuing a large packet that would span
3057 * the internal Tx FIFO ring boundary by notifying the stack to resend
3058 * the packet at a later time. This gives the Tx FIFO an opportunity to
3059 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3060 * to the beginning of the Tx FIFO.
Ben Hutchings1aa8b472012-07-10 10:56:59 +00003061 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062
3063#define E1000_FIFO_HDR 0x10
3064#define E1000_82547_PAD_LEN 0x3E0
3065
Joe Perches64798842008-07-11 15:17:02 -07003066static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3067 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068{
Joe Perches406874a2008-04-03 10:06:32 -07003069 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3070 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003071
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -07003072 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003074 if (adapter->link_duplex != HALF_DUPLEX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003075 goto no_fifo_stall_required;
3076
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003077 if (atomic_read(&adapter->tx_fifo_stall))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003078 return 1;
3079
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003080 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081 atomic_set(&adapter->tx_fifo_stall, 1);
3082 return 1;
3083 }
3084
3085no_fifo_stall_required:
3086 adapter->tx_fifo_head += skb_fifo_len;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003087 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003088 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3089 return 0;
3090}
3091
/* Slow path of e1000_maybe_stop_tx: stop the queue, then re-check ring
 * occupancy under a full memory barrier in case another CPU freed
 * descriptors concurrently.
 *
 * Returns -EBUSY if the queue stays stopped (still not enough room for
 * @size descriptors), 0 if room appeared and the queue was restarted.
 */
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	/* restart_queue is a statistic; counts how often we hit this race */
	++adapter->restart_queue;
	return 0;
}
3115
/* Fast-path check that the ring has room for @size descriptors; falls
 * back to the stop-and-recheck slow path only when it does not.
 * Returns 0 when transmission may proceed, -EBUSY otherwise.
 */
static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	if (unlikely(E1000_DESC_UNUSED(tx_ring) < size))
		return __e1000_maybe_stop_tx(netdev, size);
	return 0;
}
3123
/* TXD_USE_COUNT: descriptors needed to carry S bytes when one descriptor
 * holds at most 2^X bytes (round-up division by 2^X).
 */
#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
/**
 * e1000_xmit_frame - transmit entry point (ndo_start_xmit)
 * @skb: socket buffer to transmit
 * @netdev: network interface device structure
 *
 * Pads runt packets, computes how many descriptors the skb will consume
 * (including several controller erratum workarounds), stops the queue when
 * the ring is too full, applies the 82547 half-duplex FIFO workaround, sets
 * up TSO/checksum/VLAN offload flags and queues the buffers to hardware.
 *
 * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY when the ring is full or an
 * 82547 FIFO stall is in progress.
 **/
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;		/* descriptors this skb will consume */
	int tso;
	unsigned int f;
	__be16 protocol = vlan_get_protocol(skb);

	/* This goes back to the question of how to logically map a Tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple Tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again.
	 */
	tx_ring = adapter->tx_ring;

	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
	 * packets may get corrupted during padding by HW.
	 * To WA this issue, pad all small packets manually.
	 */
	if (eth_skb_pad(skb))
		return NETDEV_TX_OK;

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer.  The calc is:
	 * 4 = ceil(buffer len/mss).  To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/* header-only linear part with paged data: TSO workarounds */
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
			unsigned int pull_size;
			case e1000_82544:
				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword
				 */
				if ((unsigned long)(skb_tail_pointer(skb) - 1)
				    & 4)
					break;
				/* fall through */
				/* NOTE(review): the fall-through comment above
				 * predates this break; the pull below runs only
				 * when the tail alignment test did not break out
				 */
				pull_size = min((unsigned int)4, skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail "
					      "failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				len = skb_headlen(skb);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
		     (len > 2015)))
		count++;

	/* descriptors for each page fragment */
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
		return NETDEV_TX_BUSY;

	/* 82547 half-duplex FIFO hang avoidance; retry via delayed work */
	if (unlikely((hw->mac_type == e1000_82547) &&
		     (e1000_82547_fifo_workaround(adapter, skb)))) {
		netif_stop_queue(netdev);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		return NETDEV_TX_BUSY;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) <<
			     E1000_TX_FLAGS_VLAN_SHIFT);
	}

	/* remember the first descriptor so we can unwind on mapping failure */
	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb, protocol);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		if (likely(hw->mac_type != e1000_82544))
			tx_ring->last_tx_tso = true;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	if (unlikely(skb->no_fcs))
		tx_flags |= E1000_TX_FLAGS_NO_FCS;

	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
			     nr_frags, mss);

	if (count) {
		/* The descriptors needed is higher than other Intel drivers
		 * due to a number of workarounds.  The breakdown is below:
		 * Data descriptors: MAX_SKB_FRAGS + 1
		 * Context Descriptor: 1
		 * Keep head from touching tail: 2
		 * Workarounds: 3
		 */
		int desc_needed = MAX_SKB_FRAGS + 7;

		netdev_sent_queue(netdev, skb->len);
		skb_tx_timestamp(skb);

		e1000_tx_queue(adapter, tx_ring, tx_flags, count);

		/* 82544 potentially requires twice as many data descriptors
		 * in order to guarantee buffers don't end on evenly-aligned
		 * dwords
		 */
		if (adapter->pcix_82544)
			desc_needed += MAX_SKB_FRAGS + 1;

		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);

		/* only kick the doorbell on the last skb of a burst */
		if (!skb->xmit_more ||
		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
			/* we need this if more than one processor can write to
			 * our tail at a time, it synchronizes IO on IA64/Altix
			 * systems
			 */
			mmiowb();
		}
	} else {
		/* mapping failed: drop the skb and rewind the ring */
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
3315
Tushar Daveb04e36b2012-01-27 09:00:46 +00003316#define NUM_REGS 38 /* 1 based count */
3317static void e1000_regdump(struct e1000_adapter *adapter)
3318{
3319 struct e1000_hw *hw = &adapter->hw;
3320 u32 regs[NUM_REGS];
3321 u32 *regs_buff = regs;
3322 int i = 0;
3323
Tushar Davee29b5d82012-02-10 08:06:36 +00003324 static const char * const reg_name[] = {
3325 "CTRL", "STATUS",
3326 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3327 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3328 "TIDV", "TXDCTL", "TADV", "TARC0",
3329 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3330 "TXDCTL1", "TARC1",
3331 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3332 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3333 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
Tushar Daveb04e36b2012-01-27 09:00:46 +00003334 };
3335
3336 regs_buff[0] = er32(CTRL);
3337 regs_buff[1] = er32(STATUS);
3338
3339 regs_buff[2] = er32(RCTL);
3340 regs_buff[3] = er32(RDLEN);
3341 regs_buff[4] = er32(RDH);
3342 regs_buff[5] = er32(RDT);
3343 regs_buff[6] = er32(RDTR);
3344
3345 regs_buff[7] = er32(TCTL);
3346 regs_buff[8] = er32(TDBAL);
3347 regs_buff[9] = er32(TDBAH);
3348 regs_buff[10] = er32(TDLEN);
3349 regs_buff[11] = er32(TDH);
3350 regs_buff[12] = er32(TDT);
3351 regs_buff[13] = er32(TIDV);
3352 regs_buff[14] = er32(TXDCTL);
3353 regs_buff[15] = er32(TADV);
3354 regs_buff[16] = er32(TARC0);
3355
3356 regs_buff[17] = er32(TDBAL1);
3357 regs_buff[18] = er32(TDBAH1);
3358 regs_buff[19] = er32(TDLEN1);
3359 regs_buff[20] = er32(TDH1);
3360 regs_buff[21] = er32(TDT1);
3361 regs_buff[22] = er32(TXDCTL1);
3362 regs_buff[23] = er32(TARC1);
3363 regs_buff[24] = er32(CTRL_EXT);
3364 regs_buff[25] = er32(ERT);
3365 regs_buff[26] = er32(RDBAL0);
3366 regs_buff[27] = er32(RDBAH0);
3367 regs_buff[28] = er32(TDFH);
3368 regs_buff[29] = er32(TDFT);
3369 regs_buff[30] = er32(TDFHS);
3370 regs_buff[31] = er32(TDFTS);
3371 regs_buff[32] = er32(TDFPC);
3372 regs_buff[33] = er32(RDFH);
3373 regs_buff[34] = er32(RDFT);
3374 regs_buff[35] = er32(RDFHS);
3375 regs_buff[36] = er32(RDFTS);
3376 regs_buff[37] = er32(RDFPC);
3377
3378 pr_info("Register dump\n");
Tushar Davee29b5d82012-02-10 08:06:36 +00003379 for (i = 0; i < NUM_REGS; i++)
3380 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003381}
3382
/*
 * e1000_dump: Print registers, tx ring and rx ring
 *
 * Debug helper, gated on the adapter's msg_enable bits: always dumps the
 * MAC registers, then (if tx_done logging is on) every Tx descriptor, then
 * (if rx_status logging is on) every Rx descriptor, and finally the raw
 * on-chip descriptor caches read directly from MMIO space.
 */
static void e1000_dump(struct e1000_adapter *adapter)
{
	/* this code doesn't handle multiple rings */
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
	int i;

	/* nothing to do unless hw-level messages are enabled */
	if (!netif_msg_hw(adapter))
		return;

	/* Print Registers */
	e1000_regdump(adapter);

	/* transmit dump */
	pr_info("TX Desc ring0 dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS  |     IPCSE       | IPCS0 | IPCSS  |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47     40 39 36 35 32 31  24 23  20 19              0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47    40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");

	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
		/* view the descriptor as two raw little-endian quadwords */
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)tx_desc;
		const char *type;

		/* tag the ring's next-to-use / next-to-clean positions */
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			type = "NTC/U";
		else if (i == tx_ring->next_to_use)
			type = "NTU";
		else if (i == tx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		/* leading 'd'/'c' distinguishes data vs context descriptors
		 * via bit 20 of the second quadword
		 */
		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
			le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp, buffer_info->skb, type);
	}

rx_ring_summary:
	/* receive dump */
	pr_info("\nRX Desc ring dump\n");

	/* Legacy Receive Descriptor Format
	 *
	 * +-----------------------------------------------------+
	 * |                Buffer Address [63:0]                |
	 * +-----------------------------------------------------+
	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
	 * +-----------------------------------------------------+
	 * 63       48 47    40 39      32 31         16 15      0
	 */
	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");

	if (!netif_msg_rx_status(adapter))
		goto exit;

	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
		/* view the descriptor as two raw little-endian quadwords */
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)rx_desc;
		const char *type;

		if (i == rx_ring->next_to_use)
			type = "NTU";
		else if (i == rx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
	} /* for */

	/* dump the descriptor caches */
	/* rx */
	/* NOTE(review): 0x6000/0x7000 are undocumented internal cache windows
	 * read straight off the BAR - verify against the hardware manual
	 */
	pr_info("Rx descriptor cache in 64bit format\n");
	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
		pr_info("R%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
	/* tx */
	pr_info("Tx descriptor cache in 64bit format\n");
	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
		pr_info("T%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
exit:
	return;
}
3520
Linus Torvalds1da177e2005-04-16 15:20:36 -07003521/**
3522 * e1000_tx_timeout - Respond to a Tx Hang
3523 * @netdev: network interface device structure
3524 **/
Joe Perches64798842008-07-11 15:17:02 -07003525static void e1000_tx_timeout(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526{
Malli Chilakala60490fe2005-06-17 17:41:45 -07003527 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528
3529 /* Do the reset outside of interrupt context */
Jeff Kirsher87041632006-03-02 18:21:24 -08003530 adapter->tx_timeout_count++;
3531 schedule_work(&adapter->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532}
3533
Joe Perches64798842008-07-11 15:17:02 -07003534static void e1000_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003535{
David Howells65f27f32006-11-22 14:55:48 +00003536 struct e1000_adapter *adapter =
3537 container_of(work, struct e1000_adapter, reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538
Tushar Daveb04e36b2012-01-27 09:00:46 +00003539 e_err(drv, "Reset adapter\n");
Vladimir Davydovb2f963bf2013-11-23 07:17:56 +00003540 e1000_reinit_locked(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003541}
3542
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* on-wire frame size = MTU + Ethernet header + FCS */
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
		/* pre-82543 parts cannot do jumbo frames at all */
		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
			e_err(probe, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	/* serialize against any other reset in progress */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	/* e1000_down has a dependency on max_frame_size */
	hw->max_frame_size = max_frame;
	if (netif_running(netdev)) {
		/* prevent buffers from being reallocated */
		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
		e1000_down(adapter);
	}

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * however with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	/* pick the smallest Rx buffer size that still fits the frame */
	if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = PAGE_SIZE;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	pr_info("%s changing MTU from %d to %d\n",
		netdev->name, netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	/* bring the interface back up (or just reset if it was down) */
	if (netif_running(netdev))
		e1000_up(adapter);
	else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

	return 0;
}
3615
/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 *
 * Reads the hardware statistics registers and accumulates them into the
 * driver's software counters, then derives the netdev-visible error
 * totals from them.  Bails out early while the link is down or the PCI
 * channel is offline, since register reads would be meaningless then.
 * Runs under adapter->stats_lock because e1000_tbi_adjust_stats()
 * modifies the same counters from interrupt context.
 **/
void e1000_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	u16 phy_tmp;

/* low byte of PHY_1000T_STATUS holds the PHY idle-error count */
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_tbi_adjust_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */

	/* Rx statistics registers */
	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorcl += er32(GORCL);
	adapter->stats.gorch += er32(GORCH);
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	/* Rx packet-size histogram */
	adapter->stats.prc64 += er32(PRC64);
	adapter->stats.prc127 += er32(PRC127);
	adapter->stats.prc255 += er32(PRC255);
	adapter->stats.prc511 += er32(PRC511);
	adapter->stats.prc1023 += er32(PRC1023);
	adapter->stats.prc1522 += er32(PRC1522);

	adapter->stats.symerrs += er32(SYMERRS);
	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.sec += er32(SEC);
	adapter->stats.rlec += er32(RLEC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.fcruc += er32(FCRUC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotcl += er32(GOTCL);
	adapter->stats.gotch += er32(GOTCH);
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);
	adapter->stats.rfc += er32(RFC);
	adapter->stats.rjc += er32(RJC);
	adapter->stats.torl += er32(TORL);
	adapter->stats.torh += er32(TORH);
	adapter->stats.totl += er32(TOTL);
	adapter->stats.toth += er32(TOTH);
	adapter->stats.tpr += er32(TPR);

	/* Tx packet-size histogram */
	adapter->stats.ptc64 += er32(PTC64);
	adapter->stats.ptc127 += er32(PTC127);
	adapter->stats.ptc255 += er32(PTC255);
	adapter->stats.ptc511 += er32(PTC511);
	adapter->stats.ptc1023 += er32(PTC1023);
	adapter->stats.ptc1522 += er32(PTC1522);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);

	/* used for adaptive IFS */

	hw->tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = er32(COLC);
	adapter->stats.colc += hw->collision_delta;

	/* these registers are only read on 82543 and newer MACs */
	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += er32(ALGNERRC);
		adapter->stats.rxerrc += er32(RXERRC);
		adapter->stats.tncrs += er32(TNCRS);
		adapter->stats.cexterr += er32(CEXTERR);
		adapter->stats.tsctc += er32(TSCTC);
		adapter->stats.tsctfc += er32(TSCTFC);
	}

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
	netdev->stats.tx_errors = adapter->stats.txerrc;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
	/* parts flagged bad_tx_carr_stats_fd report bogus carrier
	 * errors at full duplex; zero them there
	 */
	if (hw->bad_tx_carr_stats_fd &&
	    adapter->link_duplex == FULL_DUPLEX) {
		netdev->stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		/* idle-error count is only read at gigabit speed */
		if ((adapter->link_speed == SPEED_1000) &&
		    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		    (hw->phy_type == e1000_phy_m88) &&
		    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	/* Management Stats */
	if (hw->has_smbus) {
		adapter->stats.mgptc += er32(MGTPTC);
		adapter->stats.mgprc += er32(MGTPRC);
		adapter->stats.mgpdc += er32(MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
Jesse Brandeburg9ac98282006-11-01 08:48:10 -08003769
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 *
 * Reads and thereby clears the interrupt cause register, kicks the
 * watchdog on link-related causes, then masks further interrupts and
 * hands the real work to NAPI.  Returns IRQ_NONE when ICR reads zero
 * (not our interrupt), IRQ_HANDLED otherwise.
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);	/* reading ICR acknowledges the causes */

	if (unlikely((!icr)))
		return IRQ_NONE;  /* Not our interrupt */

	/* we might have caused the interrupt, but the above
	 * read cleared it, and just in case the driver is
	 * down there is nothing to do so return handled
	 */
	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
		return IRQ_HANDLED;

	/* link status change / Rx sequence error: have the watchdog
	 * re-evaluate the link
	 */
	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->watchdog_task, 1);
	}

	/* disable interrupts, without the synchronize_irq bit */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();

	if (likely(napi_schedule_prep(&adapter->napi))) {
		/* reset the per-poll accounting before NAPI runs */
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	} else {
		/* this really should not happen! if it does it is basically a
		 * bug, but not a hard error, so enable ints and continue
		 */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return IRQ_HANDLED;
}
3819
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820/**
3821 * e1000_clean - NAPI Rx polling callback
3822 * @adapter: board private structure
3823 **/
Joe Perches64798842008-07-11 15:17:02 -07003824static int e1000_clean(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825{
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003826 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3827 napi);
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003828 int tx_clean_complete = 0, work_done = 0;
Malli Chilakala26483452005-04-28 19:44:46 -07003829
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003830 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003831
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003832 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003833
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003834 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08003835 work_done = budget;
3836
David S. Miller53e52c72008-01-07 21:06:12 -08003837 /* If budget not fully consumed, exit the polling mode */
3838 if (work_done < budget) {
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003839 if (likely(adapter->itr_setting & 3))
3840 e1000_set_itr(adapter);
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07003841 napi_complete_done(napi, work_done);
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003842 if (!test_bit(__E1000_DOWN, &adapter->flags))
3843 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003844 }
3845
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003846 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003847}
3848
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: Tx ring to reclaim descriptors from
 *
 * Walks the ring from next_to_clean, freeing buffers of every packet
 * whose end-of-packet descriptor the hardware has marked done, wakes
 * the queue when enough descriptors free up, and runs the Tx hang
 * detector.  Returns false only when a full ring's worth of
 * descriptors was cleaned (work remains), true otherwise.
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	/* clean one whole packet (up to and including its EOP
	 * descriptor) per outer iteration, bounded by ring size
	 */
	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		dma_rmb();	/* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				/* account the packet only once, at EOP */
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}

			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count))
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
	 * which will reuse the cleaned buffers.
	 */
	smp_store_release(&tx_ring->next_to_clean, i);

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		/* hung if the EOP descriptor is stale past the timeout and
		 * transmission is not merely paused by flow control (TXOFF)
		 */
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			e_err(drv, "Detected Tx Unit Hang\n"
			      " Tx Queue <%lu>\n"
			      " TDH <%x>\n"
			      " TDT <%x>\n"
			      " next_to_use <%x>\n"
			      " next_to_clean <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      " time_stamp <%lx>\n"
			      " next_to_watch <%x>\n"
			      " jiffies <%lx>\n"
			      " next_to_watch.status <%x>\n",
			      (unsigned long)(tx_ring - adapter->tx_ring),
			      readl(hw->hw_addr + tx_ring->tdh),
			      readl(hw->hw_addr + tx_ring->tdt),
			      tx_ring->next_to_use,
			      tx_ring->next_to_clean,
			      tx_ring->buffer_info[eop].time_stamp,
			      eop,
			      jiffies,
			      eop_desc->upper.fields.status);
			e1000_dump(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
3961
3962/**
3963 * e1000_rx_checksum - Receive Checksum Offload for 82543
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003964 * @adapter: board private structure
3965 * @status_err: receive descriptor status and error fields
3966 * @csum: receive descriptor csum field
3967 * @sk_buff: socket buffer with received data
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968 **/
Joe Perches64798842008-07-11 15:17:02 -07003969static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3970 u32 csum, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003971{
Joe Perches1dc32912008-07-11 15:17:08 -07003972 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07003973 u16 status = (u16)status_err;
3974 u8 errors = (u8)(status_err >> 24);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07003975
3976 skb_checksum_none_assert(skb);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003977
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 /* 82543 or newer only */
Janusz Wolaka48954c2015-09-17 23:34:29 +02003979 if (unlikely(hw->mac_type < e1000_82543))
3980 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981 /* Ignore Checksum bit is set */
Janusz Wolaka48954c2015-09-17 23:34:29 +02003982 if (unlikely(status & E1000_RXD_STAT_IXSM))
3983 return;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003984 /* TCP/UDP checksum error bit is set */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003985 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003986 /* let the stack verify checksum errors */
3987 adapter->hw_csum_err++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003988 return;
3989 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003990 /* TCP/UDP Checksum has not been calculated */
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003991 if (!(status & E1000_RXD_STAT_TCPCS))
3992 return;
3993
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003994 /* It must be a TCP or UDP packet with a valid checksum */
3995 if (likely(status & E1000_RXD_STAT_TCPCS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003996 /* TCP checksum is good */
3997 skb->ip_summed = CHECKSUM_UNNECESSARY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003999 adapter->hw_csum_good++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000}
4001
4002/**
Florian Westphal13809602014-09-03 13:34:36 +00004003 * e1000_consume_page - helper function for jumbo Rx path
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004004 **/
Florian Westphal93f0afe2014-09-03 13:34:26 +00004005static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004006 u16 length)
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004007{
Florian Westphal13809602014-09-03 13:34:36 +00004008 bi->rxbuf.page = NULL;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004009 skb->len += length;
4010 skb->data_len += length;
Eric Dumazeted64b3c2011-10-13 07:53:42 +00004011 skb->truesize += PAGE_SIZE;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004012}
4013
4014/**
4015 * e1000_receive_skb - helper function to handle rx indications
4016 * @adapter: board private structure
4017 * @status: descriptor status field as written by hardware
4018 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4019 * @skb: pointer to sk_buff to be indicated to stack
4020 */
4021static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4022 __le16 vlan, struct sk_buff *skb)
4023{
Jesse Brandeburg6a08d192010-09-22 18:23:05 +00004024 skb->protocol = eth_type_trans(skb, adapter->netdev);
4025
Jiri Pirko5622e402011-07-21 03:26:31 +00004026 if (status & E1000_RXD_STAT_VP) {
4027 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4028
Patrick McHardy86a9bad2013-04-19 02:04:30 +00004029 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
Jiri Pirko5622e402011-07-21 03:26:31 +00004030 }
4031 napi_gro_receive(&adapter->napi, skb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004032}
4033
4034/**
Florian Westphal4f0aeb12014-09-03 13:34:10 +00004035 * e1000_tbi_adjust_stats
4036 * @hw: Struct containing variables accessed by shared code
4037 * @frame_len: The length of the frame in question
4038 * @mac_addr: The Ethernet destination address of the frame in question
4039 *
4040 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4041 */
4042static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4043 struct e1000_hw_stats *stats,
4044 u32 frame_len, const u8 *mac_addr)
4045{
4046 u64 carry_bit;
4047
4048 /* First adjust the frame length. */
4049 frame_len--;
4050 /* We need to adjust the statistics counters, since the hardware
4051 * counters overcount this packet as a CRC error and undercount
4052 * the packet as a good packet
4053 */
4054 /* This packet should not be counted as a CRC error. */
4055 stats->crcerrs--;
4056 /* This packet does count as a Good Packet Received. */
4057 stats->gprc++;
4058
4059 /* Adjust the Good Octets received counters */
4060 carry_bit = 0x80000000 & stats->gorcl;
4061 stats->gorcl += frame_len;
4062 /* If the high bit of Gorcl (the low 32 bits of the Good Octets
4063 * Received Count) was one before the addition,
4064 * AND it is zero after, then we lost the carry out,
4065 * need to add one to Gorch (Good Octets Received Count High).
4066 * This could be simplified if all environments supported
4067 * 64-bit integers.
4068 */
4069 if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4070 stats->gorch++;
4071 /* Is this a broadcast or multicast? Check broadcast first,
4072 * since the test for a multicast frame will test positive on
4073 * a broadcast frame.
4074 */
4075 if (is_broadcast_ether_addr(mac_addr))
4076 stats->bprc++;
4077 else if (is_multicast_ether_addr(mac_addr))
4078 stats->mprc++;
4079
4080 if (frame_len == hw->max_frame_size) {
4081 /* In this case, the hardware has overcounted the number of
4082 * oversize frames.
4083 */
4084 if (stats->roc > 0)
4085 stats->roc--;
4086 }
4087
4088 /* Adjust the bin counters when the extra byte put the frame in the
4089 * wrong bin. Remember that the frame_len was adjusted above.
4090 */
4091 if (frame_len == 64) {
4092 stats->prc64++;
4093 stats->prc127--;
4094 } else if (frame_len == 127) {
4095 stats->prc127++;
4096 stats->prc255--;
4097 } else if (frame_len == 255) {
4098 stats->prc255++;
4099 stats->prc511--;
4100 } else if (frame_len == 511) {
4101 stats->prc511++;
4102 stats->prc1023--;
4103 } else if (frame_len == 1023) {
4104 stats->prc1023++;
4105 stats->prc1522--;
4106 } else if (frame_len == 1522) {
4107 stats->prc1522++;
4108 }
4109}
4110
Florian Westphal20371102014-09-03 13:34:15 +00004111static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4112 u8 status, u8 errors,
4113 u32 length, const u8 *data)
4114{
4115 struct e1000_hw *hw = &adapter->hw;
4116 u8 last_byte = *(data + length - 1);
4117
4118 if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4119 unsigned long irq_flags;
4120
4121 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4122 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4123 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4124
4125 return true;
4126 }
4127
4128 return false;
4129}
4130
Florian Westphal2b294b12014-09-03 13:34:21 +00004131static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4132 unsigned int bufsz)
4133{
Alexander Duyck67fd8932014-12-09 19:40:56 -08004134 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
Florian Westphal2b294b12014-09-03 13:34:21 +00004135
4136 if (unlikely(!skb))
4137 adapter->alloc_rx_buff_failed++;
4138 return skb;
4139}
4140
Florian Westphal4f0aeb12014-09-03 13:34:10 +00004141/**
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004142 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4143 * @adapter: board private structure
4144 * @rx_ring: ring to clean
4145 * @work_done: amount of napi work completed this call
4146 * @work_to_do: max amount of work allowed for this call to do
4147 *
4148 * the return value indicates whether actual cleaning was done, there
4149 * is no guarantee that everything was cleaned
4150 */
4151static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4152 struct e1000_rx_ring *rx_ring,
4153 int *work_done, int work_to_do)
4154{
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004155 struct net_device *netdev = adapter->netdev;
4156 struct pci_dev *pdev = adapter->pdev;
4157 struct e1000_rx_desc *rx_desc, *next_rxd;
Florian Westphal93f0afe2014-09-03 13:34:26 +00004158 struct e1000_rx_buffer *buffer_info, *next_buffer;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004159 u32 length;
4160 unsigned int i;
4161 int cleaned_count = 0;
4162 bool cleaned = false;
Janusz Wolaka48954c2015-09-17 23:34:29 +02004163 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004164
4165 i = rx_ring->next_to_clean;
4166 rx_desc = E1000_RX_DESC(*rx_ring, i);
4167 buffer_info = &rx_ring->buffer_info[i];
4168
4169 while (rx_desc->status & E1000_RXD_STAT_DD) {
4170 struct sk_buff *skb;
4171 u8 status;
4172
4173 if (*work_done >= work_to_do)
4174 break;
4175 (*work_done)++;
Alexander Duyck837a1db2015-04-07 16:55:27 -07004176 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004177
4178 status = rx_desc->status;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004179
Janusz Wolaka48954c2015-09-17 23:34:29 +02004180 if (++i == rx_ring->count)
4181 i = 0;
4182
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004183 next_rxd = E1000_RX_DESC(*rx_ring, i);
4184 prefetch(next_rxd);
4185
4186 next_buffer = &rx_ring->buffer_info[i];
4187
4188 cleaned = true;
4189 cleaned_count++;
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004190 dma_unmap_page(&pdev->dev, buffer_info->dma,
Florian Westphal93f0afe2014-09-03 13:34:26 +00004191 adapter->rx_buffer_len, DMA_FROM_DEVICE);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004192 buffer_info->dma = 0;
4193
4194 length = le16_to_cpu(rx_desc->length);
4195
4196 /* errors is only valid for DD + EOP descriptors */
4197 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4198 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
Florian Westphal13809602014-09-03 13:34:36 +00004199 u8 *mapped = page_address(buffer_info->rxbuf.page);
Sebastian Andrzej Siewiora3060852012-05-11 16:30:46 +00004200
Florian Westphal20371102014-09-03 13:34:15 +00004201 if (e1000_tbi_should_accept(adapter, status,
4202 rx_desc->errors,
4203 length, mapped)) {
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004204 length--;
Florian Westphal20371102014-09-03 13:34:15 +00004205 } else if (netdev->features & NETIF_F_RXALL) {
4206 goto process_skb;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004207 } else {
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004208 /* an error means any chain goes out the window
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004209 * too
4210 */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004211 if (rx_ring->rx_skb_top)
4212 dev_kfree_skb(rx_ring->rx_skb_top);
4213 rx_ring->rx_skb_top = NULL;
4214 goto next_desc;
4215 }
4216 }
4217
4218#define rxtop rx_ring->rx_skb_top
Ben Greeare825b732012-04-04 06:01:29 +00004219process_skb:
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004220 if (!(status & E1000_RXD_STAT_EOP)) {
4221 /* this descriptor is only the beginning (or middle) */
4222 if (!rxtop) {
4223 /* this is the beginning of a chain */
Florian Westphalde591c72014-09-03 13:34:42 +00004224 rxtop = napi_get_frags(&adapter->napi);
Florian Westphal13809602014-09-03 13:34:36 +00004225 if (!rxtop)
4226 break;
4227
4228 skb_fill_page_desc(rxtop, 0,
4229 buffer_info->rxbuf.page,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004230 0, length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004231 } else {
4232 /* this is the middle of a chain */
4233 skb_fill_page_desc(rxtop,
4234 skb_shinfo(rxtop)->nr_frags,
Florian Westphal13809602014-09-03 13:34:36 +00004235 buffer_info->rxbuf.page, 0, length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004236 }
4237 e1000_consume_page(buffer_info, rxtop, length);
4238 goto next_desc;
4239 } else {
4240 if (rxtop) {
4241 /* end of the chain */
4242 skb_fill_page_desc(rxtop,
4243 skb_shinfo(rxtop)->nr_frags,
Florian Westphal13809602014-09-03 13:34:36 +00004244 buffer_info->rxbuf.page, 0, length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004245 skb = rxtop;
4246 rxtop = NULL;
4247 e1000_consume_page(buffer_info, skb, length);
4248 } else {
Florian Westphal13809602014-09-03 13:34:36 +00004249 struct page *p;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004250 /* no chain, got EOP, this buf is the packet
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004251 * copybreak to save the put_page/alloc_page
4252 */
Florian Westphal13809602014-09-03 13:34:36 +00004253 p = buffer_info->rxbuf.page;
Florian Westphalde591c72014-09-03 13:34:42 +00004254 if (length <= copybreak) {
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004255 u8 *vaddr;
Florian Westphal13809602014-09-03 13:34:36 +00004256
Florian Westphalde591c72014-09-03 13:34:42 +00004257 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4258 length -= 4;
4259 skb = e1000_alloc_rx_skb(adapter,
4260 length);
4261 if (!skb)
4262 break;
4263
Florian Westphal13809602014-09-03 13:34:36 +00004264 vaddr = kmap_atomic(p);
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004265 memcpy(skb_tail_pointer(skb), vaddr,
4266 length);
Cong Wang46790262011-11-25 23:14:23 +08004267 kunmap_atomic(vaddr);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004268 /* re-use the page, so don't erase
Florian Westphal13809602014-09-03 13:34:36 +00004269 * buffer_info->rxbuf.page
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004270 */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004271 skb_put(skb, length);
Florian Westphalde591c72014-09-03 13:34:42 +00004272 e1000_rx_checksum(adapter,
4273 status | rx_desc->errors << 24,
4274 le16_to_cpu(rx_desc->csum), skb);
4275
4276 total_rx_bytes += skb->len;
4277 total_rx_packets++;
4278
4279 e1000_receive_skb(adapter, status,
4280 rx_desc->special, skb);
4281 goto next_desc;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004282 } else {
Florian Westphalde591c72014-09-03 13:34:42 +00004283 skb = napi_get_frags(&adapter->napi);
4284 if (!skb) {
4285 adapter->alloc_rx_buff_failed++;
4286 break;
4287 }
Florian Westphal13809602014-09-03 13:34:36 +00004288 skb_fill_page_desc(skb, 0, p, 0,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004289 length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004290 e1000_consume_page(buffer_info, skb,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004291 length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004292 }
4293 }
4294 }
4295
4296 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4297 e1000_rx_checksum(adapter,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004298 (u32)(status) |
4299 ((u32)(rx_desc->errors) << 24),
4300 le16_to_cpu(rx_desc->csum), skb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004301
Ben Greearb0d15622012-02-11 15:40:11 +00004302 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4303 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4304 pskb_trim(skb, skb->len - 4);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004305 total_rx_packets++;
4306
Florian Westphalde591c72014-09-03 13:34:42 +00004307 if (status & E1000_RXD_STAT_VP) {
4308 __le16 vlan = rx_desc->special;
4309 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4310
4311 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004312 }
4313
Florian Westphalde591c72014-09-03 13:34:42 +00004314 napi_gro_frags(&adapter->napi);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004315
4316next_desc:
4317 rx_desc->status = 0;
4318
4319 /* return some buffers to hardware, one at a time is too slow */
4320 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4321 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4322 cleaned_count = 0;
4323 }
4324
4325 /* use prefetched values */
4326 rx_desc = next_rxd;
4327 buffer_info = next_buffer;
4328 }
4329 rx_ring->next_to_clean = i;
4330
4331 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4332 if (cleaned_count)
4333 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4334
4335 adapter->total_rx_packets += total_rx_packets;
4336 adapter->total_rx_bytes += total_rx_bytes;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00004337 netdev->stats.rx_bytes += total_rx_bytes;
4338 netdev->stats.rx_packets += total_rx_packets;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004339 return cleaned;
4340}
4341
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004342/* this should improve performance for small packets with large amounts
Joe Perches57bf6ee2010-05-13 15:26:17 +00004343 * of reassembly being done in the stack
4344 */
Florian Westphal2b294b12014-09-03 13:34:21 +00004345static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
Florian Westphal93f0afe2014-09-03 13:34:26 +00004346 struct e1000_rx_buffer *buffer_info,
Florian Westphal2b294b12014-09-03 13:34:21 +00004347 u32 length, const void *data)
Joe Perches57bf6ee2010-05-13 15:26:17 +00004348{
Florian Westphal2b294b12014-09-03 13:34:21 +00004349 struct sk_buff *skb;
Joe Perches57bf6ee2010-05-13 15:26:17 +00004350
4351 if (length > copybreak)
Florian Westphal2b294b12014-09-03 13:34:21 +00004352 return NULL;
Joe Perches57bf6ee2010-05-13 15:26:17 +00004353
Florian Westphal2b294b12014-09-03 13:34:21 +00004354 skb = e1000_alloc_rx_skb(adapter, length);
4355 if (!skb)
4356 return NULL;
Joe Perches57bf6ee2010-05-13 15:26:17 +00004357
Florian Westphal2b294b12014-09-03 13:34:21 +00004358 dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4359 length, DMA_FROM_DEVICE);
4360
Johannes Berg59ae1d12017-06-16 14:29:20 +02004361 skb_put_data(skb, data, length);
Florian Westphal2b294b12014-09-03 13:34:21 +00004362
4363 return skb;
Joe Perches57bf6ee2010-05-13 15:26:17 +00004364}
4365
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * Walks the descriptor ring from next_to_clean while the hardware has set
 * the DD (descriptor done) bit, hands completed frames to the stack, and
 * periodically returns cleaned buffers to hardware.  Returns true if at
 * least one descriptor was cleaned.
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_rx_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 *data;
		u8 status;

		/* honor the NAPI budget */
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		length = le16_to_cpu(rx_desc->length);

		data = buffer_info->rxbuf.data;
		prefetch(data);
		/* try the copybreak fast path first; on success the receive
		 * buffer stays mapped for reuse
		 */
		skb = e1000_copybreak(adapter, buffer_info, length, data);
		if (!skb) {
			unsigned int frag_len = e1000_frag_len(adapter);

			/* wrap the existing buffer in an skb; the frag was
			 * allocated with E1000_HEADROOM in front of the data
			 */
			skb = build_skb(data - E1000_HEADROOM, frag_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				break;
			}

			skb_reserve(skb, E1000_HEADROOM);
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			/* buffer ownership moved to the skb; clear so the
			 * refill path allocates a replacement
			 */
			buffer_info->dma = 0;
			buffer_info->rxbuf.data = NULL;
		}

		if (++i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;

		/* !EOP means multiple descriptors were used to store a single
		 * packet, if thats the case we need to toss it. In fact, we
		 * to toss every packet with the EOP bit clear and the next
		 * frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
			dev_kfree_skb(skb);
			/* EOP ends the run of fragments being discarded */
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			if (e1000_tbi_should_accept(adapter, status,
						    rx_desc->errors,
						    length, data)) {
				/* TBI workaround: strip the extra byte */
				length--;
			} else if (netdev->features & NETIF_F_RXALL) {
				/* RXALL: deliver even errored frames */
				goto process_skb;
			} else {
				dev_kfree_skb(skb);
				goto next_desc;
			}
		}

process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		/* copybreak skbs already contain the data; build_skb ones
		 * still need their length set
		 */
		if (buffer_info->rxbuf.data == NULL)
			skb_put(skb, length);
		else /* copybreak skb */
			skb_trim(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* refill whatever the hardware has consumed */
	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
4514
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_rx_buffer *buffer_info;
	unsigned int i;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* allocate a new page if necessary */
		if (!buffer_info->rxbuf.page) {
			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->rxbuf.page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		/* map the page only if it isn't already mapped (the page may
		 * be re-used from a previous receive)
		 */
		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->rxbuf.page, 0,
							adapter->rx_buffer_len,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				/* roll back the page allocation on map failure */
				put_page(buffer_info->rxbuf.page);
				buffer_info->rxbuf.page = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* hardware tail points at the last valid descriptor, one
		 * behind next_to_use
		 */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
4579
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: receive ring to refill
 * @cleaned_count: number of buffers to allocate this pass
 *
 * Allocates page-fragment receive buffers, maps them for DMA, and writes
 * their addresses into the descriptor ring.  Both the CPU address and the
 * resulting DMA address are checked against errata 23 (buffers must not
 * cross a 64kB boundary).
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_rx_buffer *buffer_info;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		void *data;

		/* slot still holds a buffer (e.g. copybreak path kept it) —
		 * just rewrite the descriptor
		 */
		if (buffer_info->rxbuf.data)
			goto skip;

		data = e1000_alloc_frag(adapter);
		if (!data) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
			void *olddata = data;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, data);
			/* Try again, without freeing the previous */
			data = e1000_alloc_frag(adapter);
			/* Failed allocation, critical failure */
			if (!data) {
				skb_free_frag(olddata);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
				/* give up */
				skb_free_frag(data);
				skb_free_frag(olddata);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			/* Use new allocation */
			skb_free_frag(olddata);
		}
		buffer_info->dma = dma_map_single(&pdev->dev,
						  data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			skb_free_frag(data);
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);

			/* undo the mapping and drop the buffer entirely */
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);

			skb_free_frag(data);
			buffer_info->rxbuf.data = NULL;
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break;
		}
		buffer_info->rxbuf.data = data;
skip:
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* tail register points one behind next_to_use */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
4694
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 *
 * Called repeatedly (presumably from the link watchdog — confirm against
 * caller); uses adapter->smartspeed as an iteration counter.  If 1000Mbps
 * master/slave negotiation keeps faulting, disable the master/slave
 * resolution and restart autoneg; after E1000_SMARTSPEED_DOWNSHIFT
 * iterations re-enable it, and reset the counter at E1000_SMARTSPEED_MAX.
 * Only applies to IGP PHYs advertising 1000 Full with autoneg on.
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			/* turn off manual master/slave and renegotiate */
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
4751
4752/**
4753 * e1000_ioctl -
4754 * @netdev:
4755 * @ifreq:
4756 * @cmd:
4757 **/
Joe Perches64798842008-07-11 15:17:02 -07004758static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004759{
4760 switch (cmd) {
4761 case SIOCGMIIPHY:
4762 case SIOCGMIIREG:
4763 case SIOCSMIIREG:
4764 return e1000_mii_ioctl(netdev, ifr, cmd);
4765 default:
4766 return -EOPNOTSUPP;
4767 }
4768}
4769
/**
 * e1000_mii_ioctl - handle SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG
 * @netdev: network interface device structure
 * @ifr: ioctl request carrying struct mii_ioctl_data
 * @cmd: MII ioctl command
 *
 * Reads or writes PHY registers on behalf of userspace.  Register access
 * is serialized with stats_lock.  A write to PHY_CTRL additionally mirrors
 * the requested autoneg/speed/duplex settings into the driver state and
 * reinitializes or resets the device so hardware and software agree.
 * Returns E1000_SUCCESS, or a negative errno on failure.
 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	/* MII only makes sense on copper media */
	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		/* only the 32 standard MII registers are writable */
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					/* advertise all speeds/duplexes */
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;
					/* decode forced speed from the MII
					 * control register bits
					 */
					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				/* apply the new link configuration */
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				/* these take effect only after a PHY reset */
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
4866
Joe Perches64798842008-07-11 15:17:02 -07004867void e1000_pci_set_mwi(struct e1000_hw *hw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004868{
4869 struct e1000_adapter *adapter = hw->back;
Malli Chilakala26483452005-04-28 19:44:46 -07004870 int ret_val = pci_set_mwi(adapter->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004871
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004872 if (ret_val)
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004873 e_err(probe, "Error in setting MWI\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004874}
4875
Joe Perches64798842008-07-11 15:17:02 -07004876void e1000_pci_clear_mwi(struct e1000_hw *hw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004877{
4878 struct e1000_adapter *adapter = hw->back;
4879
4880 pci_clear_mwi(adapter->pdev);
4881}
4882
Joe Perches64798842008-07-11 15:17:02 -07004883int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
Peter Oruba007755e2007-09-28 22:42:06 -07004884{
4885 struct e1000_adapter *adapter = hw->back;
4886 return pcix_get_mmrbc(adapter->pdev);
4887}
4888
Joe Perches64798842008-07-11 15:17:02 -07004889void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
Peter Oruba007755e2007-09-28 22:42:06 -07004890{
4891 struct e1000_adapter *adapter = hw->back;
4892 pcix_set_mmrbc(adapter->pdev, mmrbc);
4893}
4894
/* Port I/O write helper: emit @value to I/O @port with outl().
 * @hw is unused here; presumably kept to match a common callback
 * signature in the shared HW layer — confirm against e1000_hw.c.
 */
void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}
4899
Jiri Pirko5622e402011-07-21 03:26:31 +00004900static bool e1000_vlan_used(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004901{
Jiri Pirko5622e402011-07-21 03:26:31 +00004902 u16 vid;
4903
4904 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4905 return true;
4906 return false;
4907}
4908
Jiri Pirko52f55092012-03-20 18:10:01 +00004909static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4910 netdev_features_t features)
4911{
4912 struct e1000_hw *hw = &adapter->hw;
4913 u32 ctrl;
4914
4915 ctrl = er32(CTRL);
Patrick McHardyf6469682013-04-19 02:04:27 +00004916 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
Jiri Pirko52f55092012-03-20 18:10:01 +00004917 /* enable VLAN tag insert/strip */
4918 ctrl |= E1000_CTRL_VME;
4919 } else {
4920 /* disable VLAN tag insert/strip */
4921 ctrl &= ~E1000_CTRL_VME;
4922 }
4923 ew32(CTRL, ctrl);
4924}
Jiri Pirko5622e402011-07-21 03:26:31 +00004925static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4926 bool filter_on)
4927{
Joe Perches1dc32912008-07-11 15:17:08 -07004928 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko5622e402011-07-21 03:26:31 +00004929 u32 rctl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004930
Jesse Brandeburg9150b762008-03-21 11:06:58 -07004931 if (!test_bit(__E1000_DOWN, &adapter->flags))
4932 e1000_irq_disable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004933
Jiri Pirko52f55092012-03-20 18:10:01 +00004934 __e1000_vlan_mode(adapter, adapter->netdev->features);
Jiri Pirko5622e402011-07-21 03:26:31 +00004935 if (filter_on) {
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004936 /* enable VLAN receive filtering */
4937 rctl = er32(RCTL);
4938 rctl &= ~E1000_RCTL_CFIEN;
Jiri Pirko5622e402011-07-21 03:26:31 +00004939 if (!(adapter->netdev->flags & IFF_PROMISC))
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004940 rctl |= E1000_RCTL_VFE;
4941 ew32(RCTL, rctl);
4942 e1000_update_mng_vlan(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004943 } else {
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004944 /* disable VLAN receive filtering */
4945 rctl = er32(RCTL);
4946 rctl &= ~E1000_RCTL_VFE;
4947 ew32(RCTL, rctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004948 }
4949
Jesse Brandeburg9150b762008-03-21 11:06:58 -07004950 if (!test_bit(__E1000_DOWN, &adapter->flags))
4951 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004952}
4953
/* e1000_vlan_mode - ndo_set_features-style hook: apply the VLAN stripping
 * mode implied by @features, with interrupts masked around the register
 * write (unless the interface is already down).
 */
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}
4967
Patrick McHardy80d5c362013-04-19 02:04:28 +00004968static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4969 __be16 proto, u16 vid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004970{
Malli Chilakala60490fe2005-06-17 17:41:45 -07004971 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07004972 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07004973 u32 vfta, index;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004974
Joe Perches1dc32912008-07-11 15:17:08 -07004975 if ((hw->mng_cookie.status &
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004976 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4977 (vid == adapter->mng_vlan_id))
Jiri Pirko8e586132011-12-08 19:52:37 -05004978 return 0;
Jiri Pirko5622e402011-07-21 03:26:31 +00004979
4980 if (!e1000_vlan_used(adapter))
4981 e1000_vlan_filter_on_off(adapter, true);
4982
Linus Torvalds1da177e2005-04-16 15:20:36 -07004983 /* add VID to filter table */
4984 index = (vid >> 5) & 0x7F;
Joe Perches1dc32912008-07-11 15:17:08 -07004985 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986 vfta |= (1 << (vid & 0x1F));
Joe Perches1dc32912008-07-11 15:17:08 -07004987 e1000_write_vfta(hw, index, vfta);
Jiri Pirko5622e402011-07-21 03:26:31 +00004988
4989 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05004990
4991 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004992}
4993
/**
 * e1000_vlan_rx_kill_vid - remove a VLAN id from the hardware filter
 * @netdev: network interface device structure
 * @proto: VLAN protocol (802.1Q)
 * @vid: VLAN id to remove
 *
 * Clears the VFTA bit for @vid and drops it from the active-VLAN bitmap;
 * if no VLANs remain in use, receive VLAN filtering is switched off.
 * Always returns 0.
 */
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	/* NOTE(review): this disable/enable pair has no work between the
	 * two calls -- it looks like a leftover from code that was removed
	 * (it does still flush/re-arm the interrupt mask registers, so
	 * deleting it is not provably a no-op; confirm before cleaning up).
	 */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table:
	 * VFTA word (vid >> 5), bit (vid & 0x1F)
	 */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	/* last VLAN gone: hardware filtering no longer needed */
	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}
5019
Joe Perches64798842008-07-11 15:17:02 -07005020static void e1000_restore_vlan(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005021{
Jiri Pirko5622e402011-07-21 03:26:31 +00005022 u16 vid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005023
Jiri Pirko5622e402011-07-21 03:26:31 +00005024 if (!e1000_vlan_used(adapter))
5025 return;
5026
5027 e1000_vlan_filter_on_off(adapter, true);
5028 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
Patrick McHardy80d5c362013-04-19 02:04:28 +00005029 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005030}
5031
/**
 * e1000_set_spd_dplx - force link speed and duplex
 * @adapter: board private structure
 * @spd: requested speed (SPEED_10 / SPEED_100 / SPEED_1000)
 * @dplx: requested duplex (DUPLEX_HALF / DUPLEX_FULL)
 *
 * Disables autonegotiation and records the forced speed/duplex in
 * hw->forced_speed_duplex; the 1000/Full case instead re-enables
 * autoneg advertising only 1000/Full.  Returns 0 on success or
 * -EINVAL for an unsupported combination.  Note hw->autoneg is
 * cleared before validation, so hw state is modified even on the
 * error path.
 */
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 * (SPEED_* constants are even, DUPLEX_* are 0/1, so each valid
	 * spd + dplx sum is unique)
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 gbps Full duplex
	 * NOTE(review): with &&, this only rejects when BOTH speed and
	 * duplex differ from 1000/Full -- e.g. 100/Full on fiber passes
	 * this check.  Confirm whether || was intended before changing.
	 */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		/* 1000/Full: re-enable autoneg, advertise only 1000/Full */
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	hw->mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
5081
/**
 * __e1000_shutdown - common suspend/shutdown power-down path
 * @pdev: PCI device being suspended or shut down
 * @enable_wake: out-parameter; set true if the caller should arm PME
 *               wake on the device (WoL filters active or manageability
 *               engine enabled)
 *
 * Detaches and downs the interface, saves PCI state (PM builds only),
 * programs the Wake-on-LAN filter/control registers, releases
 * manageability, frees the IRQ and disables the PCI device.  Returns 0,
 * or the pci_save_state() error on PM builds.
 */
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		int count = E1000_CHECK_RESET_COUNT;

		/* wait (bounded) for any in-progress reset to finish
		 * before taking the interface down
		 */
		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
			usleep_range(10000, 20000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	/* no point waking on link-change if the link is already up */
	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		/* receiver must stay alive so WoL packet filters can match */
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		/* arm PME and program the wake-up filter control */
		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	/* __E1000_DISABLED guards against double pci_disable_device() */
	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
		pci_disable_device(pdev);

	return 0;
}
5170
Jesse Brandeburg2f826652006-01-18 13:01:34 -08005171#ifdef CONFIG_PM
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005172static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5173{
5174 int retval;
5175 bool wake;
5176
5177 retval = __e1000_shutdown(pdev, &wake);
5178 if (retval)
5179 return retval;
5180
5181 if (wake) {
5182 pci_prepare_to_sleep(pdev);
5183 } else {
5184 pci_wake_from_d3(pdev, false);
5185 pci_set_power_state(pdev, PCI_D3hot);
5186 }
5187
5188 return 0;
5189}
5190
/* e1000_resume - PCI resume callback.
 * Restores power state and PCI config, re-enables the device (I/O or
 * memory flavor per adapter->need_ioport), re-acquires the IRQ if the
 * interface was running, resets the hardware, clears wake-up status
 * and brings the interface back up.  Returns 0 or a negative errno.
 */
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	/* NOTE(review): err is u32 but holds negative errno values from
	 * pci_enable_device*(); the conversion back to int on return
	 * preserves them, but int would be the conventional type.
	 */
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* re-save so a later restore (e.g. after error recovery) sees D0 state */
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__E1000_DISABLED, &adapter->flags);
	pci_set_master(pdev);

	/* wake sources are no longer needed once we are back in D0 */
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	/* clear all pending wake-up status bits */
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
5238#endif
Auke Kokc653e632006-05-23 13:35:57 -07005239
/* e1000_shutdown - PCI shutdown/reboot callback.
 * Runs the common power-down path; on an actual power-off additionally
 * arms (or disarms) D3 wake per the WoL result and drops to D3hot.
 */
static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
5251
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252#ifdef CONFIG_NET_POLL_CONTROLLER
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00005253/* Polling 'interrupt' - used by things like netconsole to send skbs
Linus Torvalds1da177e2005-04-16 15:20:36 -07005254 * without having to re-enable interrupts. It's not called while
5255 * the interrupt routine is executing.
5256 */
Joe Perches64798842008-07-11 15:17:02 -07005257static void e1000_netpoll(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258{
Malli Chilakala60490fe2005-06-17 17:41:45 -07005259 struct e1000_adapter *adapter = netdev_priv(netdev);
Auke Kokd3d9e482006-07-14 16:14:23 -07005260
WANG Cong31119122016-12-10 14:22:42 -08005261 if (disable_hardirq(adapter->pdev->irq))
5262 e1000_intr(adapter->pdev->irq, netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263 enable_irq(adapter->pdev->irq);
5264}
5265#endif
5266
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  It detaches the netdev, downs the
 * interface if running, disables the PCI device (once -- guarded by
 * __E1000_DISABLED) and asks the AER core for a slot reset, or for
 * disconnection if the failure is permanent.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	/* permanent failure: no point attempting recovery */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);

	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
		pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
5295
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.  Returns
 * PCI_ERS_RESULT_RECOVERED on success or PCI_ERS_RESULT_DISCONNECT if
 * the device cannot be re-enabled.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* some adapters need their I/O BAR, others only memory BARs */
	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__E1000_DISABLED, &adapter->flags);
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	/* clear all pending wake-up status bits */
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}
5332
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.  Re-initializes manageability,
 * brings the interface back up if it was running, and re-attaches the
 * netdev.  On a failed bring-up it logs and leaves the netdev detached.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	/* short-circuit: e1000_up() only runs when the interface was up */
	if (netif_running(netdev) && e1000_up(adapter)) {
		pr_info("can't bring device back up after reset\n");
		return;
	}

	netif_device_attach(netdev);
}
5357
Linus Torvalds1da177e2005-04-16 15:20:36 -07005358/* e1000_main.c */