blob: dd112aa5cebbe1097a777235447bcf08e9305b9f [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*******************************************************************************
2
Auke Kok0abb6eb2006-09-27 12:53:14 -07003 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 more details.
Auke Kok0abb6eb2006-09-27 12:53:14 -070014
Linus Torvalds1da177e2005-04-16 15:20:36 -070015 You should have received a copy of the GNU General Public License along with
Auke Kok0abb6eb2006-09-27 12:53:14 -070016 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 Contact Information:
23 Linux NICS <linux.nics@intel.com>
Auke Kok3d41e302006-04-14 19:05:31 -070024 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28
29#include "e1000.h"
Andrew Mortond0bb53e2006-11-14 10:35:03 -050030#include <net/ip6_checksum.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000031#include <linux/io.h>
Paul Gortmaker70c71602011-05-22 16:47:17 -040032#include <linux/prefetch.h>
Jiri Pirko5622e402011-07-21 03:26:31 +000033#include <linux/bitops.h>
34#include <linux/if_vlan.h>
Dirk Brandewie5377a412011-01-06 14:29:54 +000035
Linus Torvalds1da177e2005-04-16 15:20:36 -070036char e1000_driver_name[] = "e1000";
Adrian Bunk3ad2cc62005-10-30 16:53:34 +010037static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
Anupam Chandaab088532010-11-21 09:54:21 -080038#define DRV_VERSION "7.3.21-k8-NAPI"
Stephen Hemmingerabec42a2007-10-29 10:46:19 -070039const char e1000_driver_version[] = DRV_VERSION;
40static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
42/* e1000_pci_tbl - PCI Device ID Table
43 *
44 * Last entry must be all 0s
45 *
46 * Macro expands to...
47 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48 */
/* PCI device IDs (all Intel vendor) this driver claims.  The table is
 * terminated by an all-zero entry, as the PCI core requires.
 */
static const struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};
90
91MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
Nicholas Nunley35574762006-09-27 12:53:34 -070093int e1000_up(struct e1000_adapter *adapter);
94void e1000_down(struct e1000_adapter *adapter);
95void e1000_reinit_locked(struct e1000_adapter *adapter);
96void e1000_reset(struct e1000_adapter *adapter);
Nicholas Nunley35574762006-09-27 12:53:34 -070097int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100101static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
Janusz Wolaka48954c2015-09-17 23:34:29 +0200102 struct e1000_tx_ring *txdr);
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100103static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
Janusz Wolaka48954c2015-09-17 23:34:29 +0200104 struct e1000_rx_ring *rxdr);
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100105static void e1000_free_tx_resources(struct e1000_adapter *adapter,
Janusz Wolaka48954c2015-09-17 23:34:29 +0200106 struct e1000_tx_ring *tx_ring);
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100107static void e1000_free_rx_resources(struct e1000_adapter *adapter,
Janusz Wolaka48954c2015-09-17 23:34:29 +0200108 struct e1000_rx_ring *rx_ring);
Nicholas Nunley35574762006-09-27 12:53:34 -0700109void e1000_update_stats(struct e1000_adapter *adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700110
111static int e1000_init_module(void);
112static void e1000_exit_module(void);
113static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
Bill Pemberton9f9a12f2012-12-03 09:24:25 -0500114static void e1000_remove(struct pci_dev *pdev);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -0400115static int e1000_alloc_queues(struct e1000_adapter *adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700116static int e1000_sw_init(struct e1000_adapter *adapter);
Stefan Assmann1f2f83f2016-02-03 09:20:51 +0100117int e1000_open(struct net_device *netdev);
118int e1000_close(struct net_device *netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119static void e1000_configure_tx(struct e1000_adapter *adapter);
120static void e1000_configure_rx(struct e1000_adapter *adapter);
121static void e1000_setup_rctl(struct e1000_adapter *adapter);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -0400122static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
Janusz Wolaka48954c2015-09-17 23:34:29 +0200125 struct e1000_tx_ring *tx_ring);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -0400126static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
Janusz Wolaka48954c2015-09-17 23:34:29 +0200127 struct e1000_rx_ring *rx_ring);
Patrick McHardydb0ce502007-11-13 20:54:59 -0800128static void e1000_set_rx_mode(struct net_device *netdev);
Jesse Brandeburg5cf42fc2010-09-22 18:22:17 +0000129static void e1000_update_phy_info_task(struct work_struct *work);
Jesse Brandeburga4010af2011-10-05 07:24:41 +0000130static void e1000_watchdog(struct work_struct *work);
Jesse Brandeburg5cf42fc2010-09-22 18:22:17 +0000131static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
Stephen Hemminger3b29a562009-08-31 19:50:55 +0000132static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133 struct net_device *netdev);
Janusz Wolaka48954c2015-09-17 23:34:29 +0200134static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136static int e1000_set_mac(struct net_device *netdev, void *p);
David Howells7d12e782006-10-05 14:55:46 +0100137static irqreturn_t e1000_intr(int irq, void *data);
Joe Perchesc3033b02008-03-21 11:06:25 -0700138static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139 struct e1000_tx_ring *tx_ring);
Stephen Hemmingerbea33482007-10-03 16:41:36 -0700140static int e1000_clean(struct napi_struct *napi, int budget);
Joe Perchesc3033b02008-03-21 11:06:25 -0700141static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142 struct e1000_rx_ring *rx_ring,
143 int *work_done, int work_to_do);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +0000144static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145 struct e1000_rx_ring *rx_ring,
146 int *work_done, int work_to_do);
/* e1000_alloc_dummy_rx_buffers - intentional no-op RX buffer allocator.
 * Matches the signature of the real allocators (e1000_alloc_rx_buffers /
 * e1000_alloc_jumbo_rx_buffers); presumably installed as
 * adapter->alloc_rx_buf when buffer allocation must be suppressed —
 * confirm against the code that assigns the function pointer.
 */
static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count)
{
}
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -0400152static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +0000153 struct e1000_rx_ring *rx_ring,
Jeff Kirsher72d64a42006-01-12 16:51:19 -0800154 int cleaned_count);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +0000155static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
156 struct e1000_rx_ring *rx_ring,
157 int cleaned_count);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
159static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
160 int cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700161static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
162static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
163static void e1000_tx_timeout(struct net_device *dev);
David Howells65f27f32006-11-22 14:55:48 +0000164static void e1000_reset_task(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165static void e1000_smartspeed(struct e1000_adapter *adapter);
Auke Koke619d522006-04-14 19:04:52 -0700166static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
Janusz Wolaka48954c2015-09-17 23:34:29 +0200167 struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168
Jiri Pirko5622e402011-07-21 03:26:31 +0000169static bool e1000_vlan_used(struct e1000_adapter *adapter);
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000170static void e1000_vlan_mode(struct net_device *netdev,
171 netdev_features_t features);
Jiri Pirko52f55092012-03-20 18:10:01 +0000172static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
173 bool filter_on);
Patrick McHardy80d5c362013-04-19 02:04:28 +0000174static int e1000_vlan_rx_add_vid(struct net_device *netdev,
175 __be16 proto, u16 vid);
176static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
177 __be16 proto, u16 vid);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178static void e1000_restore_vlan(struct e1000_adapter *adapter);
179
Auke Kok6fdfef12006-06-27 09:06:36 -0700180#ifdef CONFIG_PM
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +0000181static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182static int e1000_resume(struct pci_dev *pdev);
183#endif
Auke Kokc653e632006-05-23 13:35:57 -0700184static void e1000_shutdown(struct pci_dev *pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185
186#ifdef CONFIG_NET_POLL_CONTROLLER
187/* for netdump / net console */
188static void e1000_netpoll (struct net_device *netdev);
189#endif
190
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100191#define COPYBREAK_DEFAULT 256
192static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
193module_param(copybreak, uint, 0644);
194MODULE_PARM_DESC(copybreak,
195 "Maximum size of packet that is copied to a new buffer on receive");
196
Auke Kok90267292006-06-08 09:30:24 -0700197static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
Janusz Wolaka48954c2015-09-17 23:34:29 +0200198 pci_channel_state_t state);
Auke Kok90267292006-06-08 09:30:24 -0700199static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
200static void e1000_io_resume(struct pci_dev *pdev);
201
/* PCI error-recovery callbacks registered via e1000_driver.err_handler */
static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -0400207
/* PCI driver descriptor: binds the probe/remove/PM/shutdown entry points
 * and the device-ID table above to the PCI core.
 */
static struct pci_driver e1000_driver = {
	.name = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe = e1000_probe,
	.remove = e1000_remove,
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend = e1000_suspend,
	.resume = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
221
222MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
223MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
224MODULE_LICENSE("GPL");
225MODULE_VERSION(DRV_VERSION);
226
stephen hemmingerb3f4d592012-03-13 06:04:20 +0000227#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
228static int debug = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229module_param(debug, int, 0);
230MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
231
232/**
Emil Tantilov675ad472010-04-27 14:02:58 +0000233 * e1000_get_hw_dev - return device
234 * used by hardware layer to print debugging information
235 *
236 **/
237struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
238{
239 struct e1000_adapter *adapter = hw->back;
240 return adapter->netdev;
241}
242
243/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244 * e1000_init_module - Driver Registration Routine
245 *
246 * e1000_init_module is the first routine called when the driver is
247 * loaded. All it does is register with the PCI subsystem.
248 **/
Joe Perches64798842008-07-11 15:17:02 -0700249static int __init e1000_init_module(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250{
251 int ret;
Emil Tantilov675ad472010-04-27 14:02:58 +0000252 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253
Emil Tantilov675ad472010-04-27 14:02:58 +0000254 pr_info("%s\n", e1000_copyright);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255
Jeff Garzik29917622006-08-19 17:48:59 -0400256 ret = pci_register_driver(&e1000_driver);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100257 if (copybreak != COPYBREAK_DEFAULT) {
258 if (copybreak == 0)
Emil Tantilov675ad472010-04-27 14:02:58 +0000259 pr_info("copybreak disabled\n");
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100260 else
Emil Tantilov675ad472010-04-27 14:02:58 +0000261 pr_info("copybreak enabled for "
262 "packets <= %u bytes\n", copybreak);
Jesse Brandeburg1f753862006-12-15 10:40:39 +0100263 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700264 return ret;
265}
266
267module_init(e1000_init_module);
268
269/**
270 * e1000_exit_module - Driver Exit Cleanup Routine
271 *
272 * e1000_exit_module is called just before the driver is removed
273 * from memory.
274 **/
static void __exit e1000_exit_module(void)
{
	/* Unregister from the PCI core; triggers e1000_remove for each
	 * bound device.
	 */
	pci_unregister_driver(&e1000_driver);
}
279
280module_exit(e1000_exit_module);
281
Auke Kok2db10a02006-06-27 09:06:28 -0700282static int e1000_request_irq(struct e1000_adapter *adapter)
283{
284 struct net_device *netdev = adapter->netdev;
Al Viro3e188262007-12-11 19:49:39 +0000285 irq_handler_t handler = e1000_intr;
Auke Koke94bd232007-05-16 01:49:46 -0700286 int irq_flags = IRQF_SHARED;
287 int err;
Auke Kok2db10a02006-06-27 09:06:28 -0700288
Auke Koke94bd232007-05-16 01:49:46 -0700289 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
Janusz Wolaka48954c2015-09-17 23:34:29 +0200290 netdev);
Auke Koke94bd232007-05-16 01:49:46 -0700291 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -0700292 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
Auke Koke94bd232007-05-16 01:49:46 -0700293 }
Auke Kok2db10a02006-06-27 09:06:28 -0700294
295 return err;
296}
297
298static void e1000_free_irq(struct e1000_adapter *adapter)
299{
300 struct net_device *netdev = adapter->netdev;
301
302 free_irq(adapter->pdev->irq, netdev);
Auke Kok2db10a02006-06-27 09:06:28 -0700303}
304
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305/**
306 * e1000_irq_disable - Mask off interrupt generation on the NIC
307 * @adapter: board private structure
308 **/
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* mask every interrupt cause, then flush the posted write so the
	 * hardware has actually applied the mask
	 */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	/* wait out any handler invocation already in flight */
	synchronize_irq(adapter->pdev->irq);
}
317
318/**
319 * e1000_irq_enable - Enable default interrupt generation settings
320 * @adapter: board private structure
321 **/
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* unmask the default interrupt set and flush the posted write */
	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
Adrian Bunk3ad2cc62005-10-30 16:53:34 +0100329
/* e1000_update_mng_vlan - keep the manageability (DHCP cookie) VLAN id in
 * sync with the set of active VLANs.  If the cookie's VLAN is not yet in
 * use it is added (when the cookie advertises VLAN support); a previously
 * tracked management VLAN that is no longer referenced is removed.
 */
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	/* nothing to track when VLAN filtering is not in use */
	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		/* drop the old management VLAN only if it changed and no
		 * one else still has it active
		 */
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
					       old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
Jeff Kirsherb55ccb32006-01-12 16:50:30 -0800357
Joe Perches64798842008-07-11 15:17:02 -0700358static void e1000_init_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500359{
Joe Perches1dc32912008-07-11 15:17:08 -0700360 struct e1000_hw *hw = &adapter->hw;
361
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500362 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700363 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500364
365 /* disable hardware interception of ARP */
366 manc &= ~(E1000_MANC_ARP_EN);
367
Joe Perches1dc32912008-07-11 15:17:08 -0700368 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500369 }
370}
371
Joe Perches64798842008-07-11 15:17:02 -0700372static void e1000_release_manageability(struct e1000_adapter *adapter)
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500373{
Joe Perches1dc32912008-07-11 15:17:08 -0700374 struct e1000_hw *hw = &adapter->hw;
375
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500376 if (adapter->en_mng_pt) {
Joe Perches1dc32912008-07-11 15:17:08 -0700377 u32 manc = er32(MANC);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500378
379 /* re-enable hardware interception of ARP */
380 manc |= E1000_MANC_ARP_EN;
381
Joe Perches1dc32912008-07-11 15:17:08 -0700382 ew32(MANC, manc);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500383 }
384}
385
Auke Koke0aac5a2007-03-06 08:57:21 -0800386/**
387 * e1000_configure - configure the hardware for RX and TX
388 * @adapter = private board structure
389 **/
390static void e1000_configure(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700391{
392 struct net_device *netdev = adapter->netdev;
Auke Kok2db10a02006-06-27 09:06:28 -0700393 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394
Patrick McHardydb0ce502007-11-13 20:54:59 -0800395 e1000_set_rx_mode(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700396
397 e1000_restore_vlan(adapter);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500398 e1000_init_manageability(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700399
400 e1000_configure_tx(adapter);
401 e1000_setup_rctl(adapter);
402 e1000_configure_rx(adapter);
Jeff Kirsher72d64a42006-01-12 16:51:19 -0800403 /* call E1000_DESC_UNUSED which always leaves
404 * at least 1 descriptor unused to make sure
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000405 * next_to_use != next_to_clean
406 */
Jeff Kirsherf56799e2006-01-12 16:50:39 -0800407 for (i = 0; i < adapter->num_rx_queues; i++) {
Jeff Kirsher72d64a42006-01-12 16:51:19 -0800408 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
Jeff Kirshera292ca62006-01-12 16:51:30 -0800409 adapter->alloc_rx_buf(adapter, ring,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000410 E1000_DESC_UNUSED(ring));
Jeff Kirsherf56799e2006-01-12 16:50:39 -0800411 }
Auke Koke0aac5a2007-03-06 08:57:21 -0800412}
Jeff Kirsher7bfa4812006-01-12 16:50:41 -0800413
/* e1000_up - bring the interface fully up after a reset.  Order matters:
 * hardware is configured first, the DOWN flag cleared, NAPI and IRQs
 * enabled, and finally a link-change interrupt is forced so the watchdog
 * starts running.  Always returns 0.
 */
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
433
Auke Kok79f05bf2006-06-27 09:06:32 -0700434/**
435 * e1000_power_up_phy - restore link in case the phy was powered down
436 * @adapter: address of board private structure
437 *
438 * The phy may be powered down to save power and turn off link when the
439 * driver is unloaded and wake on lan is not enabled (among others)
440 * *** this routine MUST be followed by a call to e1000_reset ***
Auke Kok79f05bf2006-06-27 09:06:32 -0700441 **/
Jesse Brandeburgd6582662006-08-16 13:31:33 -0700442void e1000_power_up_phy(struct e1000_adapter *adapter)
Auke Kok79f05bf2006-06-27 09:06:32 -0700443{
Joe Perches1dc32912008-07-11 15:17:08 -0700444 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -0700445 u16 mii_reg = 0;
Auke Kok79f05bf2006-06-27 09:06:32 -0700446
447 /* Just clear the power down bit to wake the phy back up */
Joe Perches1dc32912008-07-11 15:17:08 -0700448 if (hw->media_type == e1000_media_type_copper) {
Auke Kok79f05bf2006-06-27 09:06:32 -0700449 /* according to the manual, the phy will retain its
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000450 * settings across a power-down/up cycle
451 */
Joe Perches1dc32912008-07-11 15:17:08 -0700452 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700453 mii_reg &= ~MII_CR_POWER_DOWN;
Joe Perches1dc32912008-07-11 15:17:08 -0700454 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700455 }
456}
457
458static void e1000_power_down_phy(struct e1000_adapter *adapter)
459{
Joe Perches1dc32912008-07-11 15:17:08 -0700460 struct e1000_hw *hw = &adapter->hw;
461
Bruce Allan61c25052006-09-27 12:53:54 -0700462 /* Power down the PHY so no link is implied when interface is down *
Joe Perchesc3033b02008-03-21 11:06:25 -0700463 * The PHY cannot be powered down if any of the following is true *
Auke Kok79f05bf2006-06-27 09:06:32 -0700464 * (a) WoL is enabled
465 * (b) AMT is active
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000466 * (c) SoL/IDER session is active
467 */
Joe Perches1dc32912008-07-11 15:17:08 -0700468 if (!adapter->wol && hw->mac_type >= e1000_82540 &&
469 hw->media_type == e1000_media_type_copper) {
Joe Perches406874a2008-04-03 10:06:32 -0700470 u16 mii_reg = 0;
Bruce Allan61c25052006-09-27 12:53:54 -0700471
Joe Perches1dc32912008-07-11 15:17:08 -0700472 switch (hw->mac_type) {
Bruce Allan61c25052006-09-27 12:53:54 -0700473 case e1000_82540:
474 case e1000_82545:
475 case e1000_82545_rev_3:
476 case e1000_82546:
Dirk Brandewie5377a412011-01-06 14:29:54 +0000477 case e1000_ce4100:
Bruce Allan61c25052006-09-27 12:53:54 -0700478 case e1000_82546_rev_3:
479 case e1000_82541:
480 case e1000_82541_rev_2:
481 case e1000_82547:
482 case e1000_82547_rev_2:
Joe Perches1dc32912008-07-11 15:17:08 -0700483 if (er32(MANC) & E1000_MANC_SMBUS_EN)
Bruce Allan61c25052006-09-27 12:53:54 -0700484 goto out;
485 break;
Bruce Allan61c25052006-09-27 12:53:54 -0700486 default:
487 goto out;
488 }
Joe Perches1dc32912008-07-11 15:17:08 -0700489 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
Auke Kok79f05bf2006-06-27 09:06:32 -0700490 mii_reg |= MII_CR_POWER_DOWN;
Joe Perches1dc32912008-07-11 15:17:08 -0700491 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
Jesse Brandeburg4e0d8f7d2011-10-05 07:24:46 +0000492 msleep(1);
Auke Kok79f05bf2006-06-27 09:06:32 -0700493 }
Bruce Allan61c25052006-09-27 12:53:54 -0700494out:
495 return;
Auke Kok79f05bf2006-06-27 09:06:32 -0700496}
497
/* e1000_down_and_stop - mark the adapter DOWN and cancel all deferred
 * work.  The cancellation order is deliberate: see comment below.
 */
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	/*
	 * Since the watchdog task can reschedule other tasks, we should cancel
	 * it first, otherwise we can run into the situation when a work is
	 * still running after the adapter has been turned down.
	 */

	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);
}
517
/* e1000_down - take the interface down.  Teardown order is strict:
 * RX is disabled before TX, both disables are flushed and allowed to
 * drain, NAPI is stopped before IRQs are masked, and the DOWN flag is
 * only set (inside e1000_down_and_stop) after irq_disable so a
 * screaming interrupt cannot occur.
 */
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	netif_carrier_off(netdev);

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/* Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt. Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558
/* e1000_reinit_locked - full down/up cycle, serialized against any other
 * reset via the __E1000_RESETTING bit (spins with msleep until acquired).
 * Must be called from process context.
 */
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
568
Joe Perches64798842008-07-11 15:17:02 -0700569void e1000_reset(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570{
Joe Perches1dc32912008-07-11 15:17:08 -0700571 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -0700572 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
Joe Perchesc3033b02008-03-21 11:06:25 -0700573 bool legacy_pba_adjust = false;
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000574 u16 hwm;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700575
576 /* Repartition Pba for greater than 9k mtu
577 * To take effect CTRL.RST is required.
578 */
579
Joe Perches1dc32912008-07-11 15:17:08 -0700580 switch (hw->mac_type) {
Bruce Allan018ea442006-12-15 10:39:45 +0100581 case e1000_82542_rev2_0:
582 case e1000_82542_rev2_1:
583 case e1000_82543:
584 case e1000_82544:
585 case e1000_82540:
586 case e1000_82541:
587 case e1000_82541_rev_2:
Joe Perchesc3033b02008-03-21 11:06:25 -0700588 legacy_pba_adjust = true;
Bruce Allan018ea442006-12-15 10:39:45 +0100589 pba = E1000_PBA_48K;
590 break;
591 case e1000_82545:
592 case e1000_82545_rev_3:
593 case e1000_82546:
Dirk Brandewie5377a412011-01-06 14:29:54 +0000594 case e1000_ce4100:
Bruce Allan018ea442006-12-15 10:39:45 +0100595 case e1000_82546_rev_3:
596 pba = E1000_PBA_48K;
597 break;
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700598 case e1000_82547:
Malli Chilakala0e6ef3e2005-04-28 19:44:14 -0700599 case e1000_82547_rev_2:
Joe Perchesc3033b02008-03-21 11:06:25 -0700600 legacy_pba_adjust = true;
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700601 pba = E1000_PBA_30K;
602 break;
Bruce Allan018ea442006-12-15 10:39:45 +0100603 case e1000_undefined:
604 case e1000_num_macs:
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700605 break;
606 }
607
Joe Perchesc3033b02008-03-21 11:06:25 -0700608 if (legacy_pba_adjust) {
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000609 if (hw->max_frame_size > E1000_RXBUFFER_8192)
Bruce Allan018ea442006-12-15 10:39:45 +0100610 pba -= 8; /* allocate more FIFO for Tx */
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700611
Joe Perches1dc32912008-07-11 15:17:08 -0700612 if (hw->mac_type == e1000_82547) {
Bruce Allan018ea442006-12-15 10:39:45 +0100613 adapter->tx_fifo_head = 0;
614 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
615 adapter->tx_fifo_size =
616 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
617 atomic_set(&adapter->tx_fifo_stall, 0);
618 }
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000619 } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
Bruce Allan018ea442006-12-15 10:39:45 +0100620 /* adjust PBA for jumbo frames */
Joe Perches1dc32912008-07-11 15:17:08 -0700621 ew32(PBA, pba);
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700622
Bruce Allan018ea442006-12-15 10:39:45 +0100623 /* To maintain wire speed transmits, the Tx FIFO should be
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000624 * large enough to accommodate two full transmit packets,
Bruce Allan018ea442006-12-15 10:39:45 +0100625 * rounded up to the next 1KB and expressed in KB. Likewise,
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000626 * the Rx FIFO should be large enough to accommodate at least
Bruce Allan018ea442006-12-15 10:39:45 +0100627 * one full receive packet and is similarly rounded up and
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000628 * expressed in KB.
629 */
Joe Perches1dc32912008-07-11 15:17:08 -0700630 pba = er32(PBA);
Bruce Allan018ea442006-12-15 10:39:45 +0100631 /* upper 16 bits has Tx packet buffer allocation size in KB */
632 tx_space = pba >> 16;
633 /* lower 16 bits has Rx packet buffer allocation size in KB */
634 pba &= 0xffff;
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000635 /* the Tx fifo also stores 16 bytes of information about the Tx
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000636 * but don't include ethernet FCS because hardware appends it
637 */
638 min_tx_space = (hw->max_frame_size +
Janusz Wolaka48954c2015-09-17 23:34:29 +0200639 sizeof(struct e1000_tx_desc) -
640 ETH_FCS_LEN) * 2;
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -0700641 min_tx_space = ALIGN(min_tx_space, 1024);
Bruce Allan018ea442006-12-15 10:39:45 +0100642 min_tx_space >>= 10;
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000643 /* software strips receive CRC, so leave room for it */
644 min_rx_space = hw->max_frame_size;
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -0700645 min_rx_space = ALIGN(min_rx_space, 1024);
Bruce Allan018ea442006-12-15 10:39:45 +0100646 min_rx_space >>= 10;
647
648 /* If current Tx allocation is less than the min Tx FIFO size,
649 * and the min Tx FIFO size is less than the current Rx FIFO
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000650 * allocation, take space away from current Rx allocation
651 */
Bruce Allan018ea442006-12-15 10:39:45 +0100652 if (tx_space < min_tx_space &&
653 ((min_tx_space - tx_space) < pba)) {
654 pba = pba - (min_tx_space - tx_space);
655
656 /* PCI/PCIx hardware has PBA alignment constraints */
Joe Perches1dc32912008-07-11 15:17:08 -0700657 switch (hw->mac_type) {
Bruce Allan018ea442006-12-15 10:39:45 +0100658 case e1000_82545 ... e1000_82546_rev_3:
659 pba &= ~(E1000_PBA_8K - 1);
660 break;
661 default:
662 break;
663 }
664
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000665 /* if short on Rx space, Rx wins and must trump Tx
666 * adjustment or use Early Receive if available
667 */
Jesse Brandeburg1532ece2009-09-25 12:16:14 +0000668 if (pba < min_rx_space)
669 pba = min_rx_space;
Bruce Allan018ea442006-12-15 10:39:45 +0100670 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700672
Joe Perches1dc32912008-07-11 15:17:08 -0700673 ew32(PBA, pba);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700674
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000675 /* flow control settings:
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000676 * The high water mark must be low enough to fit one full frame
677 * (or the size used for early receive) above it in the Rx FIFO.
678 * Set it to the lower of:
679 * - 90% of the Rx FIFO size, and
680 * - the full Rx FIFO size minus the early receive size (for parts
681 * with ERT support assuming ERT set to E1000_ERT_2048), or
682 * - the full Rx FIFO size minus one full frame
683 */
684 hwm = min(((pba << 10) * 9 / 10),
685 ((pba << 10) - hw->max_frame_size));
Jeff Kirsherf11b7f82006-01-12 16:50:51 -0800686
Jesse Brandeburgb7cb8c22009-07-06 10:45:01 +0000687 hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
688 hw->fc_low_water = hw->fc_high_water - 8;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +0000689 hw->fc_pause_time = E1000_FC_PAUSE_TIME;
Joe Perches1dc32912008-07-11 15:17:08 -0700690 hw->fc_send_xon = 1;
691 hw->fc = hw->original_fc;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700693 /* Allow time for pending master requests to run */
Joe Perches1dc32912008-07-11 15:17:08 -0700694 e1000_reset_hw(hw);
695 if (hw->mac_type >= e1000_82544)
696 ew32(WUC, 0);
Jeff Kirsher09ae3e82006-09-27 12:53:51 -0700697
Joe Perches1dc32912008-07-11 15:17:08 -0700698 if (e1000_init_hw(hw))
Emil Tantilovfeb8f472010-07-26 23:37:21 -0700699 e_dev_err("Hardware Error\n");
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700700 e1000_update_mng_vlan(adapter);
Jesse Brandeburg3d5460a2006-12-15 10:33:46 +0100701
702 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
Joe Perches1dc32912008-07-11 15:17:08 -0700703 if (hw->mac_type >= e1000_82544 &&
Joe Perches1dc32912008-07-11 15:17:08 -0700704 hw->autoneg == 1 &&
705 hw->autoneg_advertised == ADVERTISE_1000_FULL) {
706 u32 ctrl = er32(CTRL);
Jesse Brandeburg3d5460a2006-12-15 10:33:46 +0100707 /* clear phy power management bit if we are in gig only mode,
708 * which if enabled will attempt negotiation to 100Mb, which
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000709 * can cause a loss of link at power off or driver unload
710 */
Jesse Brandeburg3d5460a2006-12-15 10:33:46 +0100711 ctrl &= ~E1000_CTRL_SWDPIN3;
Joe Perches1dc32912008-07-11 15:17:08 -0700712 ew32(CTRL, ctrl);
Jesse Brandeburg3d5460a2006-12-15 10:33:46 +0100713 }
714
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
Joe Perches1dc32912008-07-11 15:17:08 -0700716 ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700717
Joe Perches1dc32912008-07-11 15:17:08 -0700718 e1000_reset_adaptive(hw);
719 e1000_phy_get_info(hw, &adapter->phy_info);
Auke Kok9a53a202006-06-27 09:06:45 -0700720
Jeff Garzik0fccd0e2006-12-15 10:56:10 -0500721 e1000_release_manageability(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700722}
723
Ben Hutchings1aa8b472012-07-10 10:56:59 +0000724/* Dump the eeprom for users having checksum issues */
Adrian Bunkb4ea8952008-02-01 08:21:28 -0800725static void e1000_dump_eeprom(struct e1000_adapter *adapter)
Auke Kok67b3c272007-12-17 13:50:23 -0800726{
727 struct net_device *netdev = adapter->netdev;
728 struct ethtool_eeprom eeprom;
729 const struct ethtool_ops *ops = netdev->ethtool_ops;
730 u8 *data;
731 int i;
732 u16 csum_old, csum_new = 0;
733
734 eeprom.len = ops->get_eeprom_len(netdev);
735 eeprom.offset = 0;
736
737 data = kmalloc(eeprom.len, GFP_KERNEL);
Joe Perchese404dec2012-01-29 12:56:23 +0000738 if (!data)
Auke Kok67b3c272007-12-17 13:50:23 -0800739 return;
Auke Kok67b3c272007-12-17 13:50:23 -0800740
741 ops->get_eeprom(netdev, &eeprom, data);
742
743 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
744 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
745 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
746 csum_new += data[i] + (data[i + 1] << 8);
747 csum_new = EEPROM_SUM - csum_new;
748
Emil Tantilov675ad472010-04-27 14:02:58 +0000749 pr_err("/*********************/\n");
750 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
751 pr_err("Calculated : 0x%04x\n", csum_new);
Auke Kok67b3c272007-12-17 13:50:23 -0800752
Emil Tantilov675ad472010-04-27 14:02:58 +0000753 pr_err("Offset Values\n");
754 pr_err("======== ======\n");
Auke Kok67b3c272007-12-17 13:50:23 -0800755 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
756
Emil Tantilov675ad472010-04-27 14:02:58 +0000757 pr_err("Include this output when contacting your support provider.\n");
758 pr_err("This is not a software error! Something bad happened to\n");
759 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
760 pr_err("result in further problems, possibly loss of data,\n");
761 pr_err("corruption or system hangs!\n");
762 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
763 pr_err("which is invalid and requires you to set the proper MAC\n");
764 pr_err("address manually before continuing to enable this network\n");
765 pr_err("device. Please inspect the EEPROM dump and report the\n");
766 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
767 pr_err("/*********************/\n");
Auke Kok67b3c272007-12-17 13:50:23 -0800768
769 kfree(data);
770}
771
772/**
Taku Izumi81250292008-07-11 15:17:44 -0700773 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
774 * @pdev: PCI device information struct
775 *
776 * Return true if an adapter needs ioport resources
777 **/
778static int e1000_is_need_ioport(struct pci_dev *pdev)
779{
780 switch (pdev->device) {
781 case E1000_DEV_ID_82540EM:
782 case E1000_DEV_ID_82540EM_LOM:
783 case E1000_DEV_ID_82540EP:
784 case E1000_DEV_ID_82540EP_LOM:
785 case E1000_DEV_ID_82540EP_LP:
786 case E1000_DEV_ID_82541EI:
787 case E1000_DEV_ID_82541EI_MOBILE:
788 case E1000_DEV_ID_82541ER:
789 case E1000_DEV_ID_82541ER_LOM:
790 case E1000_DEV_ID_82541GI:
791 case E1000_DEV_ID_82541GI_LF:
792 case E1000_DEV_ID_82541GI_MOBILE:
793 case E1000_DEV_ID_82544EI_COPPER:
794 case E1000_DEV_ID_82544EI_FIBER:
795 case E1000_DEV_ID_82544GC_COPPER:
796 case E1000_DEV_ID_82544GC_LOM:
797 case E1000_DEV_ID_82545EM_COPPER:
798 case E1000_DEV_ID_82545EM_FIBER:
799 case E1000_DEV_ID_82546EB_COPPER:
800 case E1000_DEV_ID_82546EB_FIBER:
801 case E1000_DEV_ID_82546EB_QUAD_COPPER:
802 return true;
803 default:
804 return false;
805 }
806}
807
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000808static netdev_features_t e1000_fix_features(struct net_device *netdev,
809 netdev_features_t features)
Jiri Pirko5622e402011-07-21 03:26:31 +0000810{
Jeff Kirsher6cfbd972013-02-09 12:49:21 +0000811 /* Since there is no support for separate Rx/Tx vlan accel
812 * enable/disable make sure Tx flag is always in same state as Rx.
Jiri Pirko5622e402011-07-21 03:26:31 +0000813 */
Patrick McHardyf6469682013-04-19 02:04:27 +0000814 if (features & NETIF_F_HW_VLAN_CTAG_RX)
815 features |= NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirko5622e402011-07-21 03:26:31 +0000816 else
Patrick McHardyf6469682013-04-19 02:04:27 +0000817 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
Jiri Pirko5622e402011-07-21 03:26:31 +0000818
819 return features;
820}
821
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000822static int e1000_set_features(struct net_device *netdev,
823 netdev_features_t features)
Michał Mirosławe97d3202011-06-08 08:36:42 +0000824{
825 struct e1000_adapter *adapter = netdev_priv(netdev);
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000826 netdev_features_t changed = features ^ netdev->features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000827
Patrick McHardyf6469682013-04-19 02:04:27 +0000828 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
Jiri Pirko5622e402011-07-21 03:26:31 +0000829 e1000_vlan_mode(netdev, features);
830
Ben Greeare825b732012-04-04 06:01:29 +0000831 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
Michał Mirosławe97d3202011-06-08 08:36:42 +0000832 return 0;
833
Ben Greeare825b732012-04-04 06:01:29 +0000834 netdev->features = features;
Michał Mirosławe97d3202011-06-08 08:36:42 +0000835 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
836
837 if (netif_running(netdev))
838 e1000_reinit_locked(adapter);
839 else
840 e1000_reset(adapter);
841
842 return 0;
843}
844
Stephen Hemminger0e7614b2008-11-19 22:18:22 -0800845static const struct net_device_ops e1000_netdev_ops = {
846 .ndo_open = e1000_open,
847 .ndo_stop = e1000_close,
Stephen Hemminger00829822008-11-20 20:14:53 -0800848 .ndo_start_xmit = e1000_xmit_frame,
Stephen Hemminger0e7614b2008-11-19 22:18:22 -0800849 .ndo_get_stats = e1000_get_stats,
850 .ndo_set_rx_mode = e1000_set_rx_mode,
851 .ndo_set_mac_address = e1000_set_mac,
Jiri Pirko5622e402011-07-21 03:26:31 +0000852 .ndo_tx_timeout = e1000_tx_timeout,
Stephen Hemminger0e7614b2008-11-19 22:18:22 -0800853 .ndo_change_mtu = e1000_change_mtu,
854 .ndo_do_ioctl = e1000_ioctl,
855 .ndo_validate_addr = eth_validate_addr,
Stephen Hemminger0e7614b2008-11-19 22:18:22 -0800856 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
857 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
858#ifdef CONFIG_NET_POLL_CONTROLLER
859 .ndo_poll_controller = e1000_netpoll,
860#endif
Jiri Pirko5622e402011-07-21 03:26:31 +0000861 .ndo_fix_features = e1000_fix_features,
862 .ndo_set_features = e1000_set_features,
Stephen Hemminger0e7614b2008-11-19 22:18:22 -0800863};
864
Taku Izumi81250292008-07-11 15:17:44 -0700865/**
Jesse Brandeburge508be12010-09-07 21:01:12 +0000866 * e1000_init_hw_struct - initialize members of hw struct
867 * @adapter: board private struct
868 * @hw: structure used by e1000_hw.c
869 *
870 * Factors out initialization of the e1000_hw struct to its own function
871 * that can be called very early at init (just after struct allocation).
872 * Fields are initialized based on PCI device information and
873 * OS network device settings (MTU size).
874 * Returns negative error codes if MAC type setup fails.
875 */
876static int e1000_init_hw_struct(struct e1000_adapter *adapter,
877 struct e1000_hw *hw)
878{
879 struct pci_dev *pdev = adapter->pdev;
880
881 /* PCI config space info */
882 hw->vendor_id = pdev->vendor;
883 hw->device_id = pdev->device;
884 hw->subsystem_vendor_id = pdev->subsystem_vendor;
885 hw->subsystem_id = pdev->subsystem_device;
886 hw->revision_id = pdev->revision;
887
888 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
889
890 hw->max_frame_size = adapter->netdev->mtu +
891 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
892 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
893
894 /* identify the MAC */
895 if (e1000_set_mac_type(hw)) {
896 e_err(probe, "Unknown MAC Type\n");
897 return -EIO;
898 }
899
900 switch (hw->mac_type) {
901 default:
902 break;
903 case e1000_82541:
904 case e1000_82547:
905 case e1000_82541_rev_2:
906 case e1000_82547_rev_2:
907 hw->phy_init_script = 1;
908 break;
909 }
910
911 e1000_set_media_type(hw);
912 e1000_get_bus_info(hw);
913
914 hw->wait_autoneg_complete = false;
915 hw->tbi_compatibility_en = true;
916 hw->adaptive_ifs = true;
917
918 /* Copper options */
919
920 if (hw->media_type == e1000_media_type_copper) {
921 hw->mdix = AUTO_ALL_MODES;
922 hw->disable_polarity_correction = false;
923 hw->master_slave = E1000_MASTER_SLAVE;
924 }
925
926 return 0;
927}
928
929/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930 * e1000_probe - Device Initialization Routine
931 * @pdev: PCI device information struct
932 * @ent: entry in e1000_pci_tbl
933 *
934 * Returns 0 on success, negative on failure
935 *
936 * e1000_probe initializes an adapter identified by a pci_dev structure.
937 * The OS initialization, configuring of the adapter private structure,
938 * and a hardware reset occur.
939 **/
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +0000940static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700941{
942 struct net_device *netdev;
Tushar Dave125ca932017-12-06 02:26:29 +0530943 struct e1000_adapter *adapter = NULL;
Joe Perches1dc32912008-07-11 15:17:08 -0700944 struct e1000_hw *hw;
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700945
Janusz Wolaka48954c2015-09-17 23:34:29 +0200946 static int cards_found;
947 static int global_quad_port_a; /* global ksp3 port a indication */
Malli Chilakala2d7edb92005-04-28 19:43:52 -0700948 int i, err, pci_using_dac;
Joe Perches406874a2008-04-03 10:06:32 -0700949 u16 eeprom_data = 0;
Dirk Brandewie5377a412011-01-06 14:29:54 +0000950 u16 tmp = 0;
Joe Perches406874a2008-04-03 10:06:32 -0700951 u16 eeprom_apme_mask = E1000_EEPROM_APME;
Taku Izumi81250292008-07-11 15:17:44 -0700952 int bars, need_ioport;
Tushar Dave125ca932017-12-06 02:26:29 +0530953 bool disable_dev = false;
Joe Perches0795af52007-10-03 17:59:30 -0700954
Taku Izumi81250292008-07-11 15:17:44 -0700955 /* do not allocate ioport bars when not needed */
956 need_ioport = e1000_is_need_ioport(pdev);
957 if (need_ioport) {
958 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
959 err = pci_enable_device(pdev);
960 } else {
961 bars = pci_select_bars(pdev, IORESOURCE_MEM);
Karsten Keil4d7155b2009-02-03 15:18:01 -0800962 err = pci_enable_device_mem(pdev);
Taku Izumi81250292008-07-11 15:17:44 -0700963 }
Joe Perchesc7be73b2008-07-11 15:17:28 -0700964 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965 return err;
966
Taku Izumi81250292008-07-11 15:17:44 -0700967 err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
Joe Perchesc7be73b2008-07-11 15:17:28 -0700968 if (err)
Vasily Averin6dd62ab2006-08-28 14:56:22 -0700969 goto err_pci_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700970
971 pci_set_master(pdev);
Nick Nunleydbb5aae2010-02-03 14:49:48 +0000972 err = pci_save_state(pdev);
973 if (err)
974 goto err_alloc_etherdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975
Vasily Averin6dd62ab2006-08-28 14:56:22 -0700976 err = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700977 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
Vasily Averin6dd62ab2006-08-28 14:56:22 -0700978 if (!netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979 goto err_alloc_etherdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981 SET_NETDEV_DEV(netdev, &pdev->dev);
982
983 pci_set_drvdata(pdev, netdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -0700984 adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985 adapter->netdev = netdev;
986 adapter->pdev = pdev;
stephen hemmingerb3f4d592012-03-13 06:04:20 +0000987 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
Taku Izumi81250292008-07-11 15:17:44 -0700988 adapter->bars = bars;
989 adapter->need_ioport = need_ioport;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990
Joe Perches1dc32912008-07-11 15:17:08 -0700991 hw = &adapter->hw;
992 hw->back = adapter;
993
Vasily Averin6dd62ab2006-08-28 14:56:22 -0700994 err = -EIO;
Arjan van de Ven275f1652008-10-20 21:42:39 -0700995 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
Joe Perches1dc32912008-07-11 15:17:08 -0700996 if (!hw->hw_addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997 goto err_ioremap;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700998
Taku Izumi81250292008-07-11 15:17:44 -0700999 if (adapter->need_ioport) {
1000 for (i = BAR_1; i <= BAR_5; i++) {
1001 if (pci_resource_len(pdev, i) == 0)
1002 continue;
1003 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1004 hw->io_base = pci_resource_start(pdev, i);
1005 break;
1006 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001007 }
1008 }
1009
Jesse Brandeburge508be12010-09-07 21:01:12 +00001010 /* make ready for any if (hw->...) below */
1011 err = e1000_init_hw_struct(adapter, hw);
1012 if (err)
1013 goto err_sw_init;
1014
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001015 /* there is a workaround being applied below that limits
Jesse Brandeburge508be12010-09-07 21:01:12 +00001016 * 64-bit DMA addresses to 64-bit hardware. There are some
1017 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
1018 */
1019 pci_using_dac = 0;
1020 if ((hw->bus_type == e1000_bus_type_pcix) &&
Russell King9931a262013-06-26 23:49:11 +01001021 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
Jesse Brandeburge508be12010-09-07 21:01:12 +00001022 pci_using_dac = 1;
Jesse Brandeburge508be12010-09-07 21:01:12 +00001023 } else {
Russell King9931a262013-06-26 23:49:11 +01001024 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Dean Nelson19a0b672010-11-11 05:50:25 +00001025 if (err) {
1026 pr_err("No usable DMA config, aborting\n");
1027 goto err_dma;
1028 }
Jesse Brandeburge508be12010-09-07 21:01:12 +00001029 }
1030
Stephen Hemminger0e7614b2008-11-19 22:18:22 -08001031 netdev->netdev_ops = &e1000_netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032 e1000_set_ethtool_ops(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001033 netdev->watchdog_timeo = 5 * HZ;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001034 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
Stephen Hemminger0e7614b2008-11-19 22:18:22 -08001035
Auke Kok0eb5a342006-09-27 12:53:17 -07001036 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037
Linus Torvalds1da177e2005-04-16 15:20:36 -07001038 adapter->bd_number = cards_found;
1039
1040 /* setup the private structure */
1041
Joe Perchesc7be73b2008-07-11 15:17:28 -07001042 err = e1000_sw_init(adapter);
1043 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044 goto err_sw_init;
1045
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001046 err = -EIO;
Dirk Brandewie5377a412011-01-06 14:29:54 +00001047 if (hw->mac_type == e1000_ce4100) {
Florian Fainelli13acde82012-01-04 20:23:35 +00001048 hw->ce4100_gbe_mdio_base_virt =
1049 ioremap(pci_resource_start(pdev, BAR_1),
Janusz Wolaka48954c2015-09-17 23:34:29 +02001050 pci_resource_len(pdev, BAR_1));
Dirk Brandewie5377a412011-01-06 14:29:54 +00001051
Florian Fainelli13acde82012-01-04 20:23:35 +00001052 if (!hw->ce4100_gbe_mdio_base_virt)
Dirk Brandewie5377a412011-01-06 14:29:54 +00001053 goto err_mdio_ioremap;
1054 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001055
Joe Perches1dc32912008-07-11 15:17:08 -07001056 if (hw->mac_type >= e1000_82543) {
Michał Mirosławe97d3202011-06-08 08:36:42 +00001057 netdev->hw_features = NETIF_F_SG |
Jiri Pirko5622e402011-07-21 03:26:31 +00001058 NETIF_F_HW_CSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00001059 NETIF_F_HW_VLAN_CTAG_RX;
1060 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1061 NETIF_F_HW_VLAN_CTAG_FILTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001062 }
1063
Joe Perches1dc32912008-07-11 15:17:08 -07001064 if ((hw->mac_type >= e1000_82544) &&
1065 (hw->mac_type != e1000_82547))
Michał Mirosławe97d3202011-06-08 08:36:42 +00001066 netdev->hw_features |= NETIF_F_TSO;
1067
Ben Greear11a78dc2012-02-11 15:40:01 +00001068 netdev->priv_flags |= IFF_SUPP_NOFCS;
1069
Michał Mirosławe97d3202011-06-08 08:36:42 +00001070 netdev->features |= netdev->hw_features;
Tushar Dave75006732012-06-12 13:03:29 +00001071 netdev->hw_features |= (NETIF_F_RXCSUM |
1072 NETIF_F_RXALL |
1073 NETIF_F_RXFCS);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001074
Yi Zou7b872a52010-09-22 17:57:58 +00001075 if (pci_using_dac) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076 netdev->features |= NETIF_F_HIGHDMA;
Yi Zou7b872a52010-09-22 17:57:58 +00001077 netdev->vlan_features |= NETIF_F_HIGHDMA;
1078 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079
Tushar Dave75006732012-06-12 13:03:29 +00001080 netdev->vlan_features |= (NETIF_F_TSO |
1081 NETIF_F_HW_CSUM |
1082 NETIF_F_SG);
Patrick McHardy20501a62008-10-11 12:25:59 -07001083
Francesco Ruggeria22bb0b2014-10-22 15:29:24 +00001084 /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1085 if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1086 hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1087 netdev->priv_flags |= IFF_UNICAST_FLT;
Jiri Pirko01789342011-08-16 06:29:00 +00001088
Joe Perches1dc32912008-07-11 15:17:08 -07001089 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001090
Auke Kokcd94dd02006-06-27 09:08:22 -07001091 /* initialize eeprom parameters */
Joe Perches1dc32912008-07-11 15:17:08 -07001092 if (e1000_init_eeprom_params(hw)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001093 e_err(probe, "EEPROM initialization failed\n");
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001094 goto err_eeprom;
Auke Kokcd94dd02006-06-27 09:08:22 -07001095 }
1096
Jesse Brandeburg96838a42006-01-18 13:01:39 -08001097 /* before reading the EEPROM, reset the controller to
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001098 * put the device in a known good starting state
1099 */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08001100
Joe Perches1dc32912008-07-11 15:17:08 -07001101 e1000_reset_hw(hw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001102
1103 /* make sure the EEPROM is good */
Joe Perches1dc32912008-07-11 15:17:08 -07001104 if (e1000_validate_eeprom_checksum(hw) < 0) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001105 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
Auke Kok67b3c272007-12-17 13:50:23 -08001106 e1000_dump_eeprom(adapter);
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001107 /* set MAC address to all zeroes to invalidate and temporary
Auke Kok67b3c272007-12-17 13:50:23 -08001108 * disable this device for the user. This blocks regular
1109 * traffic while still permitting ethtool ioctls from reaching
1110 * the hardware as well as allowing the user to run the
1111 * interface after manually setting a hw addr using
1112 * `ip set address`
1113 */
Joe Perches1dc32912008-07-11 15:17:08 -07001114 memset(hw->mac_addr, 0, netdev->addr_len);
Auke Kok67b3c272007-12-17 13:50:23 -08001115 } else {
1116 /* copy the MAC address out of the EEPROM */
Joe Perches1dc32912008-07-11 15:17:08 -07001117 if (e1000_read_mac_addr(hw))
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001118 e_err(probe, "EEPROM Read Error\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001119 }
Joe Perchesdbedd442015-03-06 20:49:12 -08001120 /* don't block initialization here due to bad MAC address */
Joe Perches1dc32912008-07-11 15:17:08 -07001121 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001122
Jiri Pirkoaaeb6cd2013-01-08 01:38:26 +00001123 if (!is_valid_ether_addr(netdev->dev_addr))
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001124 e_err(probe, "Invalid MAC Address\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001125
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126
Jesse Brandeburga4010af2011-10-05 07:24:41 +00001127 INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1128 INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1129 e1000_82547_tx_fifo_stall_task);
1130 INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
David Howells65f27f32006-11-22 14:55:48 +00001131 INIT_WORK(&adapter->reset_task, e1000_reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 e1000_check_options(adapter);
1134
1135 /* Initial Wake on LAN setting
1136 * If APM wake is enabled in the EEPROM,
1137 * enable the ACPI Magic Packet filter
1138 */
1139
Joe Perches1dc32912008-07-11 15:17:08 -07001140 switch (hw->mac_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141 case e1000_82542_rev2_0:
1142 case e1000_82542_rev2_1:
1143 case e1000_82543:
1144 break;
1145 case e1000_82544:
Joe Perches1dc32912008-07-11 15:17:08 -07001146 e1000_read_eeprom(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1148 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1149 break;
1150 case e1000_82546:
1151 case e1000_82546_rev_3:
Janusz Wolaka48954c2015-09-17 23:34:29 +02001152 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
Joe Perches1dc32912008-07-11 15:17:08 -07001153 e1000_read_eeprom(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1155 break;
1156 }
1157 /* Fall Through */
1158 default:
Joe Perches1dc32912008-07-11 15:17:08 -07001159 e1000_read_eeprom(hw,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1161 break;
1162 }
Jesse Brandeburg96838a42006-01-18 13:01:39 -08001163 if (eeprom_data & eeprom_apme_mask)
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001164 adapter->eeprom_wol |= E1000_WUFC_MAG;
1165
1166 /* now that we have the eeprom settings, apply the special cases
1167 * where the eeprom may be wrong or the board simply won't support
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001168 * wake on lan on a particular port
1169 */
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001170 switch (pdev->device) {
1171 case E1000_DEV_ID_82546GB_PCIE:
1172 adapter->eeprom_wol = 0;
1173 break;
1174 case E1000_DEV_ID_82546EB_FIBER:
1175 case E1000_DEV_ID_82546GB_FIBER:
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001176 /* Wake events only supported on port A for dual fiber
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001177 * regardless of eeprom setting
1178 */
Joe Perches1dc32912008-07-11 15:17:08 -07001179 if (er32(STATUS) & E1000_STATUS_FUNC_1)
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001180 adapter->eeprom_wol = 0;
1181 break;
1182 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1183 /* if quad port adapter, disable WoL on all but port A */
1184 if (global_quad_port_a != 0)
1185 adapter->eeprom_wol = 0;
1186 else
Rusty Russell3db1cd52011-12-19 13:56:45 +00001187 adapter->quad_port_a = true;
Jesse Brandeburg120cd572006-08-31 14:27:46 -07001188 /* Reset for multiple quad port adapters */
1189 if (++global_quad_port_a == 4)
1190 global_quad_port_a = 0;
1191 break;
1192 }
1193
1194 /* initialize the wol settings based on the eeprom settings */
1195 adapter->wol = adapter->eeprom_wol;
\"Rafael J. Wysocki\de126482008-11-07 20:30:19 +00001196 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197
Dirk Brandewie5377a412011-01-06 14:29:54 +00001198 /* Auto detect PHY address */
1199 if (hw->mac_type == e1000_ce4100) {
1200 for (i = 0; i < 32; i++) {
1201 hw->phy_addr = i;
1202 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
Jean Sacren4e01f3a2015-09-19 05:08:40 -06001203
1204 if (tmp != 0 && tmp != 0xFF)
Dirk Brandewie5377a412011-01-06 14:29:54 +00001205 break;
1206 }
Jean Sacren4e01f3a2015-09-19 05:08:40 -06001207
1208 if (i >= 32)
1209 goto err_eeprom;
Dirk Brandewie5377a412011-01-06 14:29:54 +00001210 }
1211
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 /* reset the hardware with the new settings */
1213 e1000_reset(adapter);
1214
Auke Kok416b5d12007-06-01 10:22:39 -07001215 strcpy(netdev->name, "eth%d");
Joe Perchesc7be73b2008-07-11 15:17:28 -07001216 err = register_netdev(netdev);
1217 if (err)
Auke Kok416b5d12007-06-01 10:22:39 -07001218 goto err_register;
Auke Kok1314bbf2006-09-27 12:54:02 -07001219
Jiri Pirko52f55092012-03-20 18:10:01 +00001220 e1000_vlan_filter_on_off(adapter, false);
Jiri Pirko5622e402011-07-21 03:26:31 +00001221
Emil Tantilov675ad472010-04-27 14:02:58 +00001222 /* print bus type/speed/width info */
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001223 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
Joe Perches7837e582010-06-11 12:51:49 +00001224 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1225 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1226 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1227 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1228 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1229 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1230 netdev->dev_addr);
Emil Tantilov675ad472010-04-27 14:02:58 +00001231
Jesse Brandeburgeb62efd2009-04-17 20:44:36 +00001232 /* carrier off reporting is important to ethtool even BEFORE open */
1233 netif_carrier_off(netdev);
1234
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001235 e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236
1237 cards_found++;
1238 return 0;
1239
1240err_register:
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001241err_eeprom:
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00001242 e1000_phy_hw_reset(hw);
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001243
Joe Perches1dc32912008-07-11 15:17:08 -07001244 if (hw->flash_address)
1245 iounmap(hw->flash_address);
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001246 kfree(adapter->tx_ring);
1247 kfree(adapter->rx_ring);
Jesse Brandeburge508be12010-09-07 21:01:12 +00001248err_dma:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249err_sw_init:
Dirk Brandewie5377a412011-01-06 14:29:54 +00001250err_mdio_ioremap:
Florian Fainelli13acde82012-01-04 20:23:35 +00001251 iounmap(hw->ce4100_gbe_mdio_base_virt);
Joe Perches1dc32912008-07-11 15:17:08 -07001252 iounmap(hw->hw_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253err_ioremap:
Tushar Dave125ca932017-12-06 02:26:29 +05301254 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 free_netdev(netdev);
1256err_alloc_etherdev:
Taku Izumi81250292008-07-11 15:17:44 -07001257 pci_release_selected_regions(pdev, bars);
Vasily Averin6dd62ab2006-08-28 14:56:22 -07001258err_pci_reg:
Tushar Dave125ca932017-12-06 02:26:29 +05301259 if (!adapter || disable_dev)
1260 pci_disable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 return err;
1262}
1263
1264/**
1265 * e1000_remove - Device Removal Routine
1266 * @pdev: PCI device information struct
1267 *
1268 * e1000_remove is called by the PCI subsystem to alert the driver
Jean Sacrenb6fad9f2015-09-19 05:08:41 -06001269 * that it should release a PCI device. That could be caused by a
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 * Hot-Plug event, or because the driver is going to be removed from
1271 * memory.
1272 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05001273static void e1000_remove(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274{
1275 struct net_device *netdev = pci_get_drvdata(pdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -07001276 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07001277 struct e1000_hw *hw = &adapter->hw;
Tushar Dave125ca932017-12-06 02:26:29 +05301278 bool disable_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279
Jesse Brandeburga4010af2011-10-05 07:24:41 +00001280 e1000_down_and_stop(adapter);
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05001281 e1000_release_manageability(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001283 unregister_netdev(netdev);
1284
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00001285 e1000_phy_hw_reset(hw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -04001287 kfree(adapter->tx_ring);
1288 kfree(adapter->rx_ring);
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -04001289
Florian Fainelli1c267502012-01-04 20:23:34 +00001290 if (hw->mac_type == e1000_ce4100)
Florian Fainelli13acde82012-01-04 20:23:35 +00001291 iounmap(hw->ce4100_gbe_mdio_base_virt);
Joe Perches1dc32912008-07-11 15:17:08 -07001292 iounmap(hw->hw_addr);
1293 if (hw->flash_address)
1294 iounmap(hw->flash_address);
Taku Izumi81250292008-07-11 15:17:44 -07001295 pci_release_selected_regions(pdev, adapter->bars);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296
Tushar Dave125ca932017-12-06 02:26:29 +05301297 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 free_netdev(netdev);
1299
Tushar Dave125ca932017-12-06 02:26:29 +05301300 if (disable_dev)
1301 pci_disable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302}
1303
1304/**
1305 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1306 * @adapter: board private structure to initialize
1307 *
1308 * e1000_sw_init initializes the Adapter private data structure.
Jesse Brandeburge508be12010-09-07 21:01:12 +00001309 * e1000_init_hw_struct MUST be called before this function
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05001311static int e1000_sw_init(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312{
Auke Kokeb0f8052006-07-14 16:14:48 -07001313 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001315 adapter->num_tx_queues = 1;
1316 adapter->num_rx_queues = 1;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001317
1318 if (e1000_alloc_queues(adapter)) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001319 e_err(probe, "Unable to allocate memory for queues\n");
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001320 return -ENOMEM;
1321 }
1322
Herbert Xu47313052007-05-29 15:07:31 -07001323 /* Explicitly disable IRQ since the NIC can be in any state. */
Herbert Xu47313052007-05-29 15:07:31 -07001324 e1000_irq_disable(adapter);
1325
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 spin_lock_init(&adapter->stats_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327
Auke Kok1314bbf2006-09-27 12:54:02 -07001328 set_bit(__E1000_DOWN, &adapter->flags);
1329
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330 return 0;
1331}
1332
1333/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001334 * e1000_alloc_queues - Allocate memory for all rings
1335 * @adapter: board private structure to initialize
1336 *
1337 * We allocate one ring per queue at run-time since we don't know the
Wang Chen3e1d7cd2008-12-03 22:07:10 -08001338 * number of queues at compile-time.
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001339 **/
Bill Pemberton9f9a12f2012-12-03 09:24:25 -05001340static int e1000_alloc_queues(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001341{
Yan Burman1c7e5b12007-03-06 08:58:04 -08001342 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
Janusz Wolaka48954c2015-09-17 23:34:29 +02001343 sizeof(struct e1000_tx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001344 if (!adapter->tx_ring)
1345 return -ENOMEM;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001346
Yan Burman1c7e5b12007-03-06 08:58:04 -08001347 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
Janusz Wolaka48954c2015-09-17 23:34:29 +02001348 sizeof(struct e1000_rx_ring), GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001349 if (!adapter->rx_ring) {
1350 kfree(adapter->tx_ring);
1351 return -ENOMEM;
1352 }
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001353
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001354 return E1000_SUCCESS;
1355}
1356
1357/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 * e1000_open - Called when a network interface is made active
1359 * @netdev: network interface device structure
1360 *
1361 * Returns 0 on success, negative value on failure
1362 *
1363 * The open entry point is called when a network interface is made
1364 * active by the system (IFF_UP). At this point all resources needed
1365 * for transmit and receive operations are allocated, the interrupt
Jesse Brandeburga4010af2011-10-05 07:24:41 +00001366 * handler is registered with the OS, the watchdog task is started,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 * and the stack is notified that the interface is ready.
1368 **/
Stefan Assmann1f2f83f2016-02-03 09:20:51 +01001369int e1000_open(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370{
Malli Chilakala60490fe2005-06-17 17:41:45 -07001371 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07001372 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 int err;
1374
Auke Kok2db10a02006-06-27 09:06:28 -07001375 /* disallow open during test */
Auke Kok1314bbf2006-09-27 12:54:02 -07001376 if (test_bit(__E1000_TESTING, &adapter->flags))
Auke Kok2db10a02006-06-27 09:06:28 -07001377 return -EBUSY;
1378
Jesse Brandeburgeb62efd2009-04-17 20:44:36 +00001379 netif_carrier_off(netdev);
1380
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 /* allocate transmit descriptors */
Auke Koke0aac5a2007-03-06 08:57:21 -08001382 err = e1000_setup_all_tx_resources(adapter);
1383 if (err)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 goto err_setup_tx;
1385
1386 /* allocate receive descriptors */
Auke Koke0aac5a2007-03-06 08:57:21 -08001387 err = e1000_setup_all_rx_resources(adapter);
Linus Torvaldsb5bf28c2007-02-21 11:21:44 -08001388 if (err)
Auke Koke0aac5a2007-03-06 08:57:21 -08001389 goto err_setup_rx;
Linus Torvaldsb5bf28c2007-02-21 11:21:44 -08001390
Auke Kok79f05bf2006-06-27 09:06:32 -07001391 e1000_power_up_phy(adapter);
1392
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001393 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
Joe Perches1dc32912008-07-11 15:17:08 -07001394 if ((hw->mng_cookie.status &
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001395 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1396 e1000_update_mng_vlan(adapter);
1397 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398
Auke Koke0aac5a2007-03-06 08:57:21 -08001399 /* before we allocate an interrupt, we must be ready to handle it.
1400 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1401 * as soon as we call pci_request_irq, so we have to setup our
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001402 * clean_rx handler before we do so.
1403 */
Auke Koke0aac5a2007-03-06 08:57:21 -08001404 e1000_configure(adapter);
1405
1406 err = e1000_request_irq(adapter);
1407 if (err)
1408 goto err_req_irq;
1409
1410 /* From here on the code is the same as e1000_up() */
1411 clear_bit(__E1000_DOWN, &adapter->flags);
1412
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001413 napi_enable(&adapter->napi);
Herbert Xu47313052007-05-29 15:07:31 -07001414
Auke Koke0aac5a2007-03-06 08:57:21 -08001415 e1000_irq_enable(adapter);
1416
Ben Hutchings076152d2008-07-18 17:50:57 -07001417 netif_start_queue(netdev);
1418
Auke Koke0aac5a2007-03-06 08:57:21 -08001419 /* fire a link status change interrupt to start the watchdog */
Joe Perches1dc32912008-07-11 15:17:08 -07001420 ew32(ICS, E1000_ICS_LSC);
Auke Koke0aac5a2007-03-06 08:57:21 -08001421
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 return E1000_SUCCESS;
1423
Linus Torvaldsb5bf28c2007-02-21 11:21:44 -08001424err_req_irq:
Auke Koke0aac5a2007-03-06 08:57:21 -08001425 e1000_power_down_phy(adapter);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001426 e1000_free_all_rx_resources(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001427err_setup_rx:
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001428 e1000_free_all_tx_resources(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429err_setup_tx:
1430 e1000_reset(adapter);
1431
1432 return err;
1433}
1434
1435/**
1436 * e1000_close - Disables a network interface
1437 * @netdev: network interface device structure
1438 *
1439 * Returns 0, this is not allowed to fail
1440 *
1441 * The close entry point is called when an interface is de-activated
1442 * by the OS. The hardware is still under the drivers control, but
1443 * needs to be disabled. A global MAC reset is issued to stop the
1444 * hardware, and all transmit and receive resources are freed.
1445 **/
Stefan Assmann1f2f83f2016-02-03 09:20:51 +01001446int e1000_close(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447{
Malli Chilakala60490fe2005-06-17 17:41:45 -07001448 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07001449 struct e1000_hw *hw = &adapter->hw;
yzhu16a7d64e2013-11-23 07:07:40 +00001450 int count = E1000_CHECK_RESET_COUNT;
1451
1452 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1453 usleep_range(10000, 20000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
Auke Kok2db10a02006-06-27 09:06:28 -07001455 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 e1000_down(adapter);
Auke Kok79f05bf2006-06-27 09:06:32 -07001457 e1000_power_down_phy(adapter);
Auke Kok2db10a02006-06-27 09:06:28 -07001458 e1000_free_irq(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001460 e1000_free_all_tx_resources(adapter);
1461 e1000_free_all_rx_resources(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462
Bruce Allan46665602006-09-27 12:54:08 -07001463 /* kill manageability vlan ID if supported, but not if a vlan with
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001464 * the same ID is registered on the host OS (let 8021q kill it)
1465 */
Joe Perches1dc32912008-07-11 15:17:08 -07001466 if ((hw->mng_cookie.status &
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001467 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1468 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
Patrick McHardy80d5c362013-04-19 02:04:28 +00001469 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1470 adapter->mng_vlan_id);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001471 }
Jeff Kirsherb55ccb32006-01-12 16:50:30 -08001472
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 return 0;
1474}
1475
1476/**
1477 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1478 * @adapter: address of board private structure
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001479 * @start: address of beginning of memory
1480 * @len: length of memory
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 **/
Joe Perches64798842008-07-11 15:17:02 -07001482static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1483 unsigned long len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484{
Joe Perches1dc32912008-07-11 15:17:08 -07001485 struct e1000_hw *hw = &adapter->hw;
Joe Perchese982f172008-07-11 15:17:18 -07001486 unsigned long begin = (unsigned long)start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 unsigned long end = begin + len;
1488
Malli Chilakala26483452005-04-28 19:44:46 -07001489 /* First rev 82545 and 82546 need to not allow any memory
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001490 * write location to cross 64k boundary due to errata 23
1491 */
Joe Perches1dc32912008-07-11 15:17:08 -07001492 if (hw->mac_type == e1000_82545 ||
Dirk Brandewie5377a412011-01-06 14:29:54 +00001493 hw->mac_type == e1000_ce4100 ||
Joe Perches1dc32912008-07-11 15:17:08 -07001494 hw->mac_type == e1000_82546) {
Joe Perchesc3033b02008-03-21 11:06:25 -07001495 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 }
1497
Joe Perchesc3033b02008-03-21 11:06:25 -07001498 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499}
1500
1501/**
1502 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1503 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001504 * @txdr: tx descriptor ring (for a specific queue) to setup
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 *
1506 * Return 0 on success, negative on failure
1507 **/
Joe Perches64798842008-07-11 15:17:02 -07001508static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1509 struct e1000_tx_ring *txdr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 struct pci_dev *pdev = adapter->pdev;
1512 int size;
1513
Florian Westphal580f3212014-09-03 13:34:31 +00001514 size = sizeof(struct e1000_tx_buffer) * txdr->count;
Eric Dumazet89bf67f2010-11-22 00:15:06 +00001515 txdr->buffer_info = vzalloc(size);
Joe Perches14f8dc42013-02-07 11:46:27 +00001516 if (!txdr->buffer_info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518
1519 /* round up to nearest 4K */
1520
1521 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -07001522 txdr->size = ALIGN(txdr->size, 4096);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001524 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1525 GFP_KERNEL);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08001526 if (!txdr->desc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527setup_tx_desc_die:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 vfree(txdr->buffer_info);
1529 return -ENOMEM;
1530 }
1531
Malli Chilakala26483452005-04-28 19:44:46 -07001532 /* Fix for errata 23, can't cross 64kB boundary */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1534 void *olddesc = txdr->desc;
1535 dma_addr_t olddma = txdr->dma;
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001536 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
Emil Tantilov675ad472010-04-27 14:02:58 +00001537 txdr->size, txdr->desc);
Malli Chilakala26483452005-04-28 19:44:46 -07001538 /* Try again, without freeing the previous */
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001539 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1540 &txdr->dma, GFP_KERNEL);
Malli Chilakala26483452005-04-28 19:44:46 -07001541 /* Failed allocation, critical failure */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08001542 if (!txdr->desc) {
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001543 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1544 olddma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 goto setup_tx_desc_die;
1546 }
1547
1548 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1549 /* give up */
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001550 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1551 txdr->dma);
1552 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1553 olddma);
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001554 e_err(probe, "Unable to allocate aligned memory "
Emil Tantilov675ad472010-04-27 14:02:58 +00001555 "for the transmit descriptor ring\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 vfree(txdr->buffer_info);
1557 return -ENOMEM;
1558 } else {
Malli Chilakala26483452005-04-28 19:44:46 -07001559 /* Free old allocation, new allocation was successful */
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001560 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1561 olddma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 }
1563 }
1564 memset(txdr->desc, 0, txdr->size);
1565
1566 txdr->next_to_use = 0;
1567 txdr->next_to_clean = 0;
1568
1569 return 0;
1570}
1571
1572/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001573 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1574 * (Descriptors) for all queues
1575 * @adapter: board private structure
1576 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001577 * Return 0 on success, negative on failure
1578 **/
Joe Perches64798842008-07-11 15:17:02 -07001579int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001580{
1581 int i, err = 0;
1582
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001583 for (i = 0; i < adapter->num_tx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001584 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1585 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001586 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001587 for (i-- ; i >= 0; i--)
1588 e1000_free_tx_resources(adapter,
1589 &adapter->tx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001590 break;
1591 }
1592 }
1593
1594 return err;
1595}
1596
1597/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1599 * @adapter: board private structure
1600 *
1601 * Configure the Tx unit of the MAC after a reset.
1602 **/
Joe Perches64798842008-07-11 15:17:02 -07001603static void e1000_configure_tx(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604{
Joe Perches406874a2008-04-03 10:06:32 -07001605 u64 tdba;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001606 struct e1000_hw *hw = &adapter->hw;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00001607 u32 tdlen, tctl, tipg;
Joe Perches406874a2008-04-03 10:06:32 -07001608 u32 ipgr1, ipgr2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609
1610 /* Setup the HW Tx Head and Tail descriptor pointers */
1611
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001612 switch (adapter->num_tx_queues) {
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -04001613 case 1:
1614 default:
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001615 tdba = adapter->tx_ring[0].dma;
1616 tdlen = adapter->tx_ring[0].count *
1617 sizeof(struct e1000_tx_desc);
Joe Perches1dc32912008-07-11 15:17:08 -07001618 ew32(TDLEN, tdlen);
1619 ew32(TDBAH, (tdba >> 32));
1620 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1621 ew32(TDT, 0);
1622 ew32(TDH, 0);
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001623 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1624 E1000_TDH : E1000_82542_TDH);
1625 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1626 E1000_TDT : E1000_82542_TDT);
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -04001627 break;
1628 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629
1630 /* Set the default values for the Tx Inter Packet Gap timer */
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00001631 if ((hw->media_type == e1000_media_type_fiber ||
Jesse Brandeburgd89b6c62006-12-15 10:38:32 +01001632 hw->media_type == e1000_media_type_internal_serdes))
Jeff Kirsher0fadb052006-01-12 16:51:05 -08001633 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1634 else
1635 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1636
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001637 switch (hw->mac_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638 case e1000_82542_rev2_0:
1639 case e1000_82542_rev2_1:
1640 tipg = DEFAULT_82542_TIPG_IPGT;
Jeff Kirsher0fadb052006-01-12 16:51:05 -08001641 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1642 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 break;
1644 default:
Jeff Kirsher0fadb052006-01-12 16:51:05 -08001645 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1646 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1647 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648 }
Jeff Kirsher0fadb052006-01-12 16:51:05 -08001649 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1650 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
Joe Perches1dc32912008-07-11 15:17:08 -07001651 ew32(TIPG, tipg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652
1653 /* Set the Tx Interrupt Delay register */
1654
Joe Perches1dc32912008-07-11 15:17:08 -07001655 ew32(TIDV, adapter->tx_int_delay);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001656 if (hw->mac_type >= e1000_82540)
Joe Perches1dc32912008-07-11 15:17:08 -07001657 ew32(TADV, adapter->tx_abs_int_delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658
1659 /* Program the Transmit Control Register */
1660
Joe Perches1dc32912008-07-11 15:17:08 -07001661 tctl = er32(TCTL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662 tctl &= ~E1000_TCTL_CT;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08001663 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1665
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001666 e1000_config_collision_dist(hw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667
1668 /* Setup Transmit Descriptor Settings for eop descriptor */
Jesse Brandeburg6a042da2006-11-01 08:48:04 -08001669 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1670
1671 /* only set IDE if we are delaying interrupts using the timers */
1672 if (adapter->tx_int_delay)
1673 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001675 if (hw->mac_type < e1000_82543)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1677 else
1678 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1679
1680 /* Cache if we're 82544 running in PCI-X because we'll
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001681 * need this to apply a workaround later in the send path.
1682 */
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001683 if (hw->mac_type == e1000_82544 &&
1684 hw->bus_type == e1000_bus_type_pcix)
Rusty Russell3db1cd52011-12-19 13:56:45 +00001685 adapter->pcix_82544 = true;
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08001686
Joe Perches1dc32912008-07-11 15:17:08 -07001687 ew32(TCTL, tctl);
Jeff Kirsher7e6c9862006-03-02 18:19:30 -08001688
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689}
1690
1691/**
1692 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1693 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001694 * @rxdr: rx descriptor ring (for a specific queue) to setup
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 *
1696 * Returns 0 on success, negative on failure
1697 **/
Joe Perches64798842008-07-11 15:17:02 -07001698static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1699 struct e1000_rx_ring *rxdr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701 struct pci_dev *pdev = adapter->pdev;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001702 int size, desc_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
Florian Westphal93f0afe2014-09-03 13:34:26 +00001704 size = sizeof(struct e1000_rx_buffer) * rxdr->count;
Eric Dumazet89bf67f2010-11-22 00:15:06 +00001705 rxdr->buffer_info = vzalloc(size);
Joe Perches14f8dc42013-02-07 11:46:27 +00001706 if (!rxdr->buffer_info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00001709 desc_len = sizeof(struct e1000_rx_desc);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001710
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 /* Round up to nearest 4K */
1712
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001713 rxdr->size = rxdr->count * desc_len;
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -07001714 rxdr->size = ALIGN(rxdr->size, 4096);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001716 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1717 GFP_KERNEL);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001718 if (!rxdr->desc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719setup_rx_desc_die:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720 vfree(rxdr->buffer_info);
1721 return -ENOMEM;
1722 }
1723
Malli Chilakala26483452005-04-28 19:44:46 -07001724 /* Fix for errata 23, can't cross 64kB boundary */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1726 void *olddesc = rxdr->desc;
1727 dma_addr_t olddma = rxdr->dma;
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001728 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
Emil Tantilov675ad472010-04-27 14:02:58 +00001729 rxdr->size, rxdr->desc);
Malli Chilakala26483452005-04-28 19:44:46 -07001730 /* Try again, without freeing the previous */
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001731 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1732 &rxdr->dma, GFP_KERNEL);
Malli Chilakala26483452005-04-28 19:44:46 -07001733 /* Failed allocation, critical failure */
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001734 if (!rxdr->desc) {
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001735 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1736 olddma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 goto setup_rx_desc_die;
1738 }
1739
1740 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1741 /* give up */
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001742 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1743 rxdr->dma);
1744 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1745 olddma);
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001746 e_err(probe, "Unable to allocate aligned memory for "
1747 "the Rx descriptor ring\n");
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001748 goto setup_rx_desc_die;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 } else {
Malli Chilakala26483452005-04-28 19:44:46 -07001750 /* Free old allocation, new allocation was successful */
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001751 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1752 olddma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 }
1754 }
1755 memset(rxdr->desc, 0, rxdr->size);
1756
1757 rxdr->next_to_clean = 0;
1758 rxdr->next_to_use = 0;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00001759 rxdr->rx_skb_top = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760
1761 return 0;
1762}
1763
1764/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001765 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1766 * (Descriptors) for all queues
1767 * @adapter: board private structure
1768 *
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001769 * Return 0 on success, negative on failure
1770 **/
Joe Perches64798842008-07-11 15:17:02 -07001771int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001772{
1773 int i, err = 0;
1774
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001775 for (i = 0; i < adapter->num_rx_queues; i++) {
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001776 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1777 if (err) {
Emil Tantilovfeb8f472010-07-26 23:37:21 -07001778 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
Vasily Averin3fbbc722006-08-28 14:56:24 -07001779 for (i-- ; i >= 0; i--)
1780 e1000_free_rx_resources(adapter,
1781 &adapter->rx_ring[i]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001782 break;
1783 }
1784 }
1785
1786 return err;
1787}
1788
1789/**
Malli Chilakala26483452005-04-28 19:44:46 -07001790 * e1000_setup_rctl - configure the receive control registers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 * @adapter: Board private structure
1792 **/
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 *
 * Builds the desired RCTL value from the current adapter state (MTU,
 * buffer size, TBI workaround, RXALL feature) and writes it once at
 * the end.
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	/* clear the multicast-offset field before re-inserting it below */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* TBI compatibility requires accepting bad packets (SBP) */
	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	/* long packet enable only when MTU exceeds standard Ethernet */
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes: BSEX selects the extended (4x) size table,
	 * so the 2048 case must clear it again.
	 */
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
1855
1856/**
1857 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1858 * @adapter: board private structure
1859 *
1860 * Configure the Rx unit of the MAC after a reset.
1861 **/
Joe Perches64798842008-07-11 15:17:02 -07001862static void e1000_configure_rx(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863{
Joe Perches406874a2008-04-03 10:06:32 -07001864 u64 rdba;
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001865 struct e1000_hw *hw = &adapter->hw;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00001866 u32 rdlen, rctl, rxcsum;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001867
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00001868 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1869 rdlen = adapter->rx_ring[0].count *
Janusz Wolaka48954c2015-09-17 23:34:29 +02001870 sizeof(struct e1000_rx_desc);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00001871 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1872 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1873 } else {
1874 rdlen = adapter->rx_ring[0].count *
Janusz Wolaka48954c2015-09-17 23:34:29 +02001875 sizeof(struct e1000_rx_desc);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00001876 adapter->clean_rx = e1000_clean_rx_irq;
1877 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1878 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879
1880 /* disable receives while setting up the descriptors */
Joe Perches1dc32912008-07-11 15:17:08 -07001881 rctl = er32(RCTL);
1882 ew32(RCTL, rctl & ~E1000_RCTL_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883
1884 /* set the Receive Delay Timer Register */
Joe Perches1dc32912008-07-11 15:17:08 -07001885 ew32(RDTR, adapter->rx_int_delay);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001887 if (hw->mac_type >= e1000_82540) {
Joe Perches1dc32912008-07-11 15:17:08 -07001888 ew32(RADV, adapter->rx_abs_int_delay);
Jesse Brandeburg835bb122006-11-01 08:48:13 -08001889 if (adapter->itr_setting != 0)
Joe Perches1dc32912008-07-11 15:17:08 -07001890 ew32(ITR, 1000000000 / (adapter->itr * 256));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 }
1892
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001893 /* Setup the HW Rx Head and Tail Descriptor Pointers and
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001894 * the Base and Length of the Rx Descriptor Ring
1895 */
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001896 switch (adapter->num_rx_queues) {
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -04001897 case 1:
1898 default:
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001899 rdba = adapter->rx_ring[0].dma;
Joe Perches1dc32912008-07-11 15:17:08 -07001900 ew32(RDLEN, rdlen);
1901 ew32(RDBAH, (rdba >> 32));
1902 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1903 ew32(RDT, 0);
1904 ew32(RDH, 0);
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00001905 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1906 E1000_RDH : E1000_82542_RDH);
1907 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1908 E1000_RDT : E1000_82542_RDT);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001909 break;
Mallikarjuna R Chilakala24025e4ec2005-10-04 07:03:23 -04001910 }
1911
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001913 if (hw->mac_type >= e1000_82543) {
Joe Perches1dc32912008-07-11 15:17:08 -07001914 rxcsum = er32(RXCSUM);
Brandeburg, Jesse630b25c2008-09-16 13:01:28 -07001915 if (adapter->rx_csum)
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001916 rxcsum |= E1000_RXCSUM_TUOFL;
Brandeburg, Jesse630b25c2008-09-16 13:01:28 -07001917 else
Malli Chilakala2d7edb92005-04-28 19:43:52 -07001918 /* don't need to clear IPPCSE as it defaults to 0 */
Brandeburg, Jesse630b25c2008-09-16 13:01:28 -07001919 rxcsum &= ~E1000_RXCSUM_TUOFL;
Joe Perches1dc32912008-07-11 15:17:08 -07001920 ew32(RXCSUM, rxcsum);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 }
1922
1923 /* Enable Receives */
Dean Nelsond5bc77a2011-09-16 16:52:54 +00001924 ew32(RCTL, rctl | E1000_RCTL_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925}
1926
1927/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001928 * e1000_free_tx_resources - Free Tx Resources per Queue
1929 * @adapter: board private structure
1930 * @tx_ring: Tx descriptor ring for a specific queue
1931 *
1932 * Free all transmit software resources
1933 **/
Joe Perches64798842008-07-11 15:17:02 -07001934static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1935 struct e1000_tx_ring *tx_ring)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001936{
1937 struct pci_dev *pdev = adapter->pdev;
1938
1939 e1000_clean_tx_ring(adapter, tx_ring);
1940
1941 vfree(tx_ring->buffer_info);
1942 tx_ring->buffer_info = NULL;
1943
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001944 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1945 tx_ring->dma);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001946
1947 tx_ring->desc = NULL;
1948}
1949
1950/**
1951 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 * @adapter: board private structure
1953 *
1954 * Free all transmit software resources
1955 **/
Joe Perches64798842008-07-11 15:17:02 -07001956void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957{
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001958 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
Jeff Kirsherf56799e2006-01-12 16:50:39 -08001960 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001961 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962}
1963
Florian Westphal580f3212014-09-03 13:34:31 +00001964static void
1965e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1966 struct e1000_tx_buffer *buffer_info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967{
Alexander Duyck602c0552009-12-02 16:46:00 +00001968 if (buffer_info->dma) {
1969 if (buffer_info->mapped_as_page)
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001970 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1971 buffer_info->length, DMA_TO_DEVICE);
Alexander Duyck602c0552009-12-02 16:46:00 +00001972 else
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001973 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
Alexander Duyck602c0552009-12-02 16:46:00 +00001974 buffer_info->length,
Nick Nunleyb16f53b2010-04-27 13:08:45 +00001975 DMA_TO_DEVICE);
Alexander Duyck602c0552009-12-02 16:46:00 +00001976 buffer_info->dma = 0;
1977 }
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001978 if (buffer_info->skb) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 dev_kfree_skb_any(buffer_info->skb);
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001980 buffer_info->skb = NULL;
1981 }
Alexander Duyck37e73df2009-03-25 21:58:45 +00001982 buffer_info->time_stamp = 0;
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08001983 /* buffer_info must be completely set up in the transmit path */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984}
1985
1986/**
1987 * e1000_clean_tx_ring - Free Tx Buffers
1988 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04001989 * @tx_ring: ring to be cleaned
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 **/
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 *
 * Releases every buffer still attached to the ring, resets the BQL
 * queue state, zeroes the software and hardware descriptor state, and
 * rewinds the hardware head/tail pointers.
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	/* reset byte-queue-limit accounting for this netdev */
	netdev_reset_queue(adapter->netdev);
	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = false;

	/* rewind hardware head and tail to the start of the ring */
	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
}
2021
2022/**
2023 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2024 * @adapter: board private structure
2025 **/
Joe Perches64798842008-07-11 15:17:02 -07002026static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002027{
2028 int i;
2029
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002030 for (i = 0; i < adapter->num_tx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002031 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032}
2033
2034/**
2035 * e1000_free_rx_resources - Free Rx Resources
2036 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002037 * @rx_ring: ring to clean the resources from
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 *
2039 * Free all receive software resources
2040 **/
Joe Perches64798842008-07-11 15:17:02 -07002041static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2042 struct e1000_rx_ring *rx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 struct pci_dev *pdev = adapter->pdev;
2045
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002046 e1000_clean_rx_ring(adapter, rx_ring);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047
2048 vfree(rx_ring->buffer_info);
2049 rx_ring->buffer_info = NULL;
2050
Nick Nunleyb16f53b2010-04-27 13:08:45 +00002051 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2052 rx_ring->dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053
2054 rx_ring->desc = NULL;
2055}
2056
2057/**
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002058 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 * @adapter: board private structure
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002060 *
2061 * Free all receive software resources
2062 **/
Joe Perches64798842008-07-11 15:17:02 -07002063void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002064{
2065 int i;
2066
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002067 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002068 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2069}
2070
Florian Westphal13809602014-09-03 13:34:36 +00002071#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2072static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2073{
2074 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2075 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2076}
2077
2078static void *e1000_alloc_frag(const struct e1000_adapter *a)
2079{
2080 unsigned int len = e1000_frag_len(a);
2081 u8 *data = netdev_alloc_frag(len);
2082
2083 if (likely(data))
2084 data += E1000_HEADROOM;
2085 return data;
2086}
2087
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002088/**
2089 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2090 * @adapter: board private structure
2091 * @rx_ring: ring to free buffers from
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 **/
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 *
 * Unmaps and frees every receive buffer, choosing the unmap primitive
 * by which clean_rx handler is installed (normal path uses single
 * mappings of frag data; jumbo path uses page mappings), then resets
 * software and hardware ring state.
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_rx_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx netfrags */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (adapter->clean_rx == e1000_clean_rx_irq) {
			/* normal path: frag data mapped with dma_map_single */
			if (buffer_info->dma)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			if (buffer_info->rxbuf.data) {
				skb_free_frag(buffer_info->rxbuf.data);
				buffer_info->rxbuf.data = NULL;
			}
		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
			/* jumbo path: whole pages mapped with dma_map_page */
			if (buffer_info->dma)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       adapter->rx_buffer_len,
					       DMA_FROM_DEVICE);
			if (buffer_info->rxbuf.page) {
				put_page(buffer_info->rxbuf.page);
				buffer_info->rxbuf.page = NULL;
			}
		}

		buffer_info->dma = 0;
	}

	/* there also may be some cached data from a chained receive */
	napi_free_frags(&adapter->napi);
	rx_ring->rx_skb_top = NULL;

	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	/* rewind hardware head and tail to the start of the ring */
	writel(0, hw->hw_addr + rx_ring->rdh);
	writel(0, hw->hw_addr + rx_ring->rdt);
}
2144
2145/**
2146 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2147 * @adapter: board private structure
2148 **/
Joe Perches64798842008-07-11 15:17:02 -07002149static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002150{
2151 int i;
2152
Jeff Kirsherf56799e2006-01-12 16:50:39 -08002153 for (i = 0; i < adapter->num_rx_queues; i++)
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04002154 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155}
2156
2157/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2158 * and memory write and invalidate disabled for certain operations
2159 */
/* Put the 82542 2.0 receive unit into reset.  The 82542 2.0
 * (revision 2) needs the receive unit in reset and memory write and
 * invalidate disabled for certain operations (e.g. writing receive
 * address registers); pair with e1000_leave_82542_rst().
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	/* MWI must be off while the receive unit is held in reset */
	e1000_pci_clear_mwi(hw);

	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	/* give the hardware time to complete the reset */
	mdelay(5);

	/* receives are dead during reset; drop anything still queued */
	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
2177
/* Take the 82542 2.0 receive unit back out of reset and restore the
 * state torn down by e1000_enter_82542_rst(): re-enable MWI if the
 * PCI command word had it, then reconfigure and refill the Rx ring.
 */
static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	/* give the hardware time to come out of reset */
	mdelay(5);

	/* restore MWI only if it was enabled before entering reset */
	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
2200
2201/**
2202 * e1000_set_mac - Change the Ethernet Address of the NIC
2203 * @netdev: network interface device structure
2204 * @p: pointer to an address structure
2205 *
2206 * Returns 0 on success, negative on failure
2207 **/
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Validates the new address, copies it into both the netdev and the
 * hardware state, and programs receive address register 0.  The 82542
 * rev 2.0 must be bracketed by its receive-unit reset sequence to
 * write the receive address registers.
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);

	/* RAR 0 always holds the station MAC address */
	e1000_rar_set(hw, hw->mac_addr, 0);

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}
2232
2233/**
Patrick McHardydb0ce502007-11-13 20:54:59 -08002234 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 * @netdev: network interface device structure
2236 *
Patrick McHardydb0ce502007-11-13 20:54:59 -08002237 * The set_rx_mode entry point is called whenever the unicast or multicast
2238 * address lists or the network interface flags are updated. This routine is
2239 * responsible for configuring the hardware for proper unicast, multicast,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 * promiscuous mode, and all-multi behavior.
2241 **/
Joe Perches64798842008-07-11 15:17:02 -07002242static void e1000_set_rx_mode(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243{
Malli Chilakala60490fe2005-06-17 17:41:45 -07002244 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 struct e1000_hw *hw = &adapter->hw;
Jiri Pirkoccffad252009-05-22 23:22:17 +00002246 struct netdev_hw_addr *ha;
2247 bool use_uc = false;
Joe Perches406874a2008-04-03 10:06:32 -07002248 u32 rctl;
2249 u32 hash_value;
Mallikarjuna R Chilakala868d5302005-10-04 06:58:59 -04002250 int i, rar_entries = E1000_RAR_ENTRIES;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002251 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002252 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2253
Joe Perches14f8dc42013-02-07 11:46:27 +00002254 if (!mcarray)
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002255 return;
Auke Kokcd94dd02006-06-27 09:08:22 -07002256
Malli Chilakala26483452005-04-28 19:44:46 -07002257 /* Check for Promiscuous and All Multicast modes */
2258
Joe Perches1dc32912008-07-11 15:17:08 -07002259 rctl = er32(RCTL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002261 if (netdev->flags & IFF_PROMISC) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
Patrick McHardy746b9f02008-07-16 20:15:45 -07002263 rctl &= ~E1000_RCTL_VFE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 } else {
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002265 if (netdev->flags & IFF_ALLMULTI)
Patrick McHardy746b9f02008-07-16 20:15:45 -07002266 rctl |= E1000_RCTL_MPE;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002267 else
Patrick McHardy746b9f02008-07-16 20:15:45 -07002268 rctl &= ~E1000_RCTL_MPE;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002269 /* Enable VLAN filter if there is a VLAN */
Jiri Pirko5622e402011-07-21 03:26:31 +00002270 if (e1000_vlan_used(adapter))
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00002271 rctl |= E1000_RCTL_VFE;
Patrick McHardydb0ce502007-11-13 20:54:59 -08002272 }
2273
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002274 if (netdev_uc_count(netdev) > rar_entries - 1) {
Patrick McHardydb0ce502007-11-13 20:54:59 -08002275 rctl |= E1000_RCTL_UPE;
2276 } else if (!(netdev->flags & IFF_PROMISC)) {
2277 rctl &= ~E1000_RCTL_UPE;
Jiri Pirkoccffad252009-05-22 23:22:17 +00002278 use_uc = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 }
2280
Joe Perches1dc32912008-07-11 15:17:08 -07002281 ew32(RCTL, rctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282
2283 /* 82542 2.0 needs to be in reset to write receive address registers */
2284
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002285 if (hw->mac_type == e1000_82542_rev2_0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286 e1000_enter_82542_rst(adapter);
2287
Patrick McHardydb0ce502007-11-13 20:54:59 -08002288 /* load the first 14 addresses into the exact filters 1-14. Unicast
2289 * addresses take precedence to avoid disabling unicast filtering
2290 * when possible.
2291 *
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04002292 * RAR 0 is used for the station MAC address
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 * if there are not 14 addresses, go ahead and clear the filters
2294 */
Jiri Pirkoccffad252009-05-22 23:22:17 +00002295 i = 1;
2296 if (use_uc)
Jiri Pirko32e7bfc2010-01-25 13:36:10 -08002297 netdev_for_each_uc_addr(ha, netdev) {
Jiri Pirkoccffad252009-05-22 23:22:17 +00002298 if (i == rar_entries)
2299 break;
2300 e1000_rar_set(hw, ha->addr, i++);
2301 }
2302
Jiri Pirko22bedad32010-04-01 21:22:57 +00002303 netdev_for_each_mc_addr(ha, netdev) {
Jiri Pirko7a81e9f2010-02-22 09:10:44 +00002304 if (i == rar_entries) {
2305 /* load any remaining addresses into the hash table */
2306 u32 hash_reg, hash_bit, mta;
Jiri Pirko22bedad32010-04-01 21:22:57 +00002307 hash_value = e1000_hash_mc_addr(hw, ha->addr);
Jiri Pirko7a81e9f2010-02-22 09:10:44 +00002308 hash_reg = (hash_value >> 5) & 0x7F;
2309 hash_bit = hash_value & 0x1F;
2310 mta = (1 << hash_bit);
2311 mcarray[hash_reg] |= mta;
Jiri Pirko10886af2010-02-23 01:19:22 -08002312 } else {
Jiri Pirko22bedad32010-04-01 21:22:57 +00002313 e1000_rar_set(hw, ha->addr, i++);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 }
2315 }
2316
Jiri Pirko7a81e9f2010-02-22 09:10:44 +00002317 for (; i < rar_entries; i++) {
2318 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2319 E1000_WRITE_FLUSH();
2320 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2321 E1000_WRITE_FLUSH();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 }
2323
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002324 /* write the hash table completely, write from bottom to avoid
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002325 * both stupid write combining chipsets, and flushing each write
2326 */
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002327 for (i = mta_reg_count - 1; i >= 0 ; i--) {
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002328 /* If we are on an 82544 has an errata where writing odd
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002329 * offsets overwrites the previous even offset, but writing
2330 * backwards over the range solves the issue by always
2331 * writing the odd offset first
2332 */
2333 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2334 }
2335 E1000_WRITE_FLUSH();
2336
Jesse Brandeburg96838a42006-01-18 13:01:39 -08002337 if (hw->mac_type == e1000_82542_rev2_0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 e1000_leave_82542_rst(adapter);
Jesse Brandeburg81c52282009-04-04 16:36:53 -07002339
2340 kfree(mcarray);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341}
2342
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002343/**
2344 * e1000_update_phy_info_task - get phy info
2345 * @work: work struct contained inside adapter struct
2346 *
2347 * Need to wait a few seconds after link up to get diagnostic information from
2348 * the phy
2349 */
Jesse Brandeburg5cf42fc2010-09-22 18:22:17 +00002350static void e1000_update_phy_info_task(struct work_struct *work)
2351{
2352 struct e1000_adapter *adapter = container_of(work,
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002353 struct e1000_adapter,
2354 phy_info_task.work);
Vladimir Davydovb2f963bf2013-11-23 07:17:56 +00002355
Jesse Brandeburga4010af2011-10-05 07:24:41 +00002356 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357}
2358
2359/**
Jesse Brandeburg5cf42fc2010-09-22 18:22:17 +00002360 * e1000_82547_tx_fifo_stall_task - task to complete work
2361 * @work: work struct contained inside adapter struct
2362 **/
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 *
 * When an 82547 Tx FIFO stall is flagged, wait until both the
 * descriptor ring and the on-chip FIFO are fully drained, then reset
 * the FIFO pointers with transmit disabled and wake the queue.  If
 * the FIFO has not drained yet, reschedule unless the adapter is
 * going down.
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	if (atomic_read(&adapter->tx_fifo_stall)) {
		/* ring empty (TDT==TDH) and FIFO drained (head==tail,
		 * saved head==saved tail)
		 */
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			/* disable transmit while rewinding FIFO pointers */
			tctl = er32(TCTL);
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			/* not drained yet - retry on the next tick */
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
}
2393
Nick Nunleyb5481922010-02-03 14:49:28 +00002394bool e1000_has_link(struct e1000_adapter *adapter)
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002395{
2396 struct e1000_hw *hw = &adapter->hw;
2397 bool link_active = false;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002398
Nicolas Schichan6d9e5132011-07-09 00:24:18 +00002399 /* get_link_status is set on LSC (link status) interrupt or rx
2400 * sequence error interrupt (except on intel ce4100).
2401 * get_link_status will stay false until the
2402 * e1000_check_for_link establishes link for copper adapters
2403 * ONLY
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002404 */
2405 switch (hw->media_type) {
2406 case e1000_media_type_copper:
Nicolas Schichan6d9e5132011-07-09 00:24:18 +00002407 if (hw->mac_type == e1000_ce4100)
2408 hw->get_link_status = 1;
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002409 if (hw->get_link_status) {
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07002410 e1000_check_for_link(hw);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002411 link_active = !hw->get_link_status;
2412 } else {
2413 link_active = true;
2414 }
2415 break;
2416 case e1000_media_type_fiber:
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07002417 e1000_check_for_link(hw);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002418 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2419 break;
2420 case e1000_media_type_internal_serdes:
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07002421 e1000_check_for_link(hw);
Jesse Brandeburgbe0f0712009-09-25 12:17:44 +00002422 link_active = hw->serdes_has_link;
2423 break;
2424 default:
2425 break;
2426 }
2427
2428 return link_active;
2429}
2430
/**
 * e1000_watchdog - work function
 * @work: work struct contained inside adapter struct
 *
 * Periodic (2*HZ) housekeeping: detects link up/down transitions,
 * updates statistics and the adaptive IFS/ITR machinery, schedules a
 * controller reset if Tx work is queued while link is down, and
 * finally reschedules itself unless the interface is going down.
 **/
static void e1000_watchdog(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     watchdog_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_ring *txdr = adapter->tx_ring;
	u32 link, tctl;

	link = e1000_has_link(adapter);
	/* carrier already up and still have link: skip straight to stats */
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			/* link just came up */
			u32 ctrl;
			/* NOTE(review): txb2b is computed below but not
			 * consumed anywhere in this function
			 */
			bool txb2b = true;
			/* update snapshot of PHY registers on LSC */
			e1000_get_speed_and_duplex(hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);

			ctrl = er32(CTRL);
			pr_info("%s NIC Link is Up %d Mbps %s, "
				"Flow Control: %s\n",
				netdev->name,
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"Full Duplex" : "Half Duplex",
				((ctrl & E1000_CTRL_TFCE) && (ctrl &
				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
				E1000_CTRL_TFCE) ? "TX" : "None")));

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				txb2b = false;
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				txb2b = false;
				/* maybe add some timeout factor ? */
				break;
			}

			/* enable transmits in the hardware */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			netif_carrier_on(netdev);
			/* refresh PHY info shortly after link-up */
			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
			adapter->smartspeed = 0;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			/* link just went down */
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			pr_info("%s NIC Link is Down\n",
				netdev->name);
			netif_carrier_off(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
		}

		e1000_smartspeed(adapter);
	}

link_up:
	e1000_update_stats(adapter);

	/* compute deltas since the last watchdog run for adaptive IFS/ITR */
	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000_update_adaptive(hw);

	if (!netif_carrier_ok(netdev)) {
		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* exit immediately since reset is imminent */
			return;
		}
	}

	/* Simple mode for Interrupt Throttle Rate (ITR) */
	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
		/* Symmetric Tx/Rx gets a reduced ITR=2000;
		 * Total asymmetrical Tx or Rx gets ITR=8000;
		 * everyone else is between 2000-8000.
		 */
		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
		u32 dif = (adapter->gotcl > adapter->gorcl ?
			   adapter->gotcl - adapter->gorcl :
			   adapter->gorcl - adapter->gotcl) / 10000;
		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

		ew32(ITR, 1000000000 / (itr * 256));
	}

	/* Cause software interrupt to ensure rx ring is cleaned */
	ew32(ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* Reschedule the task */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
}
2564
/* Traffic classes used by the dynamic ITR algorithm; see
 * e1000_update_itr() and e1000_set_itr() below.
 */
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
2571
2572/**
2573 * e1000_update_itr - update the dynamic ITR value based on statistics
Jesse Brandeburg8fce4732009-09-25 12:18:41 +00002574 * @adapter: pointer to adapter
2575 * @itr_setting: current adapter->itr
2576 * @packets: the number of packets during this measurement interval
2577 * @bytes: the number of bytes during this measurement interval
2578 *
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002579 * Stores a new ITR value based on packets and byte
2580 * counts during the last interrupt. The advantage of per interrupt
2581 * computation is faster updates and more accurate ITR for the current
2582 * traffic pattern. Constants in this function were computed
2583 * based on theoretical maximum wire speed and thresholds were set based
2584 * on testing data as well as attempting to minimize response time
2585 * while increasing bulk throughput.
2586 * this functionality is controlled by the InterruptThrottleRate module
2587 * parameter (see e1000_param.c)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002588 **/
2589static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
Joe Perches64798842008-07-11 15:17:02 -07002590 u16 itr_setting, int packets, int bytes)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002591{
2592 unsigned int retval = itr_setting;
2593 struct e1000_hw *hw = &adapter->hw;
2594
2595 if (unlikely(hw->mac_type < e1000_82540))
2596 goto update_itr_done;
2597
2598 if (packets == 0)
2599 goto update_itr_done;
2600
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002601 switch (itr_setting) {
2602 case lowest_latency:
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002603 /* jumbo frames get bulk treatment*/
2604 if (bytes/packets > 8000)
2605 retval = bulk_latency;
2606 else if ((packets < 5) && (bytes > 512))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002607 retval = low_latency;
2608 break;
2609 case low_latency: /* 50 usec aka 20000 ints/s */
2610 if (bytes > 10000) {
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002611 /* jumbo frames need bulk latency setting */
2612 if (bytes/packets > 8000)
2613 retval = bulk_latency;
2614 else if ((packets < 10) || ((bytes/packets) > 1200))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002615 retval = bulk_latency;
2616 else if ((packets > 35))
2617 retval = lowest_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002618 } else if (bytes/packets > 2000)
2619 retval = bulk_latency;
2620 else if (packets <= 2 && bytes < 512)
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002621 retval = lowest_latency;
2622 break;
2623 case bulk_latency: /* 250 usec aka 4000 ints/s */
2624 if (bytes > 25000) {
2625 if (packets > 35)
2626 retval = low_latency;
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002627 } else if (bytes < 6000) {
2628 retval = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002629 }
2630 break;
2631 }
2632
2633update_itr_done:
2634 return retval;
2635}
2636
2637static void e1000_set_itr(struct e1000_adapter *adapter)
2638{
2639 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07002640 u16 current_itr;
2641 u32 new_itr = adapter->itr;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002642
2643 if (unlikely(hw->mac_type < e1000_82540))
2644 return;
2645
2646 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2647 if (unlikely(adapter->link_speed != SPEED_1000)) {
2648 current_itr = 0;
2649 new_itr = 4000;
2650 goto set_itr_now;
2651 }
2652
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002653 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2654 adapter->total_tx_packets,
2655 adapter->total_tx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002656 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2657 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2658 adapter->tx_itr = low_latency;
2659
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002660 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2661 adapter->total_rx_packets,
2662 adapter->total_rx_bytes);
Jesse Brandeburg2b653262006-12-15 10:30:44 +01002663 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2664 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2665 adapter->rx_itr = low_latency;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002666
2667 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2668
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002669 switch (current_itr) {
2670 /* counts and packets in update_itr are dependent on these numbers */
2671 case lowest_latency:
2672 new_itr = 70000;
2673 break;
2674 case low_latency:
2675 new_itr = 20000; /* aka hwitr = ~200 */
2676 break;
2677 case bulk_latency:
2678 new_itr = 4000;
2679 break;
2680 default:
2681 break;
2682 }
2683
2684set_itr_now:
2685 if (new_itr != adapter->itr) {
2686 /* this attempts to bias the interrupt rate towards Bulk
2687 * by adding intermediate steps when interrupt rate is
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002688 * increasing
2689 */
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002690 new_itr = new_itr > adapter->itr ?
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00002691 min(adapter->itr + (new_itr >> 2), new_itr) :
2692 new_itr;
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002693 adapter->itr = new_itr;
Joe Perches1dc32912008-07-11 15:17:08 -07002694 ew32(ITR, 1000000000 / (new_itr * 256));
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002695 }
Jesse Brandeburg835bb122006-11-01 08:48:13 -08002696}
2697
/* Tx flag bits collected per-skb and consumed by e1000_tx_queue();
 * the upper 16 bits carry the VLAN tag when E1000_TX_FLAGS_VLAN is set.
 */
#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_NO_FCS		0x00000010
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16
2705
/**
 * e1000_tso - set up a TSO context descriptor for a GSO skb
 * @adapter: board private structure
 * @tx_ring: ring the context descriptor is placed on
 * @skb: packet being transmitted
 * @protocol: network-layer protocol (skb->protocol, big-endian)
 *
 * For GSO skbs, primes the IP/TCP headers for hardware segmentation
 * (zeroed lengths, pseudo-header checksum) and writes one context
 * descriptor describing the header layout and MSS.
 *
 * Return: true if a context descriptor was queued, false if the skb
 * is not GSO, or a negative errno from skb_cow_head() on failure.
 */
static int e1000_tso(struct e1000_adapter *adapter,
		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
		     __be16 protocol)
{
	struct e1000_context_desc *context_desc;
	struct e1000_tx_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;

	if (skb_is_gso(skb)) {
		int err;

		/* headers are modified below, so they must be writable */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			/* hardware fills in per-segment totals/checksums;
			 * seed the TCP checksum with the pseudo-header
			 */
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			ipcse = 0;
		}
		/* header field offsets relative to skb->data */
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss = ipcss;
		context_desc->lower_setup.ip_fields.ipcso = ipcso;
		context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		/* advance the ring index, wrapping at the end */
		if (++i == tx_ring->count)
			i = 0;

		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
2779
Joe Perches64798842008-07-11 15:17:02 -07002780static bool e1000_tx_csum(struct e1000_adapter *adapter,
Vlad Yasevich06f4d032014-08-25 10:34:49 -04002781 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2782 __be16 protocol)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783{
2784 struct e1000_context_desc *context_desc;
Florian Westphal580f3212014-09-03 13:34:31 +00002785 struct e1000_tx_buffer *buffer_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786 unsigned int i;
Joe Perches406874a2008-04-03 10:06:32 -07002787 u8 css;
Dave Graham3ed30672008-10-09 14:29:26 -07002788 u32 cmd_len = E1000_TXD_CMD_DEXT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789
Dave Graham3ed30672008-10-09 14:29:26 -07002790 if (skb->ip_summed != CHECKSUM_PARTIAL)
2791 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792
Vlad Yasevich06f4d032014-08-25 10:34:49 -04002793 switch (protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08002794 case cpu_to_be16(ETH_P_IP):
Dave Graham3ed30672008-10-09 14:29:26 -07002795 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2796 cmd_len |= E1000_TXD_CMD_TCP;
2797 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08002798 case cpu_to_be16(ETH_P_IPV6):
Dave Graham3ed30672008-10-09 14:29:26 -07002799 /* XXX not handling all IPV6 headers */
2800 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2801 cmd_len |= E1000_TXD_CMD_TCP;
2802 break;
2803 default:
2804 if (unlikely(net_ratelimit()))
Emil Tantilovfeb8f472010-07-26 23:37:21 -07002805 e_warn(drv, "checksum_partial proto=%x!\n",
2806 skb->protocol);
Dave Graham3ed30672008-10-09 14:29:26 -07002807 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 }
2809
Michał Mirosław0d0b1672010-12-14 15:24:08 +00002810 css = skb_checksum_start_offset(skb);
Dave Graham3ed30672008-10-09 14:29:26 -07002811
2812 i = tx_ring->next_to_use;
2813 buffer_info = &tx_ring->buffer_info[i];
2814 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2815
2816 context_desc->lower_setup.ip_config = 0;
2817 context_desc->upper_setup.tcp_fields.tucss = css;
2818 context_desc->upper_setup.tcp_fields.tucso =
2819 css + skb->csum_offset;
2820 context_desc->upper_setup.tcp_fields.tucse = 0;
2821 context_desc->tcp_seg_setup.data = 0;
2822 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2823
2824 buffer_info->time_stamp = jiffies;
2825 buffer_info->next_to_watch = i;
2826
Janusz Wolaka48954c2015-09-17 23:34:29 +02002827 if (unlikely(++i == tx_ring->count))
2828 i = 0;
2829
Dave Graham3ed30672008-10-09 14:29:26 -07002830 tx_ring->next_to_use = i;
2831
2832 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833}
2834
/* largest data area a single Tx descriptor may describe: 2^12 = 4096 bytes */
#define E1000_MAX_TXD_PWR	12
#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
2837
/**
 * e1000_tx_map - DMA-map an skb onto the Tx ring
 * @adapter: board private structure
 * @tx_ring: ring the buffers are placed on
 * @skb: packet to map
 * @first: ring index of the packet's first descriptor
 * @max_per_txd: maximum bytes one descriptor may carry
 * @nr_frags: number of paged fragments in the skb
 * @mss: TSO segment size (0 when not doing TSO)
 *
 * Maps the linear part and every paged fragment of @skb for DMA,
 * filling one buffer_info slot per descriptor and applying several
 * hardware errata workarounds that shave bytes off descriptor sizes.
 * On mapping failure all mappings made so far are unwound.
 *
 * Return: number of descriptors used, or 0 on DMA mapping error.
 */
static int e1000_tx_map(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_tx_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	/* first, map the linear (header) portion of the skb */
	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller
		 */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = false;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode.  Append 4-byte sentinel desc
		 */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X.  Avoid
		 * terminating buffers within evenly-aligned dwords.
		 */
		if (unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		/* only advance the ring index if more data remains; the
		 * final slot is revisited below to attach the skb
		 */
		if (len) {
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;
		}
	}

	/* then map each paged fragment */
	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			unsigned long bufend;
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode.  Append 4-byte sentinel desc
			 */
			if (unlikely(mss && f == (nr_frags-1) &&
			    size == len && size > 8))
				size -= 4;
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords.
			 */
			bufend = (unsigned long)
				page_to_phys(skb_frag_page(frag));
			bufend += offset + size - 1;
			if (unlikely(adapter->pcix_82544 &&
				     !(bufend & 4) &&
				     size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
						offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
		}
	}

	segs = skb_shinfo(skb)->gso_segs ?: 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

	/* the last slot owns the skb and accounting totals; the first
	 * slot records which descriptor completes the packet
	 */
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	/* skip the slot that failed to map, then walk backwards
	 * unmapping/freeing everything mapped so far
	 */
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}
2985
/**
 * e1000_tx_queue - write the descriptors for an already-mapped packet
 * @adapter: board private structure
 * @tx_ring: ring to place descriptors on
 * @tx_flags: E1000_TX_FLAGS_* bits describing offloads for this packet
 * @count: number of buffer_info slots filled by e1000_tx_map()
 *
 * Translates @tx_flags into descriptor command/option bits, writes
 * @count descriptors pointing at the DMA-mapped buffers, and advances
 * next_to_use.  The caller is expected to kick the hardware tail.
 */
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_tx_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		/* VLAN tag lives in the upper 16 bits of tx_flags */
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		txd_lower &= ~(E1000_TXD_CMD_IFCS);

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count))
			i = 0;
	}

	/* only the last descriptor of the packet gets the EOP/RS bits
	 * from txd_cmd
	 */
	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
}
3045
Ben Hutchings1aa8b472012-07-10 10:56:59 +00003046/* 82547 workaround to avoid controller hang in half-duplex environment.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047 * The workaround is to avoid queuing a large packet that would span
3048 * the internal Tx FIFO ring boundary by notifying the stack to resend
3049 * the packet at a later time. This gives the Tx FIFO an opportunity to
3050 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3051 * to the beginning of the Tx FIFO.
Ben Hutchings1aa8b472012-07-10 10:56:59 +00003052 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003053
3054#define E1000_FIFO_HDR 0x10
3055#define E1000_82547_PAD_LEN 0x3E0
3056
Joe Perches64798842008-07-11 15:17:02 -07003057static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3058 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059{
Joe Perches406874a2008-04-03 10:06:32 -07003060 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3061 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003062
Milind Arun Choudhary9099cfb2007-04-27 13:55:29 -07003063 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003064
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003065 if (adapter->link_duplex != HALF_DUPLEX)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066 goto no_fifo_stall_required;
3067
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003068 if (atomic_read(&adapter->tx_fifo_stall))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003069 return 1;
3070
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003071 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003072 atomic_set(&adapter->tx_fifo_stall, 1);
3073 return 1;
3074 }
3075
3076no_fifo_stall_required:
3077 adapter->tx_fifo_head += skb_fifo_len;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003078 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003079 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3080 return 0;
3081}
3082
/* Slow path of e1000_maybe_stop_tx: stop the queue, then re-check free
 * descriptors under a full memory barrier to close the race against the
 * cleanup path freeing descriptors concurrently.  Returns -EBUSY if the
 * queue stays stopped, 0 if it could be restarted immediately.
 */
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}
3106
/* Stop the Tx queue unless at least @size descriptors are free.
 * Returns 0 when transmission may proceed, -EBUSY otherwise.
 */
static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	/* Only take the stop/recheck slow path when the ring looks full. */
	if (unlikely(E1000_DESC_UNUSED(tx_ring) < size))
		return __e1000_maybe_stop_tx(netdev, size);

	return 0;
}
3114
Alexander Duyck847a1d62016-03-02 16:16:01 -05003115#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
/**
 * e1000_xmit_frame - transmit entry point (ndo_start_xmit)
 * @skb: socket buffer to transmit
 * @netdev: network interface device structure
 *
 * Counts the descriptors the frame will need (including several
 * controller-erratum workarounds), stops the queue if the ring cannot
 * hold them, applies the 82547 half-duplex FIFO workaround, sets up
 * TSO/checksum/VLAN offload flags, maps the buffers and queues the
 * descriptors, then kicks the tail register.
 *
 * Returns NETDEV_TX_OK (frame consumed or dropped) or NETDEV_TX_BUSY
 * (stack should requeue and retry).
 **/
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;
	__be16 protocol = vlan_get_protocol(skb);

	/* This goes back to the question of how to logically map a Tx queue
	 * to a flow. Right now, performance is impacted slightly negatively
	 * if using multiple Tx queues. If the stack breaks away from a
	 * single qdisc implementation, we can look at this again.
	 */
	tx_ring = adapter->tx_ring;

	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
	 * packets may get corrupted during padding by HW.
	 * To WA this issue, pad all small packets manually.
	 */
	if (eth_skb_pad(skb))
		return NETDEV_TX_OK;

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer. The calc is:
	 * 4 = ceil(buffer len/mss). To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		/* TSO frame whose linear area is exactly the headers:
		 * 82544 needs the payload tail realigned (see below).
		 */
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
				unsigned int pull_size;
			case e1000_82544:
				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword
				 */
				if ((unsigned long)(skb_tail_pointer(skb) - 1)
				    & 4)
					break;
				/* alignment already off by 4: pull up to 4
				 * payload bytes into the linear area
				 */
				pull_size = min((unsigned int)4, skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail "
					      "failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				len = skb_headlen(skb);
				break;
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
		(len > 2015)))
		count++;

	/* One (or two on 82544 in PCI-X) descriptor per paged fragment. */
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
		return NETDEV_TX_BUSY;

	/* 82547 half-duplex FIFO hang workaround: defer the frame and
	 * schedule the stall task to flush the FIFO.
	 */
	if (unlikely((hw->mac_type == e1000_82547) &&
		     (e1000_82547_fifo_workaround(adapter, skb)))) {
		netif_stop_queue(netdev);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		return NETDEV_TX_BUSY;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) <<
			     E1000_TX_FLAGS_VLAN_SHIFT);
	}

	/* Remember where this frame starts so a failed mapping can be
	 * rolled back below.
	 */
	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb, protocol);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		if (likely(hw->mac_type != e1000_82544))
			tx_ring->last_tx_tso = true;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (protocol == htons(ETH_P_IP))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	if (unlikely(skb->no_fcs))
		tx_flags |= E1000_TX_FLAGS_NO_FCS;

	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
			     nr_frags, mss);

	if (count) {
		/* The descriptors needed is higher than other Intel drivers
		 * due to a number of workarounds.  The breakdown is below:
		 * Data descriptors: MAX_SKB_FRAGS + 1
		 * Context Descriptor: 1
		 * Keep head from touching tail: 2
		 * Workarounds: 3
		 */
		int desc_needed = MAX_SKB_FRAGS + 7;

		netdev_sent_queue(netdev, skb->len);
		skb_tx_timestamp(skb);

		e1000_tx_queue(adapter, tx_ring, tx_flags, count);

		/* 82544 potentially requires twice as many data descriptors
		 * in order to guarantee buffers don't end on evenly-aligned
		 * dwords
		 */
		if (adapter->pcix_82544)
			desc_needed += MAX_SKB_FRAGS + 1;

		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);

		/* Only hit the tail register when no more frames are
		 * pending (xmit_more) or the queue just stopped.
		 */
		if (!skb->xmit_more ||
		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
			/* we need this if more than one processor can write to
			 * our tail at a time, it synchronizes IO on IA64/Altix
			 * systems
			 */
			mmiowb();
		}
	} else {
		/* Mapping failed: drop the frame and rewind the ring to
		 * where this frame started.
		 */
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
3306
Tushar Daveb04e36b2012-01-27 09:00:46 +00003307#define NUM_REGS 38 /* 1 based count */
3308static void e1000_regdump(struct e1000_adapter *adapter)
3309{
3310 struct e1000_hw *hw = &adapter->hw;
3311 u32 regs[NUM_REGS];
3312 u32 *regs_buff = regs;
3313 int i = 0;
3314
Tushar Davee29b5d82012-02-10 08:06:36 +00003315 static const char * const reg_name[] = {
3316 "CTRL", "STATUS",
3317 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3318 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3319 "TIDV", "TXDCTL", "TADV", "TARC0",
3320 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3321 "TXDCTL1", "TARC1",
3322 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3323 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3324 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
Tushar Daveb04e36b2012-01-27 09:00:46 +00003325 };
3326
3327 regs_buff[0] = er32(CTRL);
3328 regs_buff[1] = er32(STATUS);
3329
3330 regs_buff[2] = er32(RCTL);
3331 regs_buff[3] = er32(RDLEN);
3332 regs_buff[4] = er32(RDH);
3333 regs_buff[5] = er32(RDT);
3334 regs_buff[6] = er32(RDTR);
3335
3336 regs_buff[7] = er32(TCTL);
3337 regs_buff[8] = er32(TDBAL);
3338 regs_buff[9] = er32(TDBAH);
3339 regs_buff[10] = er32(TDLEN);
3340 regs_buff[11] = er32(TDH);
3341 regs_buff[12] = er32(TDT);
3342 regs_buff[13] = er32(TIDV);
3343 regs_buff[14] = er32(TXDCTL);
3344 regs_buff[15] = er32(TADV);
3345 regs_buff[16] = er32(TARC0);
3346
3347 regs_buff[17] = er32(TDBAL1);
3348 regs_buff[18] = er32(TDBAH1);
3349 regs_buff[19] = er32(TDLEN1);
3350 regs_buff[20] = er32(TDH1);
3351 regs_buff[21] = er32(TDT1);
3352 regs_buff[22] = er32(TXDCTL1);
3353 regs_buff[23] = er32(TARC1);
3354 regs_buff[24] = er32(CTRL_EXT);
3355 regs_buff[25] = er32(ERT);
3356 regs_buff[26] = er32(RDBAL0);
3357 regs_buff[27] = er32(RDBAH0);
3358 regs_buff[28] = er32(TDFH);
3359 regs_buff[29] = er32(TDFT);
3360 regs_buff[30] = er32(TDFHS);
3361 regs_buff[31] = er32(TDFTS);
3362 regs_buff[32] = er32(TDFPC);
3363 regs_buff[33] = er32(RDFH);
3364 regs_buff[34] = er32(RDFT);
3365 regs_buff[35] = er32(RDFHS);
3366 regs_buff[36] = er32(RDFTS);
3367 regs_buff[37] = er32(RDFPC);
3368
3369 pr_info("Register dump\n");
Tushar Davee29b5d82012-02-10 08:06:36 +00003370 for (i = 0; i < NUM_REGS; i++)
3371 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003372}
3373
/*
 * e1000_dump: Print registers, tx ring and rx ring
 *
 * Diagnostic helper gated on the adapter's msg_enable bits: always dumps
 * the registers (netif_msg_hw), optionally walks the Tx descriptor ring
 * (netif_msg_tx_done) and Rx descriptor ring (netif_msg_rx_status), then
 * reads back the on-chip descriptor caches.
 */
static void e1000_dump(struct e1000_adapter *adapter)
{
	/* this code doesn't handle multiple rings */
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
	int i;

	if (!netif_msg_hw(adapter))
		return;

	/* Print Registers */
	e1000_regdump(adapter);

	/* transmit dump */
	pr_info("TX Desc ring0 dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS  |     IPCSE       | IPCS0  | IPCSS  |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");

	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
		/* overlay to print the descriptor as two raw 64-bit words */
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)tx_desc;
		const char *type;

		/* Mark next-to-use / next-to-clean positions in the output. */
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			type = "NTC/U";
		else if (i == tx_ring->next_to_use)
			type = "NTU";
		else if (i == tx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		/* Leading 'd' = extended data descriptor, 'c' = context
		 * (DEXT bit 20 of the second word as tested here).
		 */
		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
			le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp, buffer_info->skb, type);
	}

rx_ring_summary:
	/* receive dump */
	pr_info("\nRX Desc ring dump\n");

	/* Legacy Receive Descriptor Format
	 *
	 * +-----------------------------------------------------+
	 * |                Buffer Address [63:0]                |
	 * +-----------------------------------------------------+
	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
	 * +-----------------------------------------------------+
	 * 63       48 47    40 39      32 31         16 15      0
	 */
	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");

	if (!netif_msg_rx_status(adapter))
		goto exit;

	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
		/* overlay to print the descriptor as two raw 64-bit words */
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)rx_desc;
		const char *type;

		if (i == rx_ring->next_to_use)
			type = "NTU";
		else if (i == rx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
	} /* for */

	/* dump the descriptor caches */
	/* rx */
	pr_info("Rx descriptor cache in 64bit format\n");
	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
		pr_info("R%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
	/* tx */
	pr_info("Tx descriptor cache in 64bit format\n");
	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
		pr_info("T%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
exit:
	return;
}
3511
Linus Torvalds1da177e2005-04-16 15:20:36 -07003512/**
3513 * e1000_tx_timeout - Respond to a Tx Hang
3514 * @netdev: network interface device structure
3515 **/
Joe Perches64798842008-07-11 15:17:02 -07003516static void e1000_tx_timeout(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517{
Malli Chilakala60490fe2005-06-17 17:41:45 -07003518 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519
3520 /* Do the reset outside of interrupt context */
Jeff Kirsher87041632006-03-02 18:21:24 -08003521 adapter->tx_timeout_count++;
3522 schedule_work(&adapter->reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523}
3524
Joe Perches64798842008-07-11 15:17:02 -07003525static void e1000_reset_task(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526{
David Howells65f27f32006-11-22 14:55:48 +00003527 struct e1000_adapter *adapter =
3528 container_of(work, struct e1000_adapter, reset_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003529
Tushar Daveb04e36b2012-01-27 09:00:46 +00003530 e_err(drv, "Reset adapter\n");
Vladimir Davydovb2f963bf2013-11-23 07:17:56 +00003531 e1000_reinit_locked(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003532}
3533
3534/**
3535 * e1000_get_stats - Get System Network Statistics
3536 * @netdev: network interface device structure
3537 *
3538 * Returns the address of the device statistics structure.
Jesse Brandeburga4010af2011-10-05 07:24:41 +00003539 * The statistics are actually updated from the watchdog.
Linus Torvalds1da177e2005-04-16 15:20:36 -07003540 **/
Joe Perches64798842008-07-11 15:17:02 -07003541static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542{
Jeff Kirsher6b7660c2006-01-12 16:50:35 -08003543 /* only return the current stats */
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003544 return &netdev->stats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003545}
3546
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Validates the resulting frame size against the adapter's limits,
 * serializes against concurrent resets via __E1000_RESETTING, picks a new
 * Rx buffer size, and restarts (or resets) the adapter.
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		e_err(probe, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
		/* pre-82543 parts cannot do jumbo frames */
		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
			e_err(probe, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	/* Spin until we own the reset flag, excluding concurrent resets. */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	/* e1000_down has a dependency on max_frame_size */
	hw->max_frame_size = max_frame;
	if (netif_running(netdev)) {
		/* prevent buffers from being reallocated */
		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
		e1000_down(adapter);
	}

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * however with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = PAGE_SIZE;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	pr_info("%s changing MTU from %d to %d\n",
		netdev->name, netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		e1000_up(adapter);
	else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

	return 0;
}
3625
3626/**
3627 * e1000_update_stats - Update the board statistics counters
3628 * @adapter: board private structure
3629 **/
Joe Perches64798842008-07-11 15:17:02 -07003630void e1000_update_stats(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003631{
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003632 struct net_device *netdev = adapter->netdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003633 struct e1000_hw *hw = &adapter->hw;
Linas Vepstas282f33c2006-06-08 22:19:44 -07003634 struct pci_dev *pdev = adapter->pdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003635 unsigned long flags;
Joe Perches406874a2008-04-03 10:06:32 -07003636 u16 phy_tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637
3638#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3639
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003640 /* Prevent stats update while adapter is being reset, or if the pci
Linas Vepstas282f33c2006-06-08 22:19:44 -07003641 * connection is down.
3642 */
Auke Kok90267292006-06-08 09:30:24 -07003643 if (adapter->link_speed == 0)
3644 return;
Linas Vepstas81b19552006-12-12 18:29:15 -06003645 if (pci_channel_offline(pdev))
Linas Vepstas282f33c2006-06-08 22:19:44 -07003646 return;
Auke Kok90267292006-06-08 09:30:24 -07003647
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648 spin_lock_irqsave(&adapter->stats_lock, flags);
3649
Masatake YAMATO828d0552007-10-20 03:06:37 +02003650 /* these counters are modified from e1000_tbi_adjust_stats,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651 * called from the interrupt context, so they must only
3652 * be written while holding adapter->stats_lock
3653 */
3654
Joe Perches1dc32912008-07-11 15:17:08 -07003655 adapter->stats.crcerrs += er32(CRCERRS);
3656 adapter->stats.gprc += er32(GPRC);
3657 adapter->stats.gorcl += er32(GORCL);
3658 adapter->stats.gorch += er32(GORCH);
3659 adapter->stats.bprc += er32(BPRC);
3660 adapter->stats.mprc += er32(MPRC);
3661 adapter->stats.roc += er32(ROC);
Auke Kokcd94dd02006-06-27 09:08:22 -07003662
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003663 adapter->stats.prc64 += er32(PRC64);
3664 adapter->stats.prc127 += er32(PRC127);
3665 adapter->stats.prc255 += er32(PRC255);
3666 adapter->stats.prc511 += er32(PRC511);
3667 adapter->stats.prc1023 += er32(PRC1023);
3668 adapter->stats.prc1522 += er32(PRC1522);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003669
Joe Perches1dc32912008-07-11 15:17:08 -07003670 adapter->stats.symerrs += er32(SYMERRS);
3671 adapter->stats.mpc += er32(MPC);
3672 adapter->stats.scc += er32(SCC);
3673 adapter->stats.ecol += er32(ECOL);
3674 adapter->stats.mcc += er32(MCC);
3675 adapter->stats.latecol += er32(LATECOL);
3676 adapter->stats.dc += er32(DC);
3677 adapter->stats.sec += er32(SEC);
3678 adapter->stats.rlec += er32(RLEC);
3679 adapter->stats.xonrxc += er32(XONRXC);
3680 adapter->stats.xontxc += er32(XONTXC);
3681 adapter->stats.xoffrxc += er32(XOFFRXC);
3682 adapter->stats.xofftxc += er32(XOFFTXC);
3683 adapter->stats.fcruc += er32(FCRUC);
3684 adapter->stats.gptc += er32(GPTC);
3685 adapter->stats.gotcl += er32(GOTCL);
3686 adapter->stats.gotch += er32(GOTCH);
3687 adapter->stats.rnbc += er32(RNBC);
3688 adapter->stats.ruc += er32(RUC);
3689 adapter->stats.rfc += er32(RFC);
3690 adapter->stats.rjc += er32(RJC);
3691 adapter->stats.torl += er32(TORL);
3692 adapter->stats.torh += er32(TORH);
3693 adapter->stats.totl += er32(TOTL);
3694 adapter->stats.toth += er32(TOTH);
3695 adapter->stats.tpr += er32(TPR);
Auke Kokcd94dd02006-06-27 09:08:22 -07003696
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003697 adapter->stats.ptc64 += er32(PTC64);
3698 adapter->stats.ptc127 += er32(PTC127);
3699 adapter->stats.ptc255 += er32(PTC255);
3700 adapter->stats.ptc511 += er32(PTC511);
3701 adapter->stats.ptc1023 += er32(PTC1023);
3702 adapter->stats.ptc1522 += er32(PTC1522);
Auke Kokcd94dd02006-06-27 09:08:22 -07003703
Joe Perches1dc32912008-07-11 15:17:08 -07003704 adapter->stats.mptc += er32(MPTC);
3705 adapter->stats.bptc += er32(BPTC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706
3707 /* used for adaptive IFS */
3708
Joe Perches1dc32912008-07-11 15:17:08 -07003709 hw->tx_packet_delta = er32(TPT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003710 adapter->stats.tpt += hw->tx_packet_delta;
Joe Perches1dc32912008-07-11 15:17:08 -07003711 hw->collision_delta = er32(COLC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712 adapter->stats.colc += hw->collision_delta;
3713
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003714 if (hw->mac_type >= e1000_82543) {
Joe Perches1dc32912008-07-11 15:17:08 -07003715 adapter->stats.algnerrc += er32(ALGNERRC);
3716 adapter->stats.rxerrc += er32(RXERRC);
3717 adapter->stats.tncrs += er32(TNCRS);
3718 adapter->stats.cexterr += er32(CEXTERR);
3719 adapter->stats.tsctc += er32(TSCTC);
3720 adapter->stats.tsctfc += er32(TSCTFC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003721 }
3722
3723 /* Fill out the OS statistics structure */
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003724 netdev->stats.multicast = adapter->stats.mprc;
3725 netdev->stats.collisions = adapter->stats.colc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003726
3727 /* Rx Errors */
3728
Jeff Kirsher87041632006-03-02 18:21:24 -08003729 /* RLEC on some newer hardware can be incorrect so build
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003730 * our own version based on RUC and ROC
3731 */
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003732 netdev->stats.rx_errors = adapter->stats.rxerrc +
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733 adapter->stats.crcerrs + adapter->stats.algnerrc +
Jeff Kirsher87041632006-03-02 18:21:24 -08003734 adapter->stats.ruc + adapter->stats.roc +
3735 adapter->stats.cexterr;
Mitch Williams49559852006-09-27 12:53:37 -07003736 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003737 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3738 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3739 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3740 netdev->stats.rx_missed_errors = adapter->stats.mpc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741
3742 /* Tx Errors */
Mitch Williams49559852006-09-27 12:53:37 -07003743 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003744 netdev->stats.tx_errors = adapter->stats.txerrc;
3745 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3746 netdev->stats.tx_window_errors = adapter->stats.latecol;
3747 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
Joe Perches1dc32912008-07-11 15:17:08 -07003748 if (hw->bad_tx_carr_stats_fd &&
Jeff Garzik167fb282006-12-15 10:41:15 -05003749 adapter->link_duplex == FULL_DUPLEX) {
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003750 netdev->stats.tx_carrier_errors = 0;
Jeff Garzik167fb282006-12-15 10:41:15 -05003751 adapter->stats.tncrs = 0;
3752 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003753
3754 /* Tx Dropped needs to be maintained elsewhere */
3755
3756 /* Phy Stats */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003757 if (hw->media_type == e1000_media_type_copper) {
3758 if ((adapter->link_speed == SPEED_1000) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003759 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3760 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3761 adapter->phy_stats.idle_errors += phy_tmp;
3762 }
3763
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003764 if ((hw->mac_type <= e1000_82546) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07003765 (hw->phy_type == e1000_phy_m88) &&
3766 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3767 adapter->phy_stats.receive_errors += phy_tmp;
3768 }
3769
Jeff Garzik15e376b2006-12-15 11:16:33 -05003770 /* Management Stats */
Joe Perches1dc32912008-07-11 15:17:08 -07003771 if (hw->has_smbus) {
3772 adapter->stats.mgptc += er32(MGTPTC);
3773 adapter->stats.mgprc += er32(MGTPRC);
3774 adapter->stats.mgpdc += er32(MGTPDC);
Jeff Garzik15e376b2006-12-15 11:16:33 -05003775 }
3776
Linus Torvalds1da177e2005-04-16 15:20:36 -07003777 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3778}
Jesse Brandeburg9ac98282006-11-01 08:48:10 -08003779
3780/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781 * e1000_intr - Interrupt Handler
3782 * @irq: interrupt number
3783 * @data: pointer to a network interface device structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07003784 **/
Joe Perches64798842008-07-11 15:17:02 -07003785static irqreturn_t e1000_intr(int irq, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786{
3787 struct net_device *netdev = data;
Malli Chilakala60490fe2005-06-17 17:41:45 -07003788 struct e1000_adapter *adapter = netdev_priv(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789 struct e1000_hw *hw = &adapter->hw;
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003790 u32 icr = er32(ICR);
Francois Romieuc3570ac2008-07-11 15:17:38 -07003791
Jesse Brandeburg4c11b8a2011-01-13 07:48:13 +00003792 if (unlikely((!icr)))
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003793 return IRQ_NONE; /* Not our interrupt */
3794
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003795 /* we might have caused the interrupt, but the above
Jesse Brandeburg4c11b8a2011-01-13 07:48:13 +00003796 * read cleared it, and just in case the driver is
3797 * down there is nothing to do so return handled
3798 */
3799 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3800 return IRQ_HANDLED;
3801
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003802 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003803 hw->get_link_status = 1;
Auke Kok1314bbf2006-09-27 12:54:02 -07003804 /* guard against interrupt when we're going down */
3805 if (!test_bit(__E1000_DOWN, &adapter->flags))
Jesse Brandeburga4010af2011-10-05 07:24:41 +00003806 schedule_delayed_work(&adapter->watchdog_task, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807 }
3808
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00003809 /* disable interrupts, without the synchronize_irq bit */
3810 ew32(IMC, ~0);
3811 E1000_WRITE_FLUSH();
3812
Ben Hutchings288379f2009-01-19 16:43:59 -08003813 if (likely(napi_schedule_prep(&adapter->napi))) {
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003814 adapter->total_tx_bytes = 0;
3815 adapter->total_tx_packets = 0;
3816 adapter->total_rx_bytes = 0;
3817 adapter->total_rx_packets = 0;
Ben Hutchings288379f2009-01-19 16:43:59 -08003818 __napi_schedule(&adapter->napi);
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003819 } else {
Auke Kok90fb5132006-11-01 08:47:30 -08003820 /* this really should not happen! if it does it is basically a
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003821 * bug, but not a hard error, so enable ints and continue
3822 */
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003823 if (!test_bit(__E1000_DOWN, &adapter->flags))
3824 e1000_irq_enable(adapter);
3825 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003826
Linus Torvalds1da177e2005-04-16 15:20:36 -07003827 return IRQ_HANDLED;
3828}
3829
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830/**
3831 * e1000_clean - NAPI Rx polling callback
3832 * @adapter: board private structure
3833 **/
Joe Perches64798842008-07-11 15:17:02 -07003834static int e1000_clean(struct napi_struct *napi, int budget)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835{
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003836 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3837 napi);
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003838 int tx_clean_complete = 0, work_done = 0;
Malli Chilakala26483452005-04-28 19:44:46 -07003839
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003840 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003841
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003842 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003843
Jesse Brandeburg650b5a52009-09-25 12:19:23 +00003844 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08003845 work_done = budget;
3846
David S. Miller53e52c72008-01-07 21:06:12 -08003847 /* If budget not fully consumed, exit the polling mode */
3848 if (work_done < budget) {
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003849 if (likely(adapter->itr_setting & 3))
3850 e1000_set_itr(adapter);
Jesse Brandeburg32b3e082015-09-24 16:35:47 -07003851 napi_complete_done(napi, work_done);
Jesse Brandeburga6c42322009-03-25 21:59:22 +00003852 if (!test_bit(__E1000_DOWN, &adapter->flags))
3853 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854 }
3855
Stephen Hemmingerbea33482007-10-03 16:41:36 -07003856 return work_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003857}
3858
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859/**
3860 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3861 * @adapter: board private structure
3862 **/
Joe Perches64798842008-07-11 15:17:02 -07003863static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3864 struct e1000_tx_ring *tx_ring)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003865{
Joe Perches1dc32912008-07-11 15:17:08 -07003866 struct e1000_hw *hw = &adapter->hw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003867 struct net_device *netdev = adapter->netdev;
3868 struct e1000_tx_desc *tx_desc, *eop_desc;
Florian Westphal580f3212014-09-03 13:34:31 +00003869 struct e1000_tx_buffer *buffer_info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003870 unsigned int i, eop;
Jeff Kirsher2a1af5d2006-03-02 18:20:43 -08003871 unsigned int count = 0;
Janusz Wolaka48954c2015-09-17 23:34:29 +02003872 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
Otto Estuardo Solares Cabrera2f66fd32012-08-24 08:04:58 +00003873 unsigned int bytes_compl = 0, pkts_compl = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874
3875 i = tx_ring->next_to_clean;
3876 eop = tx_ring->buffer_info[i].next_to_watch;
3877 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3878
Alexander Duyckccfb3422009-03-25 21:59:04 +00003879 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3880 (count < tx_ring->count)) {
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003881 bool cleaned = false;
Alexander Duyck837a1db2015-04-07 16:55:27 -07003882 dma_rmb(); /* read buffer_info after eop_desc */
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003883 for ( ; !cleaned; count++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003884 tx_desc = E1000_TX_DESC(*tx_ring, i);
3885 buffer_info = &tx_ring->buffer_info[i];
3886 cleaned = (i == eop);
3887
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003888 if (cleaned) {
Dean Nelson31c15a22011-08-25 14:39:24 +00003889 total_tx_packets += buffer_info->segs;
3890 total_tx_bytes += buffer_info->bytecount;
Otto Estuardo Solares Cabrera2f66fd32012-08-24 08:04:58 +00003891 if (buffer_info->skb) {
3892 bytes_compl += buffer_info->skb->len;
3893 pkts_compl++;
3894 }
3895
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003896 }
Jeff Kirsherfd803242005-12-13 00:06:22 -05003897 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
Jesse Brandeburga9ebadd2006-11-01 08:47:53 -08003898 tx_desc->upper.data = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899
Janusz Wolaka48954c2015-09-17 23:34:29 +02003900 if (unlikely(++i == tx_ring->count))
3901 i = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003902 }
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003903
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904 eop = tx_ring->buffer_info[i].next_to_watch;
3905 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3906 }
3907
Dmitriy Vyukov9eab46b2015-09-08 10:52:44 +02003908 /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3909 * which will reuse the cleaned buffers.
3910 */
3911 smp_store_release(&tx_ring->next_to_clean, i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912
Otto Estuardo Solares Cabrera2f66fd32012-08-24 08:04:58 +00003913 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3914
Auke Kok77b2aad2006-04-14 19:05:25 -07003915#define TX_WAKE_THRESHOLD 32
Jesse Brandeburg843f4262009-04-16 16:59:47 +00003916 if (unlikely(count && netif_carrier_ok(netdev) &&
Jesse Brandeburg65c79732006-09-27 12:53:48 -07003917 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3918 /* Make sure that anybody stopping the queue after this
3919 * sees the new next_to_clean.
3920 */
3921 smp_mb();
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003922
3923 if (netif_queue_stopped(netdev) &&
3924 !(test_bit(__E1000_DOWN, &adapter->flags))) {
Auke Kok77b2aad2006-04-14 19:05:25 -07003925 netif_wake_queue(netdev);
Jesse Brandeburgfcfb1222006-11-01 08:47:59 -08003926 ++adapter->restart_queue;
3927 }
Auke Kok77b2aad2006-04-14 19:05:25 -07003928 }
Malli Chilakala26483452005-04-28 19:44:46 -07003929
Mallikarjuna R Chilakala581d7082005-10-04 07:01:55 -04003930 if (adapter->detect_tx_hung) {
Malli Chilakala26483452005-04-28 19:44:46 -07003931 /* Detect a transmit hang in hardware, this serializes the
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003932 * check with the clearing of time_stamp and movement of i
3933 */
Joe Perchesc3033b02008-03-21 11:06:25 -07003934 adapter->detect_tx_hung = false;
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003935 if (tx_ring->buffer_info[eop].time_stamp &&
3936 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00003937 (adapter->tx_timeout_factor * HZ)) &&
Joe Perches8e95a202009-12-03 07:58:21 +00003938 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003939
3940 /* detected Tx unit hang */
Emil Tantilovfeb8f472010-07-26 23:37:21 -07003941 e_err(drv, "Detected Tx Unit Hang\n"
Emil Tantilov675ad472010-04-27 14:02:58 +00003942 " Tx Queue <%lu>\n"
3943 " TDH <%x>\n"
3944 " TDT <%x>\n"
3945 " next_to_use <%x>\n"
3946 " next_to_clean <%x>\n"
3947 "buffer_info[next_to_clean]\n"
3948 " time_stamp <%lx>\n"
3949 " next_to_watch <%x>\n"
3950 " jiffies <%lx>\n"
3951 " next_to_watch.status <%x>\n",
Hong Zhiguo49a45a02013-10-22 18:32:56 +00003952 (unsigned long)(tx_ring - adapter->tx_ring),
Joe Perches1dc32912008-07-11 15:17:08 -07003953 readl(hw->hw_addr + tx_ring->tdh),
3954 readl(hw->hw_addr + tx_ring->tdt),
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003955 tx_ring->next_to_use,
Jeff Kirsher392137f2006-01-12 16:50:57 -08003956 tx_ring->next_to_clean,
Jesse Brandeburgcdd75492009-09-25 12:18:07 +00003957 tx_ring->buffer_info[eop].time_stamp,
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003958 eop,
3959 jiffies,
3960 eop_desc->upper.fields.status);
Tushar Daveb04e36b2012-01-27 09:00:46 +00003961 e1000_dump(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962 netif_stop_queue(netdev);
Malli Chilakala70b8f1e2005-04-28 19:40:40 -07003963 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964 }
Jesse Brandeburg835bb122006-11-01 08:48:13 -08003965 adapter->total_tx_bytes += total_tx_bytes;
3966 adapter->total_tx_packets += total_tx_packets;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00003967 netdev->stats.tx_bytes += total_tx_bytes;
3968 netdev->stats.tx_packets += total_tx_packets;
Eric Dumazet807540b2010-09-23 05:40:09 +00003969 return count < tx_ring->count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003970}
3971
3972/**
3973 * e1000_rx_checksum - Receive Checksum Offload for 82543
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003974 * @adapter: board private structure
3975 * @status_err: receive descriptor status and error fields
3976 * @csum: receive descriptor csum field
3977 * @sk_buff: socket buffer with received data
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 **/
Joe Perches64798842008-07-11 15:17:02 -07003979static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3980 u32 csum, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003981{
Joe Perches1dc32912008-07-11 15:17:08 -07003982 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07003983 u16 status = (u16)status_err;
3984 u8 errors = (u8)(status_err >> 24);
Eric Dumazetbc8acf22010-09-02 13:07:41 -07003985
3986 skb_checksum_none_assert(skb);
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003987
Linus Torvalds1da177e2005-04-16 15:20:36 -07003988 /* 82543 or newer only */
Janusz Wolaka48954c2015-09-17 23:34:29 +02003989 if (unlikely(hw->mac_type < e1000_82543))
3990 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003991 /* Ignore Checksum bit is set */
Janusz Wolaka48954c2015-09-17 23:34:29 +02003992 if (unlikely(status & E1000_RXD_STAT_IXSM))
3993 return;
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003994 /* TCP/UDP checksum error bit is set */
Jesse Brandeburg96838a42006-01-18 13:01:39 -08003995 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
Malli Chilakala2d7edb92005-04-28 19:43:52 -07003996 /* let the stack verify checksum errors */
3997 adapter->hw_csum_err++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998 return;
3999 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07004000 /* TCP/UDP Checksum has not been calculated */
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004001 if (!(status & E1000_RXD_STAT_TCPCS))
4002 return;
4003
Malli Chilakala2d7edb92005-04-28 19:43:52 -07004004 /* It must be a TCP or UDP packet with a valid checksum */
4005 if (likely(status & E1000_RXD_STAT_TCPCS)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004006 /* TCP checksum is good */
4007 skb->ip_summed = CHECKSUM_UNNECESSARY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008 }
Malli Chilakala2d7edb92005-04-28 19:43:52 -07004009 adapter->hw_csum_good++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004010}
4011
4012/**
Florian Westphal13809602014-09-03 13:34:36 +00004013 * e1000_consume_page - helper function for jumbo Rx path
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004014 **/
Florian Westphal93f0afe2014-09-03 13:34:26 +00004015static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004016 u16 length)
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004017{
Florian Westphal13809602014-09-03 13:34:36 +00004018 bi->rxbuf.page = NULL;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004019 skb->len += length;
4020 skb->data_len += length;
Eric Dumazeted64b3c2011-10-13 07:53:42 +00004021 skb->truesize += PAGE_SIZE;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004022}
4023
4024/**
4025 * e1000_receive_skb - helper function to handle rx indications
4026 * @adapter: board private structure
4027 * @status: descriptor status field as written by hardware
4028 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4029 * @skb: pointer to sk_buff to be indicated to stack
4030 */
4031static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4032 __le16 vlan, struct sk_buff *skb)
4033{
Jesse Brandeburg6a08d192010-09-22 18:23:05 +00004034 skb->protocol = eth_type_trans(skb, adapter->netdev);
4035
Jiri Pirko5622e402011-07-21 03:26:31 +00004036 if (status & E1000_RXD_STAT_VP) {
4037 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4038
Patrick McHardy86a9bad2013-04-19 02:04:30 +00004039 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
Jiri Pirko5622e402011-07-21 03:26:31 +00004040 }
4041 napi_gro_receive(&adapter->napi, skb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004042}
4043
4044/**
Florian Westphal4f0aeb12014-09-03 13:34:10 +00004045 * e1000_tbi_adjust_stats
4046 * @hw: Struct containing variables accessed by shared code
4047 * @frame_len: The length of the frame in question
4048 * @mac_addr: The Ethernet destination address of the frame in question
4049 *
4050 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4051 */
4052static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4053 struct e1000_hw_stats *stats,
4054 u32 frame_len, const u8 *mac_addr)
4055{
4056 u64 carry_bit;
4057
4058 /* First adjust the frame length. */
4059 frame_len--;
4060 /* We need to adjust the statistics counters, since the hardware
4061 * counters overcount this packet as a CRC error and undercount
4062 * the packet as a good packet
4063 */
4064 /* This packet should not be counted as a CRC error. */
4065 stats->crcerrs--;
4066 /* This packet does count as a Good Packet Received. */
4067 stats->gprc++;
4068
4069 /* Adjust the Good Octets received counters */
4070 carry_bit = 0x80000000 & stats->gorcl;
4071 stats->gorcl += frame_len;
4072 /* If the high bit of Gorcl (the low 32 bits of the Good Octets
4073 * Received Count) was one before the addition,
4074 * AND it is zero after, then we lost the carry out,
4075 * need to add one to Gorch (Good Octets Received Count High).
4076 * This could be simplified if all environments supported
4077 * 64-bit integers.
4078 */
4079 if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4080 stats->gorch++;
4081 /* Is this a broadcast or multicast? Check broadcast first,
4082 * since the test for a multicast frame will test positive on
4083 * a broadcast frame.
4084 */
4085 if (is_broadcast_ether_addr(mac_addr))
4086 stats->bprc++;
4087 else if (is_multicast_ether_addr(mac_addr))
4088 stats->mprc++;
4089
4090 if (frame_len == hw->max_frame_size) {
4091 /* In this case, the hardware has overcounted the number of
4092 * oversize frames.
4093 */
4094 if (stats->roc > 0)
4095 stats->roc--;
4096 }
4097
4098 /* Adjust the bin counters when the extra byte put the frame in the
4099 * wrong bin. Remember that the frame_len was adjusted above.
4100 */
4101 if (frame_len == 64) {
4102 stats->prc64++;
4103 stats->prc127--;
4104 } else if (frame_len == 127) {
4105 stats->prc127++;
4106 stats->prc255--;
4107 } else if (frame_len == 255) {
4108 stats->prc255++;
4109 stats->prc511--;
4110 } else if (frame_len == 511) {
4111 stats->prc511++;
4112 stats->prc1023--;
4113 } else if (frame_len == 1023) {
4114 stats->prc1023++;
4115 stats->prc1522--;
4116 } else if (frame_len == 1522) {
4117 stats->prc1522++;
4118 }
4119}
4120
Florian Westphal20371102014-09-03 13:34:15 +00004121static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4122 u8 status, u8 errors,
4123 u32 length, const u8 *data)
4124{
4125 struct e1000_hw *hw = &adapter->hw;
4126 u8 last_byte = *(data + length - 1);
4127
4128 if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4129 unsigned long irq_flags;
4130
4131 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4132 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4133 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4134
4135 return true;
4136 }
4137
4138 return false;
4139}
4140
Florian Westphal2b294b12014-09-03 13:34:21 +00004141static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4142 unsigned int bufsz)
4143{
Alexander Duyck67fd8932014-12-09 19:40:56 -08004144 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
Florian Westphal2b294b12014-09-03 13:34:21 +00004145
4146 if (unlikely(!skb))
4147 adapter->alloc_rx_buff_failed++;
4148 return skb;
4149}
4150
Florian Westphal4f0aeb12014-09-03 13:34:10 +00004151/**
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004152 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4153 * @adapter: board private structure
4154 * @rx_ring: ring to clean
4155 * @work_done: amount of napi work completed this call
4156 * @work_to_do: max amount of work allowed for this call to do
4157 *
4158 * the return value indicates whether actual cleaning was done, there
4159 * is no guarantee that everything was cleaned
4160 */
4161static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4162 struct e1000_rx_ring *rx_ring,
4163 int *work_done, int work_to_do)
4164{
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004165 struct net_device *netdev = adapter->netdev;
4166 struct pci_dev *pdev = adapter->pdev;
4167 struct e1000_rx_desc *rx_desc, *next_rxd;
Florian Westphal93f0afe2014-09-03 13:34:26 +00004168 struct e1000_rx_buffer *buffer_info, *next_buffer;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004169 u32 length;
4170 unsigned int i;
4171 int cleaned_count = 0;
4172 bool cleaned = false;
Janusz Wolaka48954c2015-09-17 23:34:29 +02004173 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004174
4175 i = rx_ring->next_to_clean;
4176 rx_desc = E1000_RX_DESC(*rx_ring, i);
4177 buffer_info = &rx_ring->buffer_info[i];
4178
4179 while (rx_desc->status & E1000_RXD_STAT_DD) {
4180 struct sk_buff *skb;
4181 u8 status;
4182
4183 if (*work_done >= work_to_do)
4184 break;
4185 (*work_done)++;
Alexander Duyck837a1db2015-04-07 16:55:27 -07004186 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004187
4188 status = rx_desc->status;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004189
Janusz Wolaka48954c2015-09-17 23:34:29 +02004190 if (++i == rx_ring->count)
4191 i = 0;
4192
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004193 next_rxd = E1000_RX_DESC(*rx_ring, i);
4194 prefetch(next_rxd);
4195
4196 next_buffer = &rx_ring->buffer_info[i];
4197
4198 cleaned = true;
4199 cleaned_count++;
Nick Nunleyb16f53b2010-04-27 13:08:45 +00004200 dma_unmap_page(&pdev->dev, buffer_info->dma,
Florian Westphal93f0afe2014-09-03 13:34:26 +00004201 adapter->rx_buffer_len, DMA_FROM_DEVICE);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004202 buffer_info->dma = 0;
4203
4204 length = le16_to_cpu(rx_desc->length);
4205
4206 /* errors is only valid for DD + EOP descriptors */
4207 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4208 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
Florian Westphal13809602014-09-03 13:34:36 +00004209 u8 *mapped = page_address(buffer_info->rxbuf.page);
Sebastian Andrzej Siewiora3060852012-05-11 16:30:46 +00004210
Florian Westphal20371102014-09-03 13:34:15 +00004211 if (e1000_tbi_should_accept(adapter, status,
4212 rx_desc->errors,
4213 length, mapped)) {
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004214 length--;
Florian Westphal20371102014-09-03 13:34:15 +00004215 } else if (netdev->features & NETIF_F_RXALL) {
4216 goto process_skb;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004217 } else {
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004218 /* an error means any chain goes out the window
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004219 * too
4220 */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004221 if (rx_ring->rx_skb_top)
4222 dev_kfree_skb(rx_ring->rx_skb_top);
4223 rx_ring->rx_skb_top = NULL;
4224 goto next_desc;
4225 }
4226 }
4227
4228#define rxtop rx_ring->rx_skb_top
Ben Greeare825b732012-04-04 06:01:29 +00004229process_skb:
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004230 if (!(status & E1000_RXD_STAT_EOP)) {
4231 /* this descriptor is only the beginning (or middle) */
4232 if (!rxtop) {
4233 /* this is the beginning of a chain */
Florian Westphalde591c72014-09-03 13:34:42 +00004234 rxtop = napi_get_frags(&adapter->napi);
Florian Westphal13809602014-09-03 13:34:36 +00004235 if (!rxtop)
4236 break;
4237
4238 skb_fill_page_desc(rxtop, 0,
4239 buffer_info->rxbuf.page,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004240 0, length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004241 } else {
4242 /* this is the middle of a chain */
4243 skb_fill_page_desc(rxtop,
4244 skb_shinfo(rxtop)->nr_frags,
Florian Westphal13809602014-09-03 13:34:36 +00004245 buffer_info->rxbuf.page, 0, length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004246 }
4247 e1000_consume_page(buffer_info, rxtop, length);
4248 goto next_desc;
4249 } else {
4250 if (rxtop) {
4251 /* end of the chain */
4252 skb_fill_page_desc(rxtop,
4253 skb_shinfo(rxtop)->nr_frags,
Florian Westphal13809602014-09-03 13:34:36 +00004254 buffer_info->rxbuf.page, 0, length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004255 skb = rxtop;
4256 rxtop = NULL;
4257 e1000_consume_page(buffer_info, skb, length);
4258 } else {
Florian Westphal13809602014-09-03 13:34:36 +00004259 struct page *p;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004260 /* no chain, got EOP, this buf is the packet
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004261 * copybreak to save the put_page/alloc_page
4262 */
Florian Westphal13809602014-09-03 13:34:36 +00004263 p = buffer_info->rxbuf.page;
Florian Westphalde591c72014-09-03 13:34:42 +00004264 if (length <= copybreak) {
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004265 u8 *vaddr;
Florian Westphal13809602014-09-03 13:34:36 +00004266
Florian Westphalde591c72014-09-03 13:34:42 +00004267 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4268 length -= 4;
4269 skb = e1000_alloc_rx_skb(adapter,
4270 length);
4271 if (!skb)
4272 break;
4273
Florian Westphal13809602014-09-03 13:34:36 +00004274 vaddr = kmap_atomic(p);
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004275 memcpy(skb_tail_pointer(skb), vaddr,
4276 length);
Cong Wang46790262011-11-25 23:14:23 +08004277 kunmap_atomic(vaddr);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004278 /* re-use the page, so don't erase
Florian Westphal13809602014-09-03 13:34:36 +00004279 * buffer_info->rxbuf.page
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004280 */
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004281 skb_put(skb, length);
Florian Westphalde591c72014-09-03 13:34:42 +00004282 e1000_rx_checksum(adapter,
4283 status | rx_desc->errors << 24,
4284 le16_to_cpu(rx_desc->csum), skb);
4285
4286 total_rx_bytes += skb->len;
4287 total_rx_packets++;
4288
4289 e1000_receive_skb(adapter, status,
4290 rx_desc->special, skb);
4291 goto next_desc;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004292 } else {
Florian Westphalde591c72014-09-03 13:34:42 +00004293 skb = napi_get_frags(&adapter->napi);
4294 if (!skb) {
4295 adapter->alloc_rx_buff_failed++;
4296 break;
4297 }
Florian Westphal13809602014-09-03 13:34:36 +00004298 skb_fill_page_desc(skb, 0, p, 0,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004299 length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004300 e1000_consume_page(buffer_info, skb,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004301 length);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004302 }
4303 }
4304 }
4305
4306 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4307 e1000_rx_checksum(adapter,
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004308 (u32)(status) |
4309 ((u32)(rx_desc->errors) << 24),
4310 le16_to_cpu(rx_desc->csum), skb);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004311
Ben Greearb0d15622012-02-11 15:40:11 +00004312 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4313 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4314 pskb_trim(skb, skb->len - 4);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004315 total_rx_packets++;
4316
Florian Westphalde591c72014-09-03 13:34:42 +00004317 if (status & E1000_RXD_STAT_VP) {
4318 __le16 vlan = rx_desc->special;
4319 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4320
4321 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004322 }
4323
Florian Westphalde591c72014-09-03 13:34:42 +00004324 napi_gro_frags(&adapter->napi);
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004325
4326next_desc:
4327 rx_desc->status = 0;
4328
4329 /* return some buffers to hardware, one at a time is too slow */
4330 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4331 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4332 cleaned_count = 0;
4333 }
4334
4335 /* use prefetched values */
4336 rx_desc = next_rxd;
4337 buffer_info = next_buffer;
4338 }
4339 rx_ring->next_to_clean = i;
4340
4341 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4342 if (cleaned_count)
4343 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4344
4345 adapter->total_rx_packets += total_rx_packets;
4346 adapter->total_rx_bytes += total_rx_bytes;
Ajit Khaparde5fe31de2009-10-07 02:42:23 +00004347 netdev->stats.rx_bytes += total_rx_bytes;
4348 netdev->stats.rx_packets += total_rx_packets;
Jesse Brandeburgedbbb3c2009-07-06 10:44:39 +00004349 return cleaned;
4350}
4351
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00004352/* this should improve performance for small packets with large amounts
Joe Perches57bf6ee2010-05-13 15:26:17 +00004353 * of reassembly being done in the stack
4354 */
Florian Westphal2b294b12014-09-03 13:34:21 +00004355static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
Florian Westphal93f0afe2014-09-03 13:34:26 +00004356 struct e1000_rx_buffer *buffer_info,
Florian Westphal2b294b12014-09-03 13:34:21 +00004357 u32 length, const void *data)
Joe Perches57bf6ee2010-05-13 15:26:17 +00004358{
Florian Westphal2b294b12014-09-03 13:34:21 +00004359 struct sk_buff *skb;
Joe Perches57bf6ee2010-05-13 15:26:17 +00004360
4361 if (length > copybreak)
Florian Westphal2b294b12014-09-03 13:34:21 +00004362 return NULL;
Joe Perches57bf6ee2010-05-13 15:26:17 +00004363
Florian Westphal2b294b12014-09-03 13:34:21 +00004364 skb = e1000_alloc_rx_skb(adapter, length);
4365 if (!skb)
4366 return NULL;
Joe Perches57bf6ee2010-05-13 15:26:17 +00004367
Florian Westphal2b294b12014-09-03 13:34:21 +00004368 dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4369 length, DMA_FROM_DEVICE);
4370
4371 memcpy(skb_put(skb, length), data, length);
4372
4373 return skb;
Joe Perches57bf6ee2010-05-13 15:26:17 +00004374}
4375
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * Walks the descriptor ring from next_to_clean while descriptors have
 * the DD (descriptor done) bit set, hands completed frames to the stack
 * and returns the consumed buffers to hardware in batches.
 *
 * Return: true if at least one descriptor was cleaned.
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_rx_buffer *buffer_info, *next_buffer;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	/* DD set means hardware has finished writing this descriptor */
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 *data;
		u8 status;

		/* honor the NAPI budget */
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		length = le16_to_cpu(rx_desc->length);

		data = buffer_info->rxbuf.data;
		prefetch(data);
		/* small frames: copy into a fresh skb and keep the ring
		 * buffer; otherwise build an skb around the ring buffer
		 * itself and detach it from the ring below
		 */
		skb = e1000_copybreak(adapter, buffer_info, length, data);
		if (!skb) {
			unsigned int frag_len = e1000_frag_len(adapter);

			skb = build_skb(data - E1000_HEADROOM, frag_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				break;
			}

			skb_reserve(skb, E1000_HEADROOM);
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
			/* buffer now owned by the skb; the allocator will
			 * replace it on the refill pass
			 */
			buffer_info->rxbuf.data = NULL;
		}

		if (++i == rx_ring->count)
			i = 0;

		/* prefetch the next descriptor while processing this one */
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;

		/* !EOP means multiple descriptors were used to store a single
		 * packet; this path only supports single-buffer frames, so
		 * toss every descriptor with EOP clear and also the next
		 * frame that _does_ have EOP set, as it is by definition
		 * only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
			dev_kfree_skb(skb);
			/* the EOP fragment ends the run of discards */
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			/* TBI workaround: some otherwise-errored frames are
			 * acceptable minus one byte (carrier-extension
			 * symbol); RXALL keeps errored frames regardless
			 */
			if (e1000_tbi_should_accept(adapter, status,
						    rx_desc->errors,
						    length, data)) {
				length--;
			} else if (netdev->features & NETIF_F_RXALL) {
				goto process_skb;
			} else {
				dev_kfree_skb(skb);
				goto next_desc;
			}
		}

process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		/* NULL rxbuf.data means the build_skb path was taken and the
		 * payload is not yet accounted in the skb; the copybreak skb
		 * was already filled and only needs trimming
		 */
		if (buffer_info->rxbuf.data == NULL)
			skb_put(skb, length);
		else /* copybreak skb */
			skb_trim(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		/* clear DD so this descriptor is not reprocessed */
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	/* refill whatever the batched refills above did not cover */
	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
4524
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 *
 * Refills descriptors starting at next_to_use with freshly allocated,
 * DMA-mapped pages, then advances the hardware tail pointer.  Stops
 * early (incrementing alloc_rx_buff_failed) on allocation or mapping
 * failure.
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_rx_buffer *buffer_info;
	unsigned int i;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		/* allocate a new page if necessary (the slot may still hold
		 * a reusable page from a previous receive)
		 */
		if (!buffer_info->rxbuf.page) {
			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->rxbuf.page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		/* map the page for device DMA unless a mapping survives
		 * from the previous use of this slot
		 */
		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->rxbuf.page, 0,
							adapter->rx_buffer_len,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				/* undo the page allocation so the slot is
				 * consistent (no page, no mapping)
				 */
				put_page(buffer_info->rxbuf.page);
				buffer_info->rxbuf.page = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* the tail register must point at the last valid descriptor,
		 * i.e. one before next_to_use, wrapping at zero
		 */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
4589
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: receive ring to refill
 * @cleaned_count: number of descriptors to refill this pass
 *
 * Refills descriptors with page-fragment buffers, DMA-maps each one and
 * writes its bus address into the descriptor, then advances the hardware
 * tail.  Contains the errata-23 workaround: a buffer (and its DMA
 * mapping) must not cross a 64 kB boundary, so misaligned allocations
 * are retried once before giving up.
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_rx_buffer *buffer_info;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		void *data;

		/* slot still holds an unconsumed buffer; just rewrite
		 * the descriptor below
		 */
		if (buffer_info->rxbuf.data)
			goto skip;

		data = e1000_alloc_frag(adapter);
		if (!data) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
			void *olddata = data;
			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, data);
			/* Try again, without freeing the previous: freeing
			 * first would likely hand back the same bad address
			 */
			data = e1000_alloc_frag(adapter);
			/* Failed allocation, critical failure */
			if (!data) {
				skb_free_frag(olddata);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
				/* give up */
				skb_free_frag(data);
				skb_free_frag(olddata);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			/* Use new allocation */
			skb_free_frag(olddata);
		}
		buffer_info->dma = dma_map_single(&pdev->dev,
						  data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			skb_free_frag(data);
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary: the bus
		 * address must satisfy the same constraint as the CPU one
		 */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);

			skb_free_frag(data);
			buffer_info->rxbuf.data = NULL;
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break;
		}
		buffer_info->rxbuf.data = data;
skip:
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		/* tail register points at the last valid descriptor,
		 * one before next_to_use, wrapping at zero
		 */
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
4704
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 *
 * Called repeatedly (adapter->smartspeed counts the invocations of the
 * workaround).  If 1000 Mb/s autonegotiation keeps failing with a
 * master/slave configuration fault, drop the PHY's manual master/slave
 * setting and restart autonegotiation; after E1000_SMARTSPEED_DOWNSHIFT
 * further attempts, re-enable it (perhaps a 2/3 pair cable), and reset
 * the counter after E1000_SMARTSPEED_MAX so the cycle can repeat.
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	/* only relevant for IGP PHYs autonegotiating 1000 Mb/s full duplex */
	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			/* drop manual master/slave config and renegotiate */
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
					    phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL,
						&phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL,
						    phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
4761
4762/**
4763 * e1000_ioctl -
4764 * @netdev:
4765 * @ifreq:
4766 * @cmd:
4767 **/
Joe Perches64798842008-07-11 15:17:02 -07004768static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004769{
4770 switch (cmd) {
4771 case SIOCGMIIPHY:
4772 case SIOCGMIIREG:
4773 case SIOCSMIIREG:
4774 return e1000_mii_ioctl(netdev, ifr, cmd);
4775 default:
4776 return -EOPNOTSUPP;
4777 }
4778}
4779
/**
 * e1000_mii_ioctl - read/write PHY registers on behalf of user space
 * @netdev: network interface device structure
 * @ifr: ioctl request carrying the struct mii_ioctl_data
 * @cmd: SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
 *
 * Only supported on copper media.  PHY accesses are serialized with
 * other users via stats_lock.  A write to PHY_CTRL additionally mirrors
 * the requested speed/duplex/autoneg settings into the driver's own
 * configuration and resets (or reinitializes) the adapter so hardware
 * state matches what the user programmed.
 *
 * Return: E1000_SUCCESS, or a negative errno on failure.
 **/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		/* serialize PHY access against other stats_lock users */
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		/* MII register numbers are 5 bits wide */
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num,
					mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				/* don't touch config if the PHY is being
				 * powered down
				 */
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					/* 0x2F: advertise all supported
					 * speed/duplex combinations --
					 * presumably 10/100 half+full plus
					 * 1000 full; verify against hw defs
					 */
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;
					/* decode forced speed from the BMCR
					 * speed-select bits (0x40 = 1000,
					 * 0x2000 = 100, else 10)
					 */
					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				/* apply the new link config to hardware */
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				/* these take effect only after a PHY reset */
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
4876
Joe Perches64798842008-07-11 15:17:02 -07004877void e1000_pci_set_mwi(struct e1000_hw *hw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878{
4879 struct e1000_adapter *adapter = hw->back;
Malli Chilakala26483452005-04-28 19:44:46 -07004880 int ret_val = pci_set_mwi(adapter->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004881
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004882 if (ret_val)
Emil Tantilovfeb8f472010-07-26 23:37:21 -07004883 e_err(probe, "Error in setting MWI\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884}
4885
Joe Perches64798842008-07-11 15:17:02 -07004886void e1000_pci_clear_mwi(struct e1000_hw *hw)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004887{
4888 struct e1000_adapter *adapter = hw->back;
4889
4890 pci_clear_mwi(adapter->pdev);
4891}
4892
Joe Perches64798842008-07-11 15:17:02 -07004893int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
Peter Oruba007755e2007-09-28 22:42:06 -07004894{
4895 struct e1000_adapter *adapter = hw->back;
4896 return pcix_get_mmrbc(adapter->pdev);
4897}
4898
Joe Perches64798842008-07-11 15:17:02 -07004899void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
Peter Oruba007755e2007-09-28 22:42:06 -07004900{
4901 struct e1000_adapter *adapter = hw->back;
4902 pcix_set_mmrbc(adapter->pdev, mmrbc);
4903}
4904
Joe Perches64798842008-07-11 15:17:02 -07004905void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004906{
4907 outl(value, port);
4908}
4909
Jiri Pirko5622e402011-07-21 03:26:31 +00004910static bool e1000_vlan_used(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004911{
Jiri Pirko5622e402011-07-21 03:26:31 +00004912 u16 vid;
4913
4914 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4915 return true;
4916 return false;
4917}
4918
Jiri Pirko52f55092012-03-20 18:10:01 +00004919static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4920 netdev_features_t features)
4921{
4922 struct e1000_hw *hw = &adapter->hw;
4923 u32 ctrl;
4924
4925 ctrl = er32(CTRL);
Patrick McHardyf6469682013-04-19 02:04:27 +00004926 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
Jiri Pirko52f55092012-03-20 18:10:01 +00004927 /* enable VLAN tag insert/strip */
4928 ctrl |= E1000_CTRL_VME;
4929 } else {
4930 /* disable VLAN tag insert/strip */
4931 ctrl &= ~E1000_CTRL_VME;
4932 }
4933 ew32(CTRL, ctrl);
4934}
Jiri Pirko5622e402011-07-21 03:26:31 +00004935static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4936 bool filter_on)
4937{
Joe Perches1dc32912008-07-11 15:17:08 -07004938 struct e1000_hw *hw = &adapter->hw;
Jiri Pirko5622e402011-07-21 03:26:31 +00004939 u32 rctl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004940
Jesse Brandeburg9150b762008-03-21 11:06:58 -07004941 if (!test_bit(__E1000_DOWN, &adapter->flags))
4942 e1000_irq_disable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004943
Jiri Pirko52f55092012-03-20 18:10:01 +00004944 __e1000_vlan_mode(adapter, adapter->netdev->features);
Jiri Pirko5622e402011-07-21 03:26:31 +00004945 if (filter_on) {
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004946 /* enable VLAN receive filtering */
4947 rctl = er32(RCTL);
4948 rctl &= ~E1000_RCTL_CFIEN;
Jiri Pirko5622e402011-07-21 03:26:31 +00004949 if (!(adapter->netdev->flags & IFF_PROMISC))
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004950 rctl |= E1000_RCTL_VFE;
4951 ew32(RCTL, rctl);
4952 e1000_update_mng_vlan(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004953 } else {
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00004954 /* disable VLAN receive filtering */
4955 rctl = er32(RCTL);
4956 rctl &= ~E1000_RCTL_VFE;
4957 ew32(RCTL, rctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004958 }
4959
Jesse Brandeburg9150b762008-03-21 11:06:58 -07004960 if (!test_bit(__E1000_DOWN, &adapter->flags))
4961 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962}
4963
Michał Mirosławc8f44af2011-11-15 15:29:55 +00004964static void e1000_vlan_mode(struct net_device *netdev,
Jiri Pirko52f55092012-03-20 18:10:01 +00004965 netdev_features_t features)
Jiri Pirko5622e402011-07-21 03:26:31 +00004966{
4967 struct e1000_adapter *adapter = netdev_priv(netdev);
Jiri Pirko5622e402011-07-21 03:26:31 +00004968
4969 if (!test_bit(__E1000_DOWN, &adapter->flags))
4970 e1000_irq_disable(adapter);
4971
Jiri Pirko52f55092012-03-20 18:10:01 +00004972 __e1000_vlan_mode(adapter, features);
Jiri Pirko5622e402011-07-21 03:26:31 +00004973
4974 if (!test_bit(__E1000_DOWN, &adapter->flags))
4975 e1000_irq_enable(adapter);
4976}
4977
Patrick McHardy80d5c362013-04-19 02:04:28 +00004978static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4979 __be16 proto, u16 vid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004980{
Malli Chilakala60490fe2005-06-17 17:41:45 -07004981 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07004982 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07004983 u32 vfta, index;
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004984
Joe Perches1dc32912008-07-11 15:17:08 -07004985 if ((hw->mng_cookie.status &
Jesse Brandeburg96838a42006-01-18 13:01:39 -08004986 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4987 (vid == adapter->mng_vlan_id))
Jiri Pirko8e586132011-12-08 19:52:37 -05004988 return 0;
Jiri Pirko5622e402011-07-21 03:26:31 +00004989
4990 if (!e1000_vlan_used(adapter))
4991 e1000_vlan_filter_on_off(adapter, true);
4992
Linus Torvalds1da177e2005-04-16 15:20:36 -07004993 /* add VID to filter table */
4994 index = (vid >> 5) & 0x7F;
Joe Perches1dc32912008-07-11 15:17:08 -07004995 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004996 vfta |= (1 << (vid & 0x1F));
Joe Perches1dc32912008-07-11 15:17:08 -07004997 e1000_write_vfta(hw, index, vfta);
Jiri Pirko5622e402011-07-21 03:26:31 +00004998
4999 set_bit(vid, adapter->active_vlans);
Jiri Pirko8e586132011-12-08 19:52:37 -05005000
5001 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005002}
5003
Patrick McHardy80d5c362013-04-19 02:04:28 +00005004static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
5005 __be16 proto, u16 vid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005006{
Malli Chilakala60490fe2005-06-17 17:41:45 -07005007 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07005008 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07005009 u32 vfta, index;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010
Jesse Brandeburg9150b762008-03-21 11:06:58 -07005011 if (!test_bit(__E1000_DOWN, &adapter->flags))
5012 e1000_irq_disable(adapter);
Jesse Brandeburg9150b762008-03-21 11:06:58 -07005013 if (!test_bit(__E1000_DOWN, &adapter->flags))
5014 e1000_irq_enable(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005015
5016 /* remove VID from filter table */
5017 index = (vid >> 5) & 0x7F;
Joe Perches1dc32912008-07-11 15:17:08 -07005018 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005019 vfta &= ~(1 << (vid & 0x1F));
Joe Perches1dc32912008-07-11 15:17:08 -07005020 e1000_write_vfta(hw, index, vfta);
Jiri Pirko5622e402011-07-21 03:26:31 +00005021
5022 clear_bit(vid, adapter->active_vlans);
5023
5024 if (!e1000_vlan_used(adapter))
5025 e1000_vlan_filter_on_off(adapter, false);
Jiri Pirko8e586132011-12-08 19:52:37 -05005026
5027 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005028}
5029
Joe Perches64798842008-07-11 15:17:02 -07005030static void e1000_restore_vlan(struct e1000_adapter *adapter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005031{
Jiri Pirko5622e402011-07-21 03:26:31 +00005032 u16 vid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005033
Jiri Pirko5622e402011-07-21 03:26:31 +00005034 if (!e1000_vlan_used(adapter))
5035 return;
5036
5037 e1000_vlan_filter_on_off(adapter, true);
5038 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
Patrick McHardy80d5c362013-04-19 02:04:28 +00005039 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005040}
5041
David Decotigny14ad2512011-04-27 18:32:43 +00005042int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043{
Joe Perches1dc32912008-07-11 15:17:08 -07005044 struct e1000_hw *hw = &adapter->hw;
5045
5046 hw->autoneg = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005047
David Decotigny14ad2512011-04-27 18:32:43 +00005048 /* Make sure dplx is at most 1 bit and lsb of speed is not set
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00005049 * for the switch() below to work
5050 */
David Decotigny14ad2512011-04-27 18:32:43 +00005051 if ((spd & 1) || (dplx & ~1))
5052 goto err_inval;
5053
Malli Chilakala69213682005-06-17 17:44:20 -07005054 /* Fiber NICs only allow 1000 gbps Full duplex */
Joe Perches1dc32912008-07-11 15:17:08 -07005055 if ((hw->media_type == e1000_media_type_fiber) &&
David Decotigny14ad2512011-04-27 18:32:43 +00005056 spd != SPEED_1000 &&
5057 dplx != DUPLEX_FULL)
5058 goto err_inval;
Malli Chilakala69213682005-06-17 17:44:20 -07005059
David Decotigny14ad2512011-04-27 18:32:43 +00005060 switch (spd + dplx) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005061 case SPEED_10 + DUPLEX_HALF:
Joe Perches1dc32912008-07-11 15:17:08 -07005062 hw->forced_speed_duplex = e1000_10_half;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005063 break;
5064 case SPEED_10 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07005065 hw->forced_speed_duplex = e1000_10_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005066 break;
5067 case SPEED_100 + DUPLEX_HALF:
Joe Perches1dc32912008-07-11 15:17:08 -07005068 hw->forced_speed_duplex = e1000_100_half;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005069 break;
5070 case SPEED_100 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07005071 hw->forced_speed_duplex = e1000_100_full;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005072 break;
5073 case SPEED_1000 + DUPLEX_FULL:
Joe Perches1dc32912008-07-11 15:17:08 -07005074 hw->autoneg = 1;
5075 hw->autoneg_advertised = ADVERTISE_1000_FULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005076 break;
5077 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5078 default:
David Decotigny14ad2512011-04-27 18:32:43 +00005079 goto err_inval;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080 }
Jesse Brandeburgc819bbd52012-07-26 02:31:09 +00005081
5082 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5083 hw->mdix = AUTO_ALL_MODES;
5084
Linus Torvalds1da177e2005-04-16 15:20:36 -07005085 return 0;
David Decotigny14ad2512011-04-27 18:32:43 +00005086
5087err_inval:
5088 e_err(probe, "Unsupported Speed/Duplex configuration\n");
5089 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090}
5091
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005092static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093{
5094 struct net_device *netdev = pci_get_drvdata(pdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -07005095 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07005096 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07005097 u32 ctrl, ctrl_ext, rctl, status;
5098 u32 wufc = adapter->wol;
Auke Kok6fdfef12006-06-27 09:06:36 -07005099#ifdef CONFIG_PM
Jeff Kirsher240b1712006-01-12 16:51:28 -08005100 int retval = 0;
Auke Kok6fdfef12006-06-27 09:06:36 -07005101#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07005102
5103 netif_device_detach(netdev);
5104
Auke Kok2db10a02006-06-27 09:06:28 -07005105 if (netif_running(netdev)) {
yzhu16a7d64e2013-11-23 07:07:40 +00005106 int count = E1000_CHECK_RESET_COUNT;
5107
5108 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5109 usleep_range(10000, 20000);
5110
Auke Kok2db10a02006-06-27 09:06:28 -07005111 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005112 e1000_down(adapter);
Auke Kok2db10a02006-06-27 09:06:28 -07005113 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114
Jesse Brandeburg2f826652006-01-18 13:01:34 -08005115#ifdef CONFIG_PM
Kok, Auke1d33e9c2007-02-16 14:39:28 -08005116 retval = pci_save_state(pdev);
Jesse Brandeburg3a3847e2012-01-04 20:23:33 +00005117 if (retval)
Jesse Brandeburg2f826652006-01-18 13:01:34 -08005118 return retval;
5119#endif
5120
Joe Perches1dc32912008-07-11 15:17:08 -07005121 status = er32(STATUS);
Jesse Brandeburg96838a42006-01-18 13:01:39 -08005122 if (status & E1000_STATUS_LU)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005123 wufc &= ~E1000_WUFC_LNKC;
5124
Jesse Brandeburg96838a42006-01-18 13:01:39 -08005125 if (wufc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005126 e1000_setup_rctl(adapter);
Patrick McHardydb0ce502007-11-13 20:54:59 -08005127 e1000_set_rx_mode(netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005128
Dean Nelsonb8681792012-01-19 17:47:24 +00005129 rctl = er32(RCTL);
5130
Linus Torvalds1da177e2005-04-16 15:20:36 -07005131 /* turn on all-multi mode if wake on multicast is enabled */
Dean Nelsonb8681792012-01-19 17:47:24 +00005132 if (wufc & E1000_WUFC_MC)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005133 rctl |= E1000_RCTL_MPE;
Dean Nelsonb8681792012-01-19 17:47:24 +00005134
5135 /* enable receives in the hardware */
5136 ew32(RCTL, rctl | E1000_RCTL_EN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005137
Joe Perches1dc32912008-07-11 15:17:08 -07005138 if (hw->mac_type >= e1000_82540) {
5139 ctrl = er32(CTRL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005140 /* advertise wake from D3Cold */
5141 #define E1000_CTRL_ADVD3WUC 0x00100000
5142 /* phy power management enable */
5143 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5144 ctrl |= E1000_CTRL_ADVD3WUC |
5145 E1000_CTRL_EN_PHY_PWR_MGMT;
Joe Perches1dc32912008-07-11 15:17:08 -07005146 ew32(CTRL, ctrl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147 }
5148
Joe Perches1dc32912008-07-11 15:17:08 -07005149 if (hw->media_type == e1000_media_type_fiber ||
Jesse Brandeburg1532ece2009-09-25 12:16:14 +00005150 hw->media_type == e1000_media_type_internal_serdes) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151 /* keep the laser running in D3 */
Joe Perches1dc32912008-07-11 15:17:08 -07005152 ctrl_ext = er32(CTRL_EXT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005153 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
Joe Perches1dc32912008-07-11 15:17:08 -07005154 ew32(CTRL_EXT, ctrl_ext);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005155 }
5156
Joe Perches1dc32912008-07-11 15:17:08 -07005157 ew32(WUC, E1000_WUC_PME_EN);
5158 ew32(WUFC, wufc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005159 } else {
Joe Perches1dc32912008-07-11 15:17:08 -07005160 ew32(WUC, 0);
5161 ew32(WUFC, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005162 }
5163
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005164 e1000_release_manageability(adapter);
5165
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005166 *enable_wake = !!wufc;
5167
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005168 /* make sure adapter isn't asleep if manageability is enabled */
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005169 if (adapter->en_mng_pt)
5170 *enable_wake = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171
Auke Kokedd106f2006-11-06 08:57:12 -08005172 if (netif_running(netdev))
5173 e1000_free_irq(adapter);
5174
Tushar Dave125ca932017-12-06 02:26:29 +05305175 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5176 pci_disable_device(pdev);
Jeff Kirsher240b1712006-01-12 16:51:28 -08005177
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178 return 0;
5179}
5180
Jesse Brandeburg2f826652006-01-18 13:01:34 -08005181#ifdef CONFIG_PM
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005182static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5183{
5184 int retval;
5185 bool wake;
5186
5187 retval = __e1000_shutdown(pdev, &wake);
5188 if (retval)
5189 return retval;
5190
5191 if (wake) {
5192 pci_prepare_to_sleep(pdev);
5193 } else {
5194 pci_wake_from_d3(pdev, false);
5195 pci_set_power_state(pdev, PCI_D3hot);
5196 }
5197
5198 return 0;
5199}
5200
Joe Perches64798842008-07-11 15:17:02 -07005201static int e1000_resume(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005202{
5203 struct net_device *netdev = pci_get_drvdata(pdev);
Malli Chilakala60490fe2005-06-17 17:41:45 -07005204 struct e1000_adapter *adapter = netdev_priv(netdev);
Joe Perches1dc32912008-07-11 15:17:08 -07005205 struct e1000_hw *hw = &adapter->hw;
Joe Perches406874a2008-04-03 10:06:32 -07005206 u32 err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005207
Auke Kokd0e027d2006-04-14 19:04:40 -07005208 pci_set_power_state(pdev, PCI_D0);
Kok, Auke1d33e9c2007-02-16 14:39:28 -08005209 pci_restore_state(pdev);
Nick Nunleydbb5aae2010-02-03 14:49:48 +00005210 pci_save_state(pdev);
Taku Izumi81250292008-07-11 15:17:44 -07005211
5212 if (adapter->need_ioport)
5213 err = pci_enable_device(pdev);
5214 else
5215 err = pci_enable_device_mem(pdev);
Joe Perchesc7be73b2008-07-11 15:17:28 -07005216 if (err) {
Emil Tantilov675ad472010-04-27 14:02:58 +00005217 pr_err("Cannot enable PCI device from suspend\n");
Auke Kok3d1dd8c2006-08-28 14:56:27 -07005218 return err;
5219 }
Tushar Dave125ca932017-12-06 02:26:29 +05305220
5221 /* flush memory to make sure state is correct */
5222 smp_mb__before_atomic();
5223 clear_bit(__E1000_DISABLED, &adapter->flags);
Malli Chilakalaa4cb8472005-04-28 19:41:28 -07005224 pci_set_master(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005225
Auke Kokd0e027d2006-04-14 19:04:40 -07005226 pci_enable_wake(pdev, PCI_D3hot, 0);
5227 pci_enable_wake(pdev, PCI_D3cold, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228
Joe Perchesc7be73b2008-07-11 15:17:28 -07005229 if (netif_running(netdev)) {
5230 err = e1000_request_irq(adapter);
5231 if (err)
5232 return err;
5233 }
Auke Kokedd106f2006-11-06 08:57:12 -08005234
5235 e1000_power_up_phy(adapter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005236 e1000_reset(adapter);
Joe Perches1dc32912008-07-11 15:17:08 -07005237 ew32(WUS, ~0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005238
Jeff Garzik0fccd0e2006-12-15 10:56:10 -05005239 e1000_init_manageability(adapter);
5240
Jesse Brandeburg96838a42006-01-18 13:01:39 -08005241 if (netif_running(netdev))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242 e1000_up(adapter);
5243
5244 netif_device_attach(netdev);
5245
Linus Torvalds1da177e2005-04-16 15:20:36 -07005246 return 0;
5247}
5248#endif
Auke Kokc653e632006-05-23 13:35:57 -07005249
5250static void e1000_shutdown(struct pci_dev *pdev)
5251{
Rafael J. Wysockib43fcd72009-04-15 17:43:24 +00005252 bool wake;
5253
5254 __e1000_shutdown(pdev, &wake);
5255
5256 if (system_state == SYSTEM_POWER_OFF) {
5257 pci_wake_from_d3(pdev, wake);
5258 pci_set_power_state(pdev, PCI_D3hot);
5259 }
Auke Kokc653e632006-05-23 13:35:57 -07005260}
5261
Linus Torvalds1da177e2005-04-16 15:20:36 -07005262#ifdef CONFIG_NET_POLL_CONTROLLER
Jeff Kirsher6cfbd972013-02-09 12:49:21 +00005263/* Polling 'interrupt' - used by things like netconsole to send skbs
Linus Torvalds1da177e2005-04-16 15:20:36 -07005264 * without having to re-enable interrupts. It's not called while
5265 * the interrupt routine is executing.
5266 */
Joe Perches64798842008-07-11 15:17:02 -07005267static void e1000_netpoll(struct net_device *netdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005268{
Malli Chilakala60490fe2005-06-17 17:41:45 -07005269 struct e1000_adapter *adapter = netdev_priv(netdev);
Auke Kokd3d9e482006-07-14 16:14:23 -07005270
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271 disable_irq(adapter->pdev->irq);
David Howells7d12e782006-10-05 14:55:46 +01005272 e1000_intr(adapter->pdev->irq, netdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005273 enable_irq(adapter->pdev->irq);
5274}
5275#endif
5276
Auke Kok90267292006-06-08 09:30:24 -07005277/**
5278 * e1000_io_error_detected - called when PCI error is detected
5279 * @pdev: Pointer to PCI device
Jesse Brandeburg120a5d02009-09-25 15:19:46 -07005280 * @state: The current pci connection state
Auke Kok90267292006-06-08 09:30:24 -07005281 *
5282 * This function is called after a PCI bus error affecting
5283 * this device has been detected.
5284 */
Joe Perches64798842008-07-11 15:17:02 -07005285static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5286 pci_channel_state_t state)
Auke Kok90267292006-06-08 09:30:24 -07005287{
5288 struct net_device *netdev = pci_get_drvdata(pdev);
Wang Chen4cf16532008-11-12 23:38:14 -08005289 struct e1000_adapter *adapter = netdev_priv(netdev);
Auke Kok90267292006-06-08 09:30:24 -07005290
5291 netif_device_detach(netdev);
5292
Andre Detscheab63302009-06-30 12:46:13 +00005293 if (state == pci_channel_io_perm_failure)
5294 return PCI_ERS_RESULT_DISCONNECT;
5295
Auke Kok90267292006-06-08 09:30:24 -07005296 if (netif_running(netdev))
5297 e1000_down(adapter);
Tushar Dave125ca932017-12-06 02:26:29 +05305298
5299 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5300 pci_disable_device(pdev);
Auke Kok90267292006-06-08 09:30:24 -07005301
5302 /* Request a slot slot reset. */
5303 return PCI_ERS_RESULT_NEED_RESET;
5304}
5305
5306/**
5307 * e1000_io_slot_reset - called after the pci bus has been reset.
5308 * @pdev: Pointer to PCI device
5309 *
5310 * Restart the card from scratch, as if from a cold-boot. Implementation
5311 * resembles the first-half of the e1000_resume routine.
5312 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* some adapters need legacy I/O ports as well as memory BARs */
	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__E1000_DISABLED, &adapter->flags);
	pci_set_master(pdev);

	/* no wake sources wanted during error recovery */
	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);	/* clear all wake-up status bits */

	return PCI_ERS_RESULT_RECOVERED;
}
5342
5343/**
5344 * e1000_io_resume - called when traffic can start flowing again.
5345 * @pdev: Pointer to PCI device
5346 *
5347 * This callback is called when the error recovery driver tells us that
5348 * its OK to resume normal operation. Implementation resembles the
5349 * second-half of the e1000_resume routine.
5350 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	/* if the interface was up, bring it up again; bail out (leaving the
	 * device detached) when that fails
	 */
	if (netif_running(netdev) && e1000_up(adapter)) {
		pr_info("can't bring device back up after reset\n");
		return;
	}

	netif_device_attach(netdev);
}
5367
Linus Torvalds1da177e2005-04-16 15:20:36 -07005368/* e1000_main.c */