/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.44-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
        [board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
         board_82599 },

        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next          = NULL,
        .priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                if (direction == -1)
                        direction = 0;
                index = (((direction * 64) + queue) >> 2) & 0x1F;
                ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
                ivar &= ~(0xFF << (8 * (queue & 0x3)));
                ivar |= (msix_vector << (8 * (queue & 0x3)));
                IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
                break;
        case ixgbe_mac_82599EB:
                if (direction == -1) {
                        /* other causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((queue & 1) * 8);
                        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
                        break;
                } else {
                        /* tx or rx causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((16 * (queue & 1)) + (8 * direction));
                        ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
                        break;
                }
        default:
                break;
        }
}

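/*
 * Illustrative walk-through of the 82598 IVAR math above (example values
 * chosen for illustration): each IVAR register holds four 8-bit entries,
 * so for an Rx cause on queue 5 (direction 0) the code computes
 * index = ((0 * 64) + 5) >> 2 = 1 and byte lane 5 & 0x3 = 1, i.e. the
 * vector number lands in bits 15:8 of IVAR(1), with IXGBE_IVAR_ALLOC_VAL
 * marking the entry as valid.
 */
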
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
        u32 mask;

        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
        } else {
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
        }
}

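/*
 * Note on the split above: 82598 exposes its queue interrupt bits in the
 * single 32-bit EICS register (filtered through IXGBE_EIMS_RTX_QUEUE),
 * while 82599 takes a 64-bit queue mask written as two 32-bit halves to
 * EICS_EX(0) and EICS_EX(1).
 */
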
static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
        tx_buffer_info->dma = 0;
        if (tx_buffer_info->skb) {
                skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
                              DMA_TO_DEVICE);
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        tx_buffer_info->time_stamp = 0;
        /* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
        struct ixgbe_hw *hw = &adapter->hw;

        /* Detect a transmit hang in hardware; this serializes the
         * check with the clearing of time_stamp and movement of eop */
        adapter->detect_tx_hung = false;
        if (tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
                /* detected Tx unit hang */
                union ixgbe_adv_tx_desc *tx_desc;
                tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
                DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
                        "  Tx Queue             <%d>\n"
                        "  TDH, TDT             <%x>, <%x>\n"
                        "  next_to_use          <%x>\n"
                        "  next_to_clean        <%x>\n"
                        "tx_buffer_info[next_to_clean]\n"
                        "  time_stamp           <%lx>\n"
                        "  jiffies              <%lx>\n",
                        tx_ring->queue_index,
                        IXGBE_READ_REG(hw, tx_ring->head),
                        IXGBE_READ_REG(hw, tx_ring->tail),
                        tx_ring->next_to_use, eop,
                        tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                return true;
        }

        return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                         (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */

static void ixgbe_tx_timeout(struct net_device *netdev);

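/*
 * Worked example for the macros above (illustrative numbers): with
 * IXGBE_MAX_DATA_PER_TXD = 16384, a 60000-byte chunk needs
 * TXD_USE_COUNT(60000) = (60000 >> 14) + 1 = 4 descriptors, and
 * DESC_NEEDED reserves ring slots for a worst-case skb: one buffer for
 * skb->data, plus MAX_SKB_FRAGS page fragments, plus one context
 * descriptor.
 */
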
/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;

        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

        while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
               (count < tx_ring->work_limit)) {
                bool cleaned = false;
                for ( ; !cleaned; count++) {
                        struct sk_buff *skb;
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);
                        skb = tx_buffer_info->skb;

                        if (cleaned && skb) {
                                unsigned int segs, bytecount;
                                unsigned int hlen = skb_headlen(skb);

                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
#ifdef IXGBE_FCOE
                                /* adjust for FCoE Sequence Offload */
                                if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
                                    && (skb->protocol == htons(ETH_P_FCOE)) &&
                                    skb_is_gso(skb)) {
                                        hlen = skb_transport_offset(skb) +
                                                sizeof(struct fc_frame_header) +
                                                sizeof(struct fcoe_crc_eof);
                                        segs = DIV_ROUND_UP(skb->len - hlen,
                                                skb_shinfo(skb)->gso_size);
                                }
#endif /* IXGBE_FCOE */
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * hlen) + skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }

                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);

                        tx_desc->wb.status = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }

                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(count && netif_carrier_ok(netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
                        ++adapter->restart_queue;
                }
        }

        if (adapter->detect_tx_hung) {
                if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
                        /* schedule immediate reset if we believe we hung */
                        DPRINTK(PROBE, INFO,
                                "tx hang %d detected, resetting adapter\n",
                                adapter->tx_timeout_count + 1);
                        ixgbe_tx_timeout(adapter->netdev);
                }
        }

        /* re-arm the interrupt */
        if (count >= tx_ring->work_limit)
                ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        tx_ring->stats.packets += total_packets;
        tx_ring->stats.bytes += total_bytes;
        netdev->stats.tx_bytes += total_bytes;
        netdev->stats.tx_packets += total_packets;
        return (count < tx_ring->work_limit);
}

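/*
 * Note on the bytecount math in ixgbe_clean_tx_irq() above: when hardware
 * segments a TSO/FSO skb into 'segs' frames, the header bytes (hlen) go
 * out on the wire once per frame, so the on-wire byte count is skb->len
 * plus (segs - 1) extra copies of the header.
 */
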
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
        u32 rxctrl;
        int cpu = get_cpu();
        int q = rx_ring - adapter->rx_ring;

        if (rx_ring->cpu != cpu) {
                rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                        rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
                        rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                        rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
                        rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
                                   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
                }
                rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
                rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
                rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
                rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
                            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
                rx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
        u32 txctrl;
        int cpu = get_cpu();
        int q = tx_ring - adapter->tx_ring;

        if (tx_ring->cpu != cpu) {
                txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                        txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
                        txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                        txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
                        txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
                                   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
                }
                txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
                tx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
        int i;

        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
                return;

        /* always use CB2 mode, difference is masked in the CB driver */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].cpu = -1;
                ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i].cpu = -1;
                ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
        }
}

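/*
 * Background for the DCA helpers above: dca3_get_tag() returns the DCA tag
 * for the CPU currently running the cleanup, and programming that tag into
 * DCA_RXCTRL/DCA_TXCTRL asks the platform to steer the device's descriptor
 * (and, for Rx, header) writebacks toward that CPU's cache.
 */
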
static int __ixgbe_notify_dca(struct device *dev, void *data)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        unsigned long event = *(unsigned long *)data;

        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if we're already enabled, don't do it again */
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        break;
                if (dca_add_requester(dev) == 0) {
                        adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                        ixgbe_setup_dca(adapter);
                        break;
                }
                /* Fall Through since DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                        dca_remove_requester(dev);
                        adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
                }
                break;
        }

        return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct napi_struct *napi = &q_vector->napi;
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

        skb_record_rx_queue(skb, ring->queue_index);
        if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
                        vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
                else
                        napi_gro_receive(napi, skb);
        } else {
                if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
                        vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                else
                        netif_rx(skb);
        }
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: current Rx descriptor being processed
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
{
        u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

        skb->ip_summed = CHECKSUM_NONE;

        /* Rx csum disabled */
        if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;

        /* if IP and error */
        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }

        if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;

        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

                /*
                 * 82599 errata, UDP frames with a 0 checksum can be marked as
                 * checksum errors.
                 */
                if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
                    (adapter->hw.mac.type == ixgbe_mac_82599EB))
                        return;

                adapter->hw_csum_rx_error++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
                                         struct ixgbe_ring *rx_ring, u32 val)
{
        /*
         * Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
        IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        unsigned int i;

        i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

                if (!bi->page_dma &&
                    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
                        if (!bi->page) {
                                bi->page = alloc_page(GFP_ATOMIC);
                                if (!bi->page) {
                                        adapter->alloc_rx_page_failed++;
                                        goto no_buffers;
                                }
                                bi->page_offset = 0;
                        } else {
                                /* use a half page if we're re-using */
                                bi->page_offset ^= (PAGE_SIZE / 2);
                        }

                        bi->page_dma = pci_map_page(pdev, bi->page,
                                                    bi->page_offset,
                                                    (PAGE_SIZE / 2),
                                                    PCI_DMA_FROMDEVICE);
                }

                if (!bi->skb) {
                        struct sk_buff *skb;
                        skb = netdev_alloc_skb(adapter->netdev,
                                               (rx_ring->rx_buf_len +
                                                NET_IP_ALIGN));

                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        /*
                         * Make buffer alignment 2 beyond a 16 byte boundary;
                         * this will result in a 16 byte aligned IP header
                         * after the 14 byte MAC header is removed.
                         */
                        skb_reserve(skb, NET_IP_ALIGN);

                        bi->skb = skb;
                        bi->dma = pci_map_single(pdev, skb->data,
                                                 rx_ring->rx_buf_len,
                                                 PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                bi = &rx_ring->rx_buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                if (i-- == 0)
                        i = (rx_ring->count - 1);

                ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
        }
}

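/*
 * Note on the page reuse above: in packet-split mode each receive page is
 * consumed one half at a time; bi->page_offset ^= (PAGE_SIZE / 2) flips
 * between the two halves, so a page is only released once both halves have
 * been handed to the stack (see the page_count() check in
 * ixgbe_clean_rx_irq()).
 */
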
static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
        return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
                IXGBE_RXDADV_RSCCNT_MASK) >>
                IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
        unsigned int frag_list_size = 0;

        while (skb->prev) {
                struct sk_buff *prev = skb->prev;
                frag_list_size += skb->len;
                skb->prev = NULL;
                skb = prev;
        }

        skb_shinfo(skb)->frag_list = skb->next;
        skb->next = NULL;
        skb->len += frag_list_size;
        skb->data_len += frag_list_size;
        skb->truesize += frag_list_size;
        return skb;
}

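/*
 * Illustrative example for ixgbe_transform_rsc_queue() above: for an RSC
 * chain received as A -> B -> C (C last, with C->prev == B and
 * B->prev == A), the loop walks back to A and returns it with B and C
 * attached as A's frag_list, their lengths folded into A's len, data_len
 * and truesize.
 */
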
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i, rsc_count = 0;
        u32 len, staterr;
        u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
        int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;

                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                        hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                               IXGBE_RXDADV_HDRBUFLEN_SHIFT;
                        if (hdr_info & IXGBE_RXDADV_SPH)
                                adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                } else {
                        len = le16_to_cpu(rx_desc->wb.upper.length);
                }

                cleaned = true;
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                if (rx_buffer_info->dma) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         PCI_DMA_FROMDEVICE);
                        rx_buffer_info->dma = 0;
                        skb_put(skb, len);
                }

                if (upper_len) {
                        pci_unmap_page(pdev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
                                           rx_buffer_info->page_offset,
                                           upper_len);

                        if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
                            (page_count(rx_buffer_info->page) != 1))
                                rx_buffer_info->page = NULL;
                        else
                                get_page(rx_buffer_info->page);

                        skb->len += upper_len;
                        skb->data_len += upper_len;
                        skb->truesize += upper_len;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;

                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);
                cleaned_count++;

                if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
                        rsc_count = ixgbe_get_rsc_count(rx_desc);

                if (rsc_count) {
                        u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
                                     IXGBE_RXDADV_NEXTP_SHIFT;
                        next_buffer = &rx_ring->rx_buffer_info[nextp];
                        rx_ring->rsc_count += (rsc_count - 1);
                } else {
                        next_buffer = &rx_ring->rx_buffer_info[i];
                }

                if (staterr & IXGBE_RXD_STAT_EOP) {
                        if (skb->prev)
                                skb = ixgbe_transform_rsc_queue(skb);
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
                        if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
                                next_buffer->skb = skb;
                                next_buffer->dma = 0;
                        } else {
                                skb->next = next_buffer->skb;
                                skb->next->prev = skb;
                        }
                        adapter->non_eop_descs++;
                        goto next_desc;
                }

                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbe_rx_checksum(adapter, rx_desc, skb);

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
                        ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
                        if (!ddp_bytes)
                                goto next_desc;
                }
#endif /* IXGBE_FCOE */
                ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = &rx_ring->rx_buffer_info[i];

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
        /* include DDPed FCoE data */
        if (ddp_bytes > 0) {
                unsigned int mss;

                mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
                        sizeof(struct fc_frame_header) -
                        sizeof(struct fcoe_crc_eof);
                if (mss > 512)
                        mss &= ~511;
                total_rx_bytes += ddp_bytes;
                total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
        }
#endif /* IXGBE_FCOE */

        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        netdev->stats.rx_bytes += total_rx_bytes;
        netdev->stats.rx_packets += total_rx_packets;

        return cleaned;
}

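/*
 * Note on the DDP accounting at the end of ixgbe_clean_rx_irq(): the FCoE
 * payload size per frame is estimated from the MTU, and 'mss &= ~511'
 * rounds it down to a multiple of 512 (e.g. 2112 -> 2048), so
 * DIV_ROUND_UP(ddp_bytes, mss) yields a conservative packet count for the
 * statistics.
 */
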
static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
        struct ixgbe_q_vector *q_vector;
        int i, j, q_vectors, v_idx, r_idx;
        u32 mask;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /*
         * Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                q_vector = adapter->q_vector[v_idx];
                /* XXX for_each_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                       adapter->num_rx_queues);

                for (i = 0; i < q_vector->rxr_count; i++) {
                        j = adapter->rx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, 0, j, v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
                }
                r_idx = find_first_bit(q_vector->txr_idx,
                                       adapter->num_tx_queues);

                for (i = 0; i < q_vector->txr_count; i++) {
                        j = adapter->tx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, 1, j, v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
                }

                if (q_vector->txr_count && !q_vector->rxr_count)
                        /* tx only */
                        q_vector->eitr = adapter->tx_eitr_param;
                else if (q_vector->rxr_count)
                        /* rx or mixed */
                        q_vector->eitr = adapter->rx_eitr_param;

                ixgbe_write_eitr(q_vector);
        }

        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
                ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
                               v_idx);
        else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
                ixgbe_set_ivar(adapter, -1, 1, v_idx);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
        mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c).
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
        unsigned int retval = itr_setting;
        u32 timepassed_us;
        u64 bytes_perint;

        if (packets == 0)
                goto update_itr_done;

        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = 1000000/eitr;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */

        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > adapter->eitr_low)
                        retval = low_latency;
                break;
        case low_latency:
                if (bytes_perint > adapter->eitr_high)
                        retval = bulk_latency;
                else if (bytes_perint <= adapter->eitr_low)
                        retval = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= adapter->eitr_high)
                        retval = low_latency;
                break;
        }

update_itr_done:
        return retval;
}

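/*
 * Worked example for ixgbe_update_itr() above (illustrative numbers): at
 * eitr = 20000 ints/s the last timeslice was 1000000/20000 = 50 us; if
 * 3000 bytes arrived in that slice, bytes_perint = 60 bytes/us (roughly
 * 60 MB/s), which the eitr_low/eitr_high thresholds then map into the
 * lowest/low/bulk latency bands.
 */
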
/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        int v_idx = q_vector->v_idx;
        u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);

        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                /* must write high and low 16 bits to reset counter */
                itr_reg |= (itr_reg << 16);
        } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                /*
                 * set the WDIS bit to not clear the timer bits and cause an
                 * immediate assertion of the interrupt
                 */
                itr_reg |= IXGBE_EITR_CNT_WDIS;
        }
        IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        u32 new_itr;
        u8 current_itr, ret_itr;
        int i, r_idx;
        struct ixgbe_ring *rx_ring, *tx_ring;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->tx_itr,
                                           tx_ring->total_packets,
                                           tx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
                                    q_vector->tx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->rx_itr,
                                           rx_ring->total_packets,
                                           rx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
                                    q_vector->rx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
        default:
                new_itr = 8000;
                break;
        }

        if (new_itr != q_vector->eitr) {
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

                /* save the algorithm value here, not the smoothed one */
                q_vector->eitr = new_itr;

                ixgbe_write_eitr(q_vector);
        }

        return;
}

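/*
 * Note on the smoothing in ixgbe_set_itr_msix() above: the new rate is
 * blended 90/10 with the old one, so a vector currently at 8000 ints/s
 * that measures a lowest_latency workload (target 100000) first steps to
 * (8000 * 90)/100 + (100000 * 10)/100 = 17200 ints/s rather than jumping
 * straight to the target.
 */
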
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
        struct ixgbe_hw *hw = &adapter->hw;

        if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
            (eicr & IXGBE_EICR_GPI_SDP1)) {
                DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
                /* write to clear the interrupt */
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
        }
}

static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
        struct ixgbe_hw *hw = &adapter->hw;

        if (eicr & IXGBE_EICR_GPI_SDP1) {
                /* Clear the interrupt */
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
                schedule_work(&adapter->multispeed_fiber_task);
        } else if (eicr & IXGBE_EICR_GPI_SDP2) {
                /* Clear the interrupt */
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
                schedule_work(&adapter->sfp_config_module_task);
        } else {
                /* Interrupt isn't for us... */
                return;
        }
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        adapter->lsc_int++;
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->link_check_timeout = jiffies;
        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
                schedule_work(&adapter->watchdog_task);
        }
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr;

        /*
         * Workaround for Silicon errata.  Use clear-by-write instead
         * of clear-by-read.  Reading with EICS will return the
         * interrupt causes without clearing, which is later done
         * with the write to EICR.
         */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
        IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);

        if (hw->mac.type == ixgbe_mac_82598EB)
                ixgbe_check_fan_failure(adapter, eicr);

        if (hw->mac.type == ixgbe_mac_82599EB) {
                ixgbe_check_sfp_event(adapter, eicr);

                /* Handle Flow Director Full threshold interrupt */
                if (eicr & IXGBE_EICR_FLOW_DIR) {
                        int i;
                        IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
                        /* Disable transmits before FDIR Re-initialization */
                        netif_tx_stop_all_queues(netdev);
                        for (i = 0; i < adapter->num_tx_queues; i++) {
                                struct ixgbe_ring *tx_ring =
                                                           &adapter->tx_ring[i];
                                if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
                                                       &tx_ring->reinit_state))
                                        schedule_work(&adapter->fdir_reinit_task);
                        }
                }
        }
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

        return IRQ_HANDLED;
}

Alexander Duyckfe49f042009-06-04 16:00:09 +00001208static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1209 u64 qmask)
1210{
1211 u32 mask;
1212
1213 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1214 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1215 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1216 } else {
1217 mask = (qmask & 0xFFFFFFFF);
1218 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
1219 mask = (qmask >> 32);
1220 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
1221 }
1222 /* skip the flush */
1223}
1224
static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
					    u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
	}
	/* skip the flush */
}

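/*
 * The queue interrupt handlers below only zero the byte/packet counters
 * that feed the ITR algorithm, mask their own vector and hand the
 * actual ring cleanup off to NAPI.
 */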
static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

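/**
 * ixgbe_msix_clean_many - vector shared by one or more tx and rx queues
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/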
static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int r_idx;
	int i;

	if (!q_vector->txr_count && !q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = &(adapter->tx_ring[r_idx]);
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = &(adapter->rx_ring[r_idx]);
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
			       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rx_ring);
#endif

	ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->rx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
						((u64)1 << q_vector->v_idx));
	}

	return work_done;
}

/**
 * ixgbe_clean_rxtx_many - msix (aka one shot) rx and tx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx and tx queue associated with a
 * q_vector.
 **/
static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
			       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring = NULL;
	int work_done = 0, i;
	long r_idx;
	bool tx_clean_complete = true;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, ring);
#endif
		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_rx_dca(adapter, ring);
#endif
		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	ring = &(adapter->rx_ring[r_idx]);
	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->rx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
						((u64)1 << q_vector->v_idx));
		return 0;
	}

	return work_done;
}

/**
 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
			       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	tx_ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_tx_dca(adapter, tx_ring);
#endif

	if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
		work_done = budget;

	/* If all Tx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->tx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
	}

	return work_done;
}

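/*
 * A q_vector remembers the rings it owns in the rxr_idx/txr_idx bitmaps
 * plus a count; the interrupt and polling routines above walk those
 * bitmaps with find_first_bit()/find_next_bit().
 */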
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
}

/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
				      int vectors)
{
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	/* No mapping required if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		goto out;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);

		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

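/*
 * Worked example of the grouped path above: with 4 vectors and 6 Rx
 * rings, DIV_ROUND_UP() hands rings {0,1} to vector 0, {2,3} to
 * vector 1, {4} to vector 2 and {5} to vector 3; Tx rings are then
 * spread over the same vectors in the second pass.
 */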
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Map the Tx/Rx rings to the vectors we were allotted. */
	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
	if (err)
		goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                         &ixgbe_msix_clean_many)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbe_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
		} else if (handler == &ixgbe_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
		} else {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);
		}

		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  adapter->q_vector[vector]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"request_irq failed for MSIX interrupt "
				"Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
			"request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
			 adapter->q_vector[i]);
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
out:
	return err;
}

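/*
 * Legacy/MSI ITR update: classify recent Rx and Tx traffic, take the
 * more latency-sensitive of the two, and then move EITR toward the new
 * target with 90/10 exponential smoothing so one bursty poll cannot
 * swing the interrupt rate wildly.
 */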
static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u8 current_itr;
	u32 new_itr = q_vector->eitr;
	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
					    q_vector->tx_itr,
					    tx_ring->total_packets,
					    tx_ring->total_bytes);
	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
					    q_vector->rx_itr,
					    rx_ring->total_packets,
					    rx_ring->total_bytes);

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 8000;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}

	return;
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
	u32 mask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP1;
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
	}
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	ixgbe_irq_enable_queues(adapter, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u32 eicr;

	/*
	 * Workaround for silicon errata.  Mask the interrupts
	 * before the read of EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
		/* shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM */
		ixgbe_irq_enable(adapter);
		return IRQ_NONE;	/* Not our interrupt */
	}

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_check_sfp_event(adapter, eicr);

	ixgbe_check_fan_failure(adapter, eicr);

	if (napi_schedule_prep(&(q_vector->napi))) {
		adapter->tx_ring[0].total_packets = 0;
		adapter->tx_ring[0].total_bytes = 0;
		adapter->rx_ring[0].total_packets = 0;
		adapter->rx_ring[0].total_bytes = 0;
		/* would disable interrupts here but EIAM disabled it */
		__napi_schedule(&(q_vector->napi));
	}

	return IRQ_HANDLED;
}

static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
	}
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		err = ixgbe_request_msix_irqs(adapter);
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
				  netdev->name, netdev);
	} else {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
				  netdev->name, netdev);
	}

	if (err)
		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

	return err;
}

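/*
 * Teardown mirrors ixgbe_request_msix_irqs(): the last MSI-X entry is
 * the LSC/"other" vector registered against the netdev; the remaining
 * entries were registered against their q_vectors.
 */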
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;

		i = q_vectors - 1;
		free_irq(adapter->msix_entries[i].vector, netdev);

		i--;
		for (; i >= 0; i--) {
			free_irq(adapter->msix_entries[i].vector,
				 adapter->q_vector[i]);
		}

		ixgbe_reset_q_vectors(adapter);
	} else {
		free_irq(adapter->pdev->irq, netdev);
	}
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;
		for (i = 0; i < adapter->num_msix_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
			EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));

	ixgbe_set_ivar(adapter, 0, 0, 0);
	ixgbe_set_ivar(adapter, 1, 0, 0);

	map_vector_to_rxq(adapter, 0, 0);
	map_vector_to_txq(adapter, 0, 0);

	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_TDH(j);
		adapter->tx_ring[i].tail = IXGBE_TDT(j);
		/*
		 * Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		case ixgbe_mac_82599EB:
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		case ixgbe_mac_82599EB:
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}
	}
	if (hw->mac.type == ixgbe_mac_82599EB) {
		/* We enable 8 traffic classes, DCB only */
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
			IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
					IXGBE_MTQC_8TC_8TQ));
	}
}

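/*
 * SRRCTL programs the per-ring receive buffer layout: the header buffer
 * size, the packet buffer size and the descriptor type (header-split
 * vs. a single advanced one-buffer descriptor).
 */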
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring)
{
	u32 srrctl;
	int index;
	struct ixgbe_ring_feature *feature = adapter->ring_feature;

	index = rx_ring->reg_idx;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		unsigned long mask;
		mask = (unsigned long) feature[RING_F_RSS].mask;
		index = index & mask;
	}
	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));

	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
		  IXGBE_SRRCTL_BSIZEHDR_MASK;

	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
		srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
			  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}

static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
	u32 mrqc = 0;
	int mask;

	if (!(adapter->hw.mac.type == ixgbe_mac_82599EB))
		return mrqc;

	mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
#ifdef CONFIG_IXGBE_DCB
				 | IXGBE_FLAG_DCB_ENABLED
#endif
				);

	switch (mask) {
	case (IXGBE_FLAG_RSS_ENABLED):
		mrqc = IXGBE_MRQC_RSSEN;
		break;
#ifdef CONFIG_IXGBE_DCB
	case (IXGBE_FLAG_DCB_ENABLED):
		mrqc = IXGBE_MRQC_RT8TCEN;
		break;
#endif /* CONFIG_IXGBE_DCB */
	default:
		break;
	}

	return mrqc;
}

/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @index: index of ring to set
 * @rx_buf_len: rx buffer length
 **/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index,
				   int rx_buf_len)
{
	struct ixgbe_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	int j;
	u32 rscctrl;

	rx_ring = &adapter->rx_ring[index];
	j = rx_ring->reg_idx;
	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65535
	 */
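	/*
	 * e.g. the 16-descriptor cap is only taken while buffers are
	 * under 4 KB, keeping a coalesced unit at no more than
	 * 16 * 4095 = 65520 bytes; larger buffers step down to 8 or 4.
	 */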
	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (MAX_SKB_FRAGS > 16)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
#elif (MAX_SKB_FRAGS > 4)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#else
		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
#endif
	} else {
		if (rx_buf_len < IXGBE_RXBUFFER_4096)
			rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
		else if (rx_buf_len < IXGBE_RXBUFFER_8192)
			rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
		else
			rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
	}
	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
}

/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring *rx_ring;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen, rxctrl, rxcsum;
	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
			  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
			  0x6A3E67EA, 0x14364D17, 0x3BED200D};
	u32 fctrl, hlreg0;
	u32 reta = 0, mrqc = 0;
	u32 rdrxctl;
	int rx_buf_len;

	/* Decide whether to use packet split mode or not */
	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		rx_buf_len = IXGBE_RX_HDR_SIZE;
		if (hw->mac.type == ixgbe_mac_82599EB) {
			/* PSRTYPE must be initialized in 82599 */
			u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
				      IXGBE_PSRTYPE_UDPHDR |
				      IXGBE_PSRTYPE_IPV4HDR |
				      IXGBE_PSRTYPE_IPV6HDR |
				      IXGBE_PSRTYPE_L2HDR;
			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
		}
	} else {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
	else
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
#ifdef IXGBE_FCOE
	if (netdev->features & NETIF_F_FCOE_MTU)
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
#endif
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		rdba = rx_ring->dma;
		j = rx_ring->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
		rx_ring->head = IXGBE_RDH(j);
		rx_ring->tail = IXGBE_RDT(j);
		rx_ring->rx_buf_len = rx_buf_len;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
			rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
		else
			rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;

#ifdef IXGBE_FCOE
		if (netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((i >= f->mask) && (i < f->mask + f->indices)) {
				rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
				if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
					rx_ring->rx_buf_len =
						IXGBE_FCOE_JUMBO_FRAME_SIZE;
			}
		}

#endif /* IXGBE_FCOE */
		ixgbe_configure_srrctl(adapter, rx_ring);
	}

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't mention it clearly but DCA hints
		 * will only use queue 0's tags unless this bit is set.  Side
		 * effects of setting this bit are only that SRRCTL must be
		 * fully programmed [0..15]
		 */
		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
	}

	/* Program MRQC for the distribution of queues */
	mrqc = ixgbe_setup_mrqc(adapter);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
		/* Fill out redirection table */
		for (i = 0, j = 0; i < 128; i++, j++) {
			if (j == adapter->ring_feature[RING_F_RSS].indices)
				j = 0;
			/* reta = 4-byte sliding window of
			 * 0x00..(indices-1)(indices-1)00..etc. */
			reta = (reta << 8) | (j * 0x11);
			if ((i & 3) == 3)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
		}
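		/*
		 * e.g. with 4 RSS indices the bytes cycle 0x00, 0x11,
		 * 0x22, 0x33 (j * 0x11 replicates the queue index into
		 * both nibbles), and every fourth iteration the
		 * accumulated 32-bit window is flushed into the next
		 * RETA register.
		 */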

		/* Fill out hash function seeds */
		for (i = 0; i < 10; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);

		if (hw->mac.type == ixgbe_mac_82598EB)
			mrqc |= IXGBE_MRQC_RSSEN;
		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
	    adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
		/* Disable indicating checksum in descriptor, enables
		 * RSS hash */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}
	if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
		/* Enable IPv4 payload checksum for UDP fragments
		 * if PCSD is not set */
		rxcsum |= IXGBE_RXCSUM_IPPCSE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	if (hw->mac.type == ixgbe_mac_82599EB) {
		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
	}

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		/* Enable 82599 HW-RSC */
		for (i = 0; i < adapter->num_rx_queues; i++)
			ixgbe_configure_rscctl(adapter, i, rx_buf_len);

		/* Disable RSC for ACK packets */
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
		   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
	}
}

static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);

	/* remove VID from filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
}

static void ixgbe_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl;
	int i, j;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);
	adapter->vlgrp = grp;

	/*
	 * For a DCB driver, always enable VLAN tag stripping so we can
	 * still receive traffic from a DCB-enabled host even if we're
	 * not in DCB mode.
	 */
	ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		ctrl |= IXGBE_VLNCTRL_VFE;
		/* enable VLAN tag insert/strip */
		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
		for (i = 0; i < adapter->num_rx_queues; i++) {
			j = adapter->rx_ring[i].reg_idx;
			ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
		}
	}
	ixgbe_vlan_rx_add_vid(netdev, 0);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

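/*
 * Iterator handed to update_mc_addr_list(): returns the current
 * multicast address and advances the cursor to the next dev_mc_list
 * entry (or NULL at the end of the list).
 */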
static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
{
	struct dev_mc_list *mc_ptr;
	u8 *addr = *mc_addr_ptr;
	*vmdq = 0;

	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
	if (mc_ptr->next)
		*mc_addr_ptr = mc_ptr->next->dmi_addr;
	else
		*mc_addr_ptr = NULL;

	return addr;
}

/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
static void ixgbe_set_rx_mode(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 fctrl, vlnctrl;
	u8 *addr_list = NULL;
	int addr_count = 0;

	/* Check for Promiscuous and All Multicast modes */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

	if (netdev->flags & IFF_PROMISC) {
		hw->addr_ctrl.user_set_promisc = 1;
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			fctrl &= ~IXGBE_FCTRL_UPE;
		} else {
			fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		}
		vlnctrl |= IXGBE_VLNCTRL_VFE;
		hw->addr_ctrl.user_set_promisc = 0;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	/* reprogram secondary unicast list */
	hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);

	/* reprogram multicast list */
	addr_count = netdev->mc_count;
	if (addr_count)
		addr_list = netdev->mc_list->dmi_addr;
	hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
					ixgbe_addr_list_itr);
}

static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = adapter->q_vector[q_idx];
		napi = &q_vector->napi;
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			if (!q_vector->rxr_count || !q_vector->txr_count) {
				if (q_vector->txr_count == 1)
					napi->poll = &ixgbe_clean_txonly;
				else if (q_vector->rxr_count == 1)
					napi->poll = &ixgbe_clean_rxonly;
			}
		}

		napi_enable(napi);
	}
}

static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08002414#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08002415/*
2416 * ixgbe_configure_dcb - Configure DCB hardware
2417 * @adapter: ixgbe adapter struct
2418 *
2419 * This is called by the driver on open to configure the DCB hardware.
2420 * This is also called by the gennetlink interface when reconfiguring
2421 * the DCB state.
2422 */
2423static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2424{
2425 struct ixgbe_hw *hw = &adapter->hw;
2426 u32 txdctl, vlnctrl;
2427 int i, j;
2428
2429 ixgbe_dcb_check_config(&adapter->dcb_cfg);
2430 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
2431 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
2432
2433 /* reconfigure the hardware */
2434 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
2435
2436 for (i = 0; i < adapter->num_tx_queues; i++) {
2437 j = adapter->tx_ring[i].reg_idx;
2438 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2439 /* PThresh workaround for Tx hang with DFP enabled. */
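		/* TXDCTL[6:0] is the PTHRESH field, so OR-ing in 32 raises
		 * the prefetch threshold without touching HTHRESH/WTHRESH */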
2440 txdctl |= 32;
2441 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2442 }
2443 /* Enable VLAN tag insert/strip */
2444 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002445 if (hw->mac.type == ixgbe_mac_82598EB) {
2446 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2447 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2448 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2449 } else if (hw->mac.type == ixgbe_mac_82599EB) {
2450 vlnctrl |= IXGBE_VLNCTRL_VFE;
2451 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2452 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2453 for (i = 0; i < adapter->num_rx_queues; i++) {
2454 j = adapter->rx_ring[i].reg_idx;
2455 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2456 vlnctrl |= IXGBE_RXDCTL_VME;
2457 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2458 }
2459 }
Alexander Duyck2f90b862008-11-20 20:52:10 -08002460 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
2461}
2462
2463#endif
Auke Kok9a799d72007-09-15 14:07:45 -07002464static void ixgbe_configure(struct ixgbe_adapter *adapter)
2465{
2466 struct net_device *netdev = adapter->netdev;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002467 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07002468 int i;
2469
Christopher Leech2c5645c2008-08-26 04:27:02 -07002470 ixgbe_set_rx_mode(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002471
2472 ixgbe_restore_vlan(adapter);
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08002473#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08002474 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
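		/* the hardware cannot arbitrate TSO frames larger than 32K
		 * across traffic classes, so cap GSO while DCB is enabled */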
2475 netif_set_gso_max_size(netdev, 32768);
2476 ixgbe_configure_dcb(adapter);
2477 } else {
2478 netif_set_gso_max_size(netdev, 65536);
2479 }
2480#else
2481 netif_set_gso_max_size(netdev, 65536);
2482#endif
Auke Kok9a799d72007-09-15 14:07:45 -07002483
Yi Zoueacd73f2009-05-13 13:11:06 +00002484#ifdef IXGBE_FCOE
2485 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
2486 ixgbe_configure_fcoe(adapter);
2487
2488#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002489 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2490 for (i = 0; i < adapter->num_tx_queues; i++)
2491 adapter->tx_ring[i].atr_sample_rate =
2492 adapter->atr_sample_rate;
2493 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
2494 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
2495 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
2496 }
2497
Auke Kok9a799d72007-09-15 14:07:45 -07002498 ixgbe_configure_tx(adapter);
2499 ixgbe_configure_rx(adapter);
2500 for (i = 0; i < adapter->num_rx_queues; i++)
2501 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002502 (adapter->rx_ring[i].count - 1));
Auke Kok9a799d72007-09-15 14:07:45 -07002503}
2504
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002505static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2506{
2507 switch (hw->phy.type) {
2508 case ixgbe_phy_sfp_avago:
2509 case ixgbe_phy_sfp_ftl:
2510 case ixgbe_phy_sfp_intel:
2511 case ixgbe_phy_sfp_unknown:
2512 case ixgbe_phy_tw_tyco:
2513 case ixgbe_phy_tw_unknown:
2514 return true;
2515 default:
2516 return false;
2517 }
2518}
2519
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002520/**
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002521 * ixgbe_sfp_link_config - set up SFP+ link
2522 * @adapter: pointer to private adapter struct
2523 **/
2524static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
2525{
2526 struct ixgbe_hw *hw = &adapter->hw;
2527
2528 if (hw->phy.multispeed_fiber) {
2529 /*
2530 * In multispeed fiber setups, the device may not have
2531 * had a physical connection when the driver loaded.
2532 * If that's the case, the initial link configuration
2533 * couldn't get the MAC into 10G or 1G mode, so we'll
2534 * never have a link status change interrupt fire.
2535 * We need to try to force an autonegotiation
2536 * session, then bring up link.
2537 */
2538 hw->mac.ops.setup_sfp(hw);
2539 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
2540 schedule_work(&adapter->multispeed_fiber_task);
2541 } else {
2542 /*
2543 * Direct Attach Cu and non-multispeed fiber modules
2544 * still need to be configured properly prior to
2545 * attempting link.
2546 */
2547 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
2548 schedule_work(&adapter->sfp_config_module_task);
2549 }
2550}
2551
2552/**
2553 * ixgbe_non_sfp_link_config - set up non-SFP+ link
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002554 * @hw: pointer to private hardware struct
2555 *
2556 * Returns 0 on success, negative on failure
2557 **/
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002558static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002559{
2560 u32 autoneg;
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00002561 bool negotiation, link_up = false;
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002562 s32 ret = IXGBE_ERR_LINK_SETUP;
2563
2564 if (hw->mac.ops.check_link)
2565 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
2566
2567 if (ret)
2568 goto link_cfg_out;
2569
2570 if (hw->mac.ops.get_link_capabilities)
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00002571 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002572 if (ret)
2573 goto link_cfg_out;
2574
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00002575 if (hw->mac.ops.setup_link)
2576 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002577link_cfg_out:
2578 return ret;
2579}
2580
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002581#define IXGBE_MAX_RX_DESC_POLL 10
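/*
 * Poll until the hardware acknowledges RXDCTL.ENABLE; on 82599 the queue
 * must be enabled before the Rx tail pointer is advanced.
 */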
2582static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2583 int rxr)
2584{
2585 int j = adapter->rx_ring[rxr].reg_idx;
2586 int k;
2587
2588 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
2589 if (IXGBE_READ_REG(&adapter->hw,
2590 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
2591 break;
2592 else
2593 msleep(1);
2594 }
2595 if (k >= IXGBE_MAX_RX_DESC_POLL) {
2596 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
2597 "not set within the polling period\n", rxr);
2598 }
2599 ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
2600 (adapter->rx_ring[rxr].count - 1));
2601}
2602
Auke Kok9a799d72007-09-15 14:07:45 -07002603static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2604{
2605 struct net_device *netdev = adapter->netdev;
Auke Kok9a799d72007-09-15 14:07:45 -07002606 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002607 int i, j = 0;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002608 int num_rx_rings = adapter->num_rx_queues;
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002609 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07002610 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002611 u32 txdctl, rxdctl, mhadd;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002612 u32 dmatxctl;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002613 u32 gpie;
Auke Kok9a799d72007-09-15 14:07:45 -07002614
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08002615 ixgbe_get_hw_control(adapter);
2616
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002617 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
2618 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
Auke Kok9a799d72007-09-15 14:07:45 -07002619 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2620 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002621 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
Auke Kok9a799d72007-09-15 14:07:45 -07002622 } else {
2623 /* MSI only */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002624 gpie = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002625 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002626 /* XXX: to interrupt immediately for EICS writes, enable this */
2627 /* gpie |= IXGBE_GPIE_EIMEN; */
2628 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2629 }
2630
2631 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2632 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
2633 * specifically only auto mask tx and rx interrupts */
2634 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07002635 }
2636
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07002637 /* Enable fan failure interrupt if media type is copper */
2638 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2639 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2640 gpie |= IXGBE_SDP1_GPIEN;
2641 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2642 }
2643
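	/* 82599 routes SFP+ module status through the SDP1/SDP2 pins, so
	 * enable those GPIs to get hot-plug and fault interrupts */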
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002644 if (hw->mac.type == ixgbe_mac_82599EB) {
2645 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2646 gpie |= IXGBE_SDP1_GPIEN;
2647 gpie |= IXGBE_SDP2_GPIEN;
2648 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2649 }
2650
Yi Zou63f39bd2009-05-17 12:34:35 +00002651#ifdef IXGBE_FCOE
2652 /* adjust max frame to be able to do baby jumbo for FCoE */
Yi Zouf34c5c82009-08-14 12:42:17 +00002653 if ((netdev->features & NETIF_F_FCOE_MTU) &&
Yi Zou63f39bd2009-05-17 12:34:35 +00002654 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2655 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
2656
2657#endif /* IXGBE_FCOE */
Auke Kok9a799d72007-09-15 14:07:45 -07002658 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
Auke Kok9a799d72007-09-15 14:07:45 -07002659 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2660 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2661 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2662
2663 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2664 }
2665
2666 for (i = 0; i < adapter->num_tx_queues; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002667 j = adapter->tx_ring[i].reg_idx;
2668 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002669 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
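		/* WTHRESH occupies TXDCTL[22:16], hence the shift by 16 */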
2670 txdctl |= (8 << 16);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002671 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2672 }
2673
2674 if (hw->mac.type == ixgbe_mac_82599EB) {
2675 /* DMATXCTL.EN must be set after all Tx queue config is done */
2676 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2677 dmatxctl |= IXGBE_DMATXCTL_TE;
2678 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2679 }
2680 for (i = 0; i < adapter->num_tx_queues; i++) {
2681 j = adapter->tx_ring[i].reg_idx;
2682 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
Auke Kok9a799d72007-09-15 14:07:45 -07002683 txdctl |= IXGBE_TXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002684 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07002685 }
2686
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002687 for (i = 0; i < num_rx_rings; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002688 j = adapter->rx_ring[i].reg_idx;
2689 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2690 /* enable PTHRESH=32 descriptors (half the internal cache)
2691 * and HTHRESH=0 descriptors (to minimize latency on fetch),
2692 * this also removes a pesky rx_no_buffer_count increment */
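		/* 0x0020 sets the PTHRESH field (RXDCTL[6:0]) to 32 */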
2693 rxdctl |= 0x0020;
Auke Kok9a799d72007-09-15 14:07:45 -07002694 rxdctl |= IXGBE_RXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002695 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002696 if (hw->mac.type == ixgbe_mac_82599EB)
2697 ixgbe_rx_desc_queue_enable(adapter, i);
Auke Kok9a799d72007-09-15 14:07:45 -07002698 }
2699 /* enable all receives */
2700 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002701 if (hw->mac.type == ixgbe_mac_82598EB)
2702 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
2703 else
2704 rxdctl |= IXGBE_RXCTRL_RXEN;
2705 hw->mac.ops.enable_rx_dma(hw, rxdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07002706
2707 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2708 ixgbe_configure_msix(adapter);
2709 else
2710 ixgbe_configure_msi_and_legacy(adapter);
2711
2712 clear_bit(__IXGBE_DOWN, &adapter->state);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002713 ixgbe_napi_enable_all(adapter);
2714
2715 /* clear any pending interrupts, may auto mask */
2716 IXGBE_READ_REG(hw, IXGBE_EICR);
2717
Auke Kok9a799d72007-09-15 14:07:45 -07002718 ixgbe_irq_enable(adapter);
2719
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002720 /*
Don Skidmorebf069c92009-05-07 10:39:54 +00002721 * If this adapter has a fan, check to see if we had a failure
2722 * before we enabled the interrupt.
2723 */
2724 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2725 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2726 if (esdp & IXGBE_ESDP_SDP1)
2727 DPRINTK(DRV, CRIT,
2728 "Fan has stopped, replace the adapter\n");
2729 }
2730
2731 /*
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002732 * For hot-pluggable SFP+ devices, a new SFP+ module may have
Don Skidmore19343de2009-07-02 12:50:31 +00002733 * arrived before interrupts were enabled but after probe. Such
2734 * devices wouldn't have their type identified yet. We need to
2735 * kick off the SFP+ module setup first, then try to bring up link.
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002736 * If we're not hot-pluggable SFP+, we just need to configure link
2737 * and bring it up.
2738 */
Don Skidmore19343de2009-07-02 12:50:31 +00002739 if (hw->phy.type == ixgbe_phy_unknown) {
2740 err = hw->phy.ops.identify(hw);
2741 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
Don Skidmore5da43c12009-07-02 12:50:52 +00002742 /*
2743 * Take the device down and schedule the SFP config work item,
2744 * which will unregister_netdev and log the failure.
2745 */
Don Skidmore19343de2009-07-02 12:50:31 +00002746 ixgbe_down(adapter);
Don Skidmore5da43c12009-07-02 12:50:52 +00002747 schedule_work(&adapter->sfp_config_module_task);
Don Skidmore19343de2009-07-02 12:50:31 +00002748 return err;
2749 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002750 }
2751
2752 if (ixgbe_is_sfp(hw)) {
2753 ixgbe_sfp_link_config(adapter);
2754 } else {
2755 err = ixgbe_non_sfp_link_config(hw);
2756 if (err)
2757 DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
2758 }
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002759
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002760 for (i = 0; i < adapter->num_tx_queues; i++)
2761 set_bit(__IXGBE_FDIR_INIT_DONE,
2762 &(adapter->tx_ring[i].reinit_state));
2763
Peter P Waskiewicz Jr1da100b2009-01-19 16:55:03 -08002764 /* enable transmits */
2765 netif_tx_start_all_queues(netdev);
2766
Auke Kok9a799d72007-09-15 14:07:45 -07002767 /* bring the link up in the watchdog, this could race with our first
2768 * link up interrupt but shouldn't be a problem */
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002769 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2770 adapter->link_check_timeout = jiffies;
Auke Kok9a799d72007-09-15 14:07:45 -07002771 mod_timer(&adapter->watchdog_timer, jiffies);
2772 return 0;
2773}
2774
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002775void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
2776{
2777 WARN_ON(in_interrupt());
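	/* __IXGBE_RESETTING acts as a lock serializing resets */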
2778 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
2779 msleep(1);
2780 ixgbe_down(adapter);
2781 ixgbe_up(adapter);
2782 clear_bit(__IXGBE_RESETTING, &adapter->state);
2783}
2784
Auke Kok9a799d72007-09-15 14:07:45 -07002785int ixgbe_up(struct ixgbe_adapter *adapter)
2786{
2787 /* hardware has been reset, we need to reload some things */
2788 ixgbe_configure(adapter);
2789
2790 return ixgbe_up_complete(adapter);
2791}
2792
2793void ixgbe_reset(struct ixgbe_adapter *adapter)
2794{
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002795 struct ixgbe_hw *hw = &adapter->hw;
Don Skidmore8ca783a2009-05-26 20:40:47 -07002796 int err;
2797
2798 err = hw->mac.ops.init_hw(hw);
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00002799 switch (err) {
2800 case 0:
2801 case IXGBE_ERR_SFP_NOT_PRESENT:
2802 break;
2803 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
2804 dev_err(&adapter->pdev->dev, "master disable timed out\n");
2805 break;
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00002806 case IXGBE_ERR_EEPROM_VERSION:
2807 /* We are running on a pre-production device, log a warning */
2808 dev_warn(&adapter->pdev->dev, "This device is a pre-production "
2809 "adapter/LOM. Please be aware there may be issues "
2810 "associated with your hardware. If you are "
2811 "experiencing problems please contact your Intel or "
2812 "hardware representative who provided you with this "
2813 "hardware.\n");
2814 break;
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00002815 default:
2816 dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
2817 }
Auke Kok9a799d72007-09-15 14:07:45 -07002818
2819 /* reprogram the RAR[0] in case user changed it. */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002820 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
Auke Kok9a799d72007-09-15 14:07:45 -07002821}
2822
Auke Kok9a799d72007-09-15 14:07:45 -07002823/**
2824 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
2825 * @adapter: board private structure
2826 * @rx_ring: ring to free buffers from
2827 **/
2828static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002829 struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002830{
2831 struct pci_dev *pdev = adapter->pdev;
2832 unsigned long size;
2833 unsigned int i;
2834
2835 /* Free all the Rx ring sk_buffs */
2836
2837 for (i = 0; i < rx_ring->count; i++) {
2838 struct ixgbe_rx_buffer *rx_buffer_info;
2839
2840 rx_buffer_info = &rx_ring->rx_buffer_info[i];
2841 if (rx_buffer_info->dma) {
2842 pci_unmap_single(pdev, rx_buffer_info->dma,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002843 rx_ring->rx_buf_len,
2844 PCI_DMA_FROMDEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07002845 rx_buffer_info->dma = 0;
2846 }
2847 if (rx_buffer_info->skb) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00002848 struct sk_buff *skb = rx_buffer_info->skb;
Auke Kok9a799d72007-09-15 14:07:45 -07002849 rx_buffer_info->skb = NULL;
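			/* hardware RSC may have chained several skbs together
			 * via skb->prev; walk and free the entire chain */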
Alexander Duyckf8212f92009-04-27 22:42:37 +00002850 do {
2851 struct sk_buff *this = skb;
2852 skb = skb->prev;
2853 dev_kfree_skb(this);
2854 } while (skb);
Auke Kok9a799d72007-09-15 14:07:45 -07002855 }
2856 if (!rx_buffer_info->page)
2857 continue;
Jesse Brandeburg4f57ca62009-06-30 11:44:56 +00002858 if (rx_buffer_info->page_dma) {
2859 pci_unmap_page(pdev, rx_buffer_info->page_dma,
2860 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
2861 rx_buffer_info->page_dma = 0;
2862 }
Auke Kok9a799d72007-09-15 14:07:45 -07002863 put_page(rx_buffer_info->page);
2864 rx_buffer_info->page = NULL;
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07002865 rx_buffer_info->page_offset = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002866 }
2867
2868 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2869 memset(rx_ring->rx_buffer_info, 0, size);
2870
2871 /* Zero out the descriptor ring */
2872 memset(rx_ring->desc, 0, rx_ring->size);
2873
2874 rx_ring->next_to_clean = 0;
2875 rx_ring->next_to_use = 0;
2876
Jesse Brandeburg9891ca72009-03-13 22:14:50 +00002877 if (rx_ring->head)
2878 writel(0, adapter->hw.hw_addr + rx_ring->head);
2879 if (rx_ring->tail)
2880 writel(0, adapter->hw.hw_addr + rx_ring->tail);
Auke Kok9a799d72007-09-15 14:07:45 -07002881}
2882
2883/**
2884 * ixgbe_clean_tx_ring - Free Tx Buffers
2885 * @adapter: board private structure
2886 * @tx_ring: ring to be cleaned
2887 **/
2888static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002889 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002890{
2891 struct ixgbe_tx_buffer *tx_buffer_info;
2892 unsigned long size;
2893 unsigned int i;
2894
2895 /* Free all the Tx ring sk_buffs */
2896
2897 for (i = 0; i < tx_ring->count; i++) {
2898 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2899 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2900 }
2901
2902 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2903 memset(tx_ring->tx_buffer_info, 0, size);
2904
2905 /* Zero out the descriptor ring */
2906 memset(tx_ring->desc, 0, tx_ring->size);
2907
2908 tx_ring->next_to_use = 0;
2909 tx_ring->next_to_clean = 0;
2910
Jesse Brandeburg9891ca72009-03-13 22:14:50 +00002911 if (tx_ring->head)
2912 writel(0, adapter->hw.hw_addr + tx_ring->head);
2913 if (tx_ring->tail)
2914 writel(0, adapter->hw.hw_addr + tx_ring->tail);
Auke Kok9a799d72007-09-15 14:07:45 -07002915}
2916
2917/**
Auke Kok9a799d72007-09-15 14:07:45 -07002918 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
2919 * @adapter: board private structure
2920 **/
2921static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
2922{
2923 int i;
2924
2925 for (i = 0; i < adapter->num_rx_queues; i++)
2926 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2927}
2928
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002929/**
2930 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
2931 * @adapter: board private structure
2932 **/
2933static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
2934{
2935 int i;
2936
2937 for (i = 0; i < adapter->num_tx_queues; i++)
2938 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2939}
2940
Auke Kok9a799d72007-09-15 14:07:45 -07002941void ixgbe_down(struct ixgbe_adapter *adapter)
2942{
2943 struct net_device *netdev = adapter->netdev;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002944 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07002945 u32 rxctrl;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002946 u32 txdctl;
2947 int i, j;
Auke Kok9a799d72007-09-15 14:07:45 -07002948
2949 /* signal that we are down to the interrupt handler */
2950 set_bit(__IXGBE_DOWN, &adapter->state);
2951
2952 /* disable receives */
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002953 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2954 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
Auke Kok9a799d72007-09-15 14:07:45 -07002955
2956 netif_tx_disable(netdev);
2957
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002958 IXGBE_WRITE_FLUSH(hw);
Auke Kok9a799d72007-09-15 14:07:45 -07002959 msleep(10);
2960
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002961 netif_tx_stop_all_queues(netdev);
2962
Auke Kok9a799d72007-09-15 14:07:45 -07002963 ixgbe_irq_disable(adapter);
2964
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002965 ixgbe_napi_disable_all(adapter);
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002966
Don Skidmore0a1f87c2009-09-18 09:45:43 +00002967 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
2968 del_timer_sync(&adapter->sfp_timer);
Auke Kok9a799d72007-09-15 14:07:45 -07002969 del_timer_sync(&adapter->watchdog_timer);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002970 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07002971
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002972 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2973 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
2974 cancel_work_sync(&adapter->fdir_reinit_task);
2975
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002976 /* disable transmits in the hardware now that interrupts are off */
2977 for (i = 0; i < adapter->num_tx_queues; i++) {
2978 j = adapter->tx_ring[i].reg_idx;
2979 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2980 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
2981 (txdctl & ~IXGBE_TXDCTL_ENABLE));
2982 }
PJ Waskiewicz88512532009-03-13 22:15:10 +00002983 /* Disable the Tx DMA engine on 82599 */
2984 if (hw->mac.type == ixgbe_mac_82599EB)
2985 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
2986 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
2987 ~IXGBE_DMATXCTL_TE));
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002988
Auke Kok9a799d72007-09-15 14:07:45 -07002989 netif_carrier_off(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002990
Paul Larson6f4a0e42008-06-24 17:00:56 -07002991 if (!pci_channel_offline(adapter->pdev))
2992 ixgbe_reset(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002993 ixgbe_clean_all_tx_rings(adapter);
2994 ixgbe_clean_all_rx_rings(adapter);
2995
Jeff Garzik5dd2d332008-10-16 05:09:31 -04002996#ifdef CONFIG_IXGBE_DCA
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07002997 /* since we reset the hardware DCA settings were cleared */
Alexander Duycke35ec122009-05-21 13:07:12 +00002998 ixgbe_setup_dca(adapter);
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07002999#endif
Auke Kok9a799d72007-09-15 14:07:45 -07003000}
3001
Auke Kok9a799d72007-09-15 14:07:45 -07003002/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003003 * ixgbe_poll - NAPI Rx polling callback
3004 * @napi: structure for representing this polling device
3005 * @budget: how many packets driver is allowed to clean
3006 *
3007 * This function is used for legacy and MSI, NAPI mode
Auke Kok9a799d72007-09-15 14:07:45 -07003008 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003009static int ixgbe_poll(struct napi_struct *napi, int budget)
Auke Kok9a799d72007-09-15 14:07:45 -07003010{
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00003011 struct ixgbe_q_vector *q_vector =
3012 container_of(napi, struct ixgbe_q_vector, napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003013 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00003014 int tx_clean_complete, work_done = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003015
Jeff Garzik5dd2d332008-10-16 05:09:31 -04003016#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08003017 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3018 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
3019 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
3020 }
3021#endif
3022
Alexander Duyckfe49f042009-06-04 16:00:09 +00003023 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
Herbert Xu78b6f4c2009-01-18 21:49:45 -08003024 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
Auke Kok9a799d72007-09-15 14:07:45 -07003025
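	/* If Tx work remains, claim the full budget so the NAPI core keeps
	 * polling us instead of re-enabling the interrupt. */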
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00003026 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08003027 work_done = budget;
3028
David S. Miller53e52c72008-01-07 21:06:12 -08003029 /* If budget not fully consumed, exit the polling mode */
3030 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08003031 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00003032 if (adapter->rx_itr_setting & 1)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08003033 ixgbe_set_itr(adapter);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003034 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Nelson, Shannon835462f2009-04-27 22:42:54 +00003035 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07003036 }
Auke Kok9a799d72007-09-15 14:07:45 -07003037 return work_done;
3038}
3039
3040/**
3041 * ixgbe_tx_timeout - Respond to a Tx Hang
3042 * @netdev: network interface device structure
3043 **/
3044static void ixgbe_tx_timeout(struct net_device *netdev)
3045{
3046 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3047
3048 /* Do the reset outside of interrupt context */
3049 schedule_work(&adapter->reset_task);
3050}
3051
3052static void ixgbe_reset_task(struct work_struct *work)
3053{
3054 struct ixgbe_adapter *adapter;
3055 adapter = container_of(work, struct ixgbe_adapter, reset_task);
3056
Alexander Duyck2f90b862008-11-20 20:52:10 -08003057 /* If we're already down or resetting, just bail */
3058 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
3059 test_bit(__IXGBE_RESETTING, &adapter->state))
3060 return;
3061
Auke Kok9a799d72007-09-15 14:07:45 -07003062 adapter->tx_timeout_count++;
3063
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003064 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003065}
3066
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003067#ifdef CONFIG_IXGBE_DCB
3068static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003069{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003070 bool ret = false;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003071 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003072
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003073 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3074 return ret;
3075
3076 f->mask = 0x7 << 3;
3077 adapter->num_rx_queues = f->indices;
3078 adapter->num_tx_queues = f->indices;
3079 ret = true;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003080
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003081 return ret;
3082}
3083#endif
3084
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003085/**
3086 * ixgbe_set_rss_queues: Allocate queues for RSS
3087 * @adapter: board private structure to initialize
3088 *
3089 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
3090 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
3091 *
3092 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003093static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
3094{
3095 bool ret = false;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003096 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003097
3098 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003099 f->mask = 0xF;
3100 adapter->num_rx_queues = f->indices;
3101 adapter->num_tx_queues = f->indices;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003102 ret = true;
3103 } else {
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003104 ret = false;
3105 }
3106
3107 return ret;
3108}
3109
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003110/**
3111 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
3112 * @adapter: board private structure to initialize
3113 *
3114 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
3115 * to the original CPU that initiated the Tx session. This runs in addition
3116 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
3117 * Rx load across CPUs using RSS.
3118 *
3119 **/
3120static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3121{
3122 bool ret = false;
3123 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
3124
3125 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
3126 f_fdir->mask = 0;
3127
3128 /* Flow Director must have RSS enabled */
3129 if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
3130 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3131 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
3132 adapter->num_tx_queues = f_fdir->indices;
3133 adapter->num_rx_queues = f_fdir->indices;
3134 ret = true;
3135 } else {
3136 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3137 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3138 }
3139 return ret;
3140}
3141
Yi Zou0331a832009-05-17 12:33:52 +00003142#ifdef IXGBE_FCOE
3143/**
3144 * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
3145 * @adapter: board private structure to initialize
3146 *
3147 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
3148 * The ring feature mask is not used as a mask for FCoE, as it can take any 8
3149 * rx queues out of the max number of rx queues; instead, it is used as the
3150 * index of the first rx queue used by FCoE.
3151 *
3152 **/
3153static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3154{
3155 bool ret = false;
3156 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3157
3158 f->indices = min((int)num_online_cpus(), f->indices);
3159 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00003160 adapter->num_rx_queues = 1;
3161 adapter->num_tx_queues = 1;
Yi Zou0331a832009-05-17 12:33:52 +00003162#ifdef CONFIG_IXGBE_DCB
3163 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00003164 DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
Yi Zou0331a832009-05-17 12:33:52 +00003165 ixgbe_set_dcb_queues(adapter);
3166 }
3167#endif
3168 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00003169 DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
Yi Zou8faa2a72009-07-09 02:29:50 +00003170 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3171 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3172 ixgbe_set_fdir_queues(adapter);
3173 else
3174 ixgbe_set_rss_queues(adapter);
Yi Zou0331a832009-05-17 12:33:52 +00003175 }
3176 /* adding FCoE rx rings to the end */
3177 f->mask = adapter->num_rx_queues;
3178 adapter->num_rx_queues += f->indices;
Yi Zou8de8b2e2009-09-03 14:55:50 +00003179 adapter->num_tx_queues += f->indices;
Yi Zou0331a832009-05-17 12:33:52 +00003180
3181 ret = true;
3182 }
3183
3184 return ret;
3185}
3186
3187#endif /* IXGBE_FCOE */
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003188/*
3189 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
3190 * @adapter: board private structure to initialize
3191 *
3192 * This is the top level queue allocation routine. The order here is very
3193 * important, starting with the largest set of features turned on at once,
3194 * and ending with the smallest set of features. This way large combinations
3195 * can be allocated if they're turned on, and smaller combinations are the
3196 * fallthrough conditions.
3197 *
3198 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003199static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
3200{
Yi Zou0331a832009-05-17 12:33:52 +00003201#ifdef IXGBE_FCOE
3202 if (ixgbe_set_fcoe_queues(adapter))
3203 goto done;
3204
3205#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003206#ifdef CONFIG_IXGBE_DCB
3207 if (ixgbe_set_dcb_queues(adapter))
Wu Fengguangaf22ab12009-04-14 21:54:07 -07003208 goto done;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003209
3210#endif
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003211 if (ixgbe_set_fdir_queues(adapter))
3212 goto done;
3213
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003214 if (ixgbe_set_rss_queues(adapter))
Wu Fengguangaf22ab12009-04-14 21:54:07 -07003215 goto done;
3216
3217 /* fallback to base case */
3218 adapter->num_rx_queues = 1;
3219 adapter->num_tx_queues = 1;
3220
3221done:
3222 /* Notify the stack of the (possibly) reduced Tx Queue count. */
3223 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003224}
3225
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003226static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003227 int vectors)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003228{
3229 int err, vector_threshold;
3230
3231 /* We'll want at least 3 (vector_threshold):
3232 * 1) TxQ[0] Cleanup
3233 * 2) RxQ[0] Cleanup
3234 * 3) Other (Link Status Change, etc.)
3235 * 4) TCP Timer (optional)
3236 */
3237 vector_threshold = MIN_MSIX_COUNT;
3238
3239 /* The more we get, the more we will assign to Tx/Rx Cleanup
3240 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
3241 * Right now, we simply care about how many we'll get; we'll
3242 * set them up later while requesting IRQs.
3243 */
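	/* pci_enable_msix() returns 0 on success, a negative errno on hard
	 * failure, or the number of vectors actually available when the
	 * request was too large; retry with that smaller count. */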
3244 while (vectors >= vector_threshold) {
3245 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003246 vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003247 if (!err) /* Success in acquiring all requested vectors. */
3248 break;
3249 else if (err < 0)
3250 vectors = 0; /* Nasty failure, quit now */
3251 else /* err == number of vectors we should try again with */
3252 vectors = err;
3253 }
3254
3255 if (vectors < vector_threshold) {
3256 /* Can't allocate enough MSI-X interrupts? Oh well.
3257 * This just means we'll go with either a single MSI
3258 * vector or fall back to legacy interrupts.
3259 */
3260 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
3261 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3262 kfree(adapter->msix_entries);
3263 adapter->msix_entries = NULL;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003264 } else {
3265 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
Peter P Waskiewicz Jreb7f1392009-02-01 01:18:58 -08003266 /*
3267 * Adjust for only the vectors we'll use, which is minimum
3268 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
3269 * vectors we were allocated.
3270 */
3271 adapter->num_msix_vectors = min(vectors,
3272 adapter->max_msix_q_vectors + NON_Q_VECTORS);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003273 }
3274}
3275
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003276/**
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003277 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003278 * @adapter: board private structure to initialize
3279 *
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003280 * Cache the descriptor ring offsets for RSS to the assigned rings.
3281 *
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003282 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003283static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003284{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003285 int i;
3286 bool ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003287
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003288 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3289 for (i = 0; i < adapter->num_rx_queues; i++)
3290 adapter->rx_ring[i].reg_idx = i;
3291 for (i = 0; i < adapter->num_tx_queues; i++)
3292 adapter->tx_ring[i].reg_idx = i;
3293 ret = true;
3294 } else {
3295 ret = false;
3296 }
3297
3298 return ret;
3299}
3300
3301#ifdef CONFIG_IXGBE_DCB
3302/**
3303 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
3304 * @adapter: board private structure to initialize
3305 *
3306 * Cache the descriptor ring offsets for DCB to the assigned rings.
3307 *
3308 **/
3309static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
3310{
3311 int i;
3312 bool ret = false;
3313 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
3314
3315 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3316 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
Alexander Duyck2f90b862008-11-20 20:52:10 -08003317 /* the number of queues is assumed to be symmetric */
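			/* 82598 has 64 Rx queues (8 per TC) and 32 Tx queues
			 * (4 per TC); i << 3 and i << 2 select the first
			 * queue of traffic class i */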
3318 for (i = 0; i < dcb_i; i++) {
3319 adapter->rx_ring[i].reg_idx = i << 3;
3320 adapter->tx_ring[i].reg_idx = i << 2;
3321 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003322 ret = true;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003323 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
PJ Waskiewiczf92ef202009-04-16 15:00:20 +00003324 if (dcb_i == 8) {
3325 /*
3326 * Tx TC0 starts at: descriptor queue 0
3327 * Tx TC1 starts at: descriptor queue 32
3328 * Tx TC2 starts at: descriptor queue 64
3329 * Tx TC3 starts at: descriptor queue 80
3330 * Tx TC4 starts at: descriptor queue 96
3331 * Tx TC5 starts at: descriptor queue 104
3332 * Tx TC6 starts at: descriptor queue 112
3333 * Tx TC7 starts at: descriptor queue 120
3334 *
3335 * Rx TC0-TC7 are offset by 16 queues each
3336 */
3337 for (i = 0; i < 3; i++) {
3338 adapter->tx_ring[i].reg_idx = i << 5;
3339 adapter->rx_ring[i].reg_idx = i << 4;
3340 }
3341 for ( ; i < 5; i++) {
3342 adapter->tx_ring[i].reg_idx =
3343 ((i + 2) << 4);
3344 adapter->rx_ring[i].reg_idx = i << 4;
3345 }
3346 for ( ; i < dcb_i; i++) {
3347 adapter->tx_ring[i].reg_idx =
3348 ((i + 8) << 3);
3349 adapter->rx_ring[i].reg_idx = i << 4;
3350 }
3351
3352 ret = true;
3353 } else if (dcb_i == 4) {
3354 /*
3355 * Tx TC0 starts at: descriptor queue 0
3356 * Tx TC1 starts at: descriptor queue 64
3357 * Tx TC2 starts at: descriptor queue 96
3358 * Tx TC3 starts at: descriptor queue 112
3359 *
3360 * Rx TC0-TC3 are offset by 32 queues each
3361 */
3362 adapter->tx_ring[0].reg_idx = 0;
3363 adapter->tx_ring[1].reg_idx = 64;
3364 adapter->tx_ring[2].reg_idx = 96;
3365 adapter->tx_ring[3].reg_idx = 112;
3366 for (i = 0 ; i < dcb_i; i++)
3367 adapter->rx_ring[i].reg_idx = i << 5;
3368
3369 ret = true;
3370 } else {
3371 ret = false;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003372 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003373 } else {
3374 ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003375 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003376 } else {
3377 ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003378 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003379
3380 return ret;
3381}
3382#endif
3383
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003384/**
3385 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
3386 * @adapter: board private structure to initialize
3387 *
3388 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
3389 *
3390 **/
3391static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
3392{
3393 int i;
3394 bool ret = false;
3395
3396 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
3397 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3398 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
3399 for (i = 0; i < adapter->num_rx_queues; i++)
3400 adapter->rx_ring[i].reg_idx = i;
3401 for (i = 0; i < adapter->num_tx_queues; i++)
3402 adapter->tx_ring[i].reg_idx = i;
3403 ret = true;
3404 }
3405
3406 return ret;
3407}
3408
Yi Zou0331a832009-05-17 12:33:52 +00003409#ifdef IXGBE_FCOE
3410/**
3411 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
3412 * @adapter: board private structure to initialize
3413 *
3414 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
3415 *
3416 */
3417static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
3418{
Yi Zou8de8b2e2009-09-03 14:55:50 +00003419 int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
Yi Zou0331a832009-05-17 12:33:52 +00003420 bool ret = false;
3421 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3422
3423 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
3424#ifdef CONFIG_IXGBE_DCB
3425 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00003426 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
3427
Yi Zou0331a832009-05-17 12:33:52 +00003428 ixgbe_cache_ring_dcb(adapter);
Yi Zou8de8b2e2009-09-03 14:55:50 +00003429 /* find out queues in TC for FCoE */
3430 fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
3431 fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
3432 /*
3433 * In 82599, the number of Tx queues for each traffic
3434 * class for both 8-TC and 4-TC modes is:
3435 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
3436 * 8 TCs: 32 32 16 16 8 8 8 8
3437 * 4 TCs: 64 64 32 32
3438 * We have max 8 queues for FCoE, where 8 is the
3439 * FCoE redirection table size. If TC for FCoE is
3440 * less than or equal to TC3, we have enough queues
3441 * to add max of 8 queues for FCoE, so we start FCoE
3442 * tx descriptor from the next one, i.e., reg_idx + 1.
3443 * If TC for FCoE is above TC3, implying 8 TC mode,
3444 * and we need 8 for FCoE, we have to take all queues
3445 * in that traffic class for FCoE.
3446 */
3447 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
3448 fcoe_tx_i--;
Yi Zou0331a832009-05-17 12:33:52 +00003449 }
3450#endif /* CONFIG_IXGBE_DCB */
3451 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Yi Zou8faa2a72009-07-09 02:29:50 +00003452 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3453 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3454 ixgbe_cache_ring_fdir(adapter);
3455 else
3456 ixgbe_cache_ring_rss(adapter);
3457
Yi Zou8de8b2e2009-09-03 14:55:50 +00003458 fcoe_rx_i = f->mask;
3459 fcoe_tx_i = f->mask;
Yi Zou0331a832009-05-17 12:33:52 +00003460 }
Yi Zou8de8b2e2009-09-03 14:55:50 +00003461 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
3462 adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
3463 adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
3464 }
Yi Zou0331a832009-05-17 12:33:52 +00003465 ret = true;
3466 }
3467 return ret;
3468}
3469
3470#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003471/**
3472 * ixgbe_cache_ring_register - Descriptor ring to register mapping
3473 * @adapter: board private structure to initialize
3474 *
3475 * Once we know the feature-set enabled for the device, we'll cache
3476 * the register offset the descriptor ring is assigned to.
3477 *
3478 * Note, the order the various feature calls is important. It must start with
3479 * the "most" features enabled at the same time, then trickle down to the
3480 * least amount of features turned on at once.
3481 **/
3482static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
3483{
3484 /* start with default case */
3485 adapter->rx_ring[0].reg_idx = 0;
3486 adapter->tx_ring[0].reg_idx = 0;
3487
Yi Zou0331a832009-05-17 12:33:52 +00003488#ifdef IXGBE_FCOE
3489 if (ixgbe_cache_ring_fcoe(adapter))
3490 return;
3491
3492#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003493#ifdef CONFIG_IXGBE_DCB
3494 if (ixgbe_cache_ring_dcb(adapter))
3495 return;
3496
3497#endif
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003498 if (ixgbe_cache_ring_fdir(adapter))
3499 return;
3500
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003501 if (ixgbe_cache_ring_rss(adapter))
3502 return;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003503}
3504
Auke Kok9a799d72007-09-15 14:07:45 -07003505/**
3506 * ixgbe_alloc_queues - Allocate memory for all rings
3507 * @adapter: board private structure to initialize
3508 *
3509 * We allocate one ring per queue at run-time since we don't know the
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003510 * number of queues at compile-time. The polling_netdev array is
3511 * intended for Multiqueue, but should work fine with a single queue.
Auke Kok9a799d72007-09-15 14:07:45 -07003512 **/
Alexander Duyck2f90b862008-11-20 20:52:10 -08003513static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07003514{
3515 int i;
3516
3517 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003518 sizeof(struct ixgbe_ring), GFP_KERNEL);
Auke Kok9a799d72007-09-15 14:07:45 -07003519 if (!adapter->tx_ring)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003520 goto err_tx_ring_allocation;
Auke Kok9a799d72007-09-15 14:07:45 -07003521
3522 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003523 sizeof(struct ixgbe_ring), GFP_KERNEL);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003524 if (!adapter->rx_ring)
3525 goto err_rx_ring_allocation;
3526
3527 for (i = 0; i < adapter->num_tx_queues; i++) {
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003528 adapter->tx_ring[i].count = adapter->tx_ring_count;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003529 adapter->tx_ring[i].queue_index = i;
3530 }
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003531
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003532 for (i = 0; i < adapter->num_rx_queues; i++) {
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003533 adapter->rx_ring[i].count = adapter->rx_ring_count;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003534 adapter->rx_ring[i].queue_index = i;
Auke Kok9a799d72007-09-15 14:07:45 -07003535 }
3536
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003537 ixgbe_cache_ring_register(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003538
3539 return 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003540
3541err_rx_ring_allocation:
3542 kfree(adapter->tx_ring);
3543err_tx_ring_allocation:
3544 return -ENOMEM;
3545}
3546
3547/**
3548 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
3549 * @adapter: board private structure to initialize
3550 *
3551 * Attempt to configure the interrupts using the best available
3552 * capabilities of the hardware and the kernel.
3553 **/
Al Virofeea6a52008-11-27 15:34:07 -08003554static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003555{
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00003556 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003557 int err = 0;
3558 int vector, v_budget;
3559
3560 /*
3561 * It's easy to be greedy for MSI-X vectors, but it really
3562 * doesn't do us much good if we have a lot more vectors
3563 * than CPUs. So let's be conservative and only ask for
3564 * (roughly) twice the number of vectors as there are CPUs.
3565 */
3566 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003567 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003568
3569 /*
3570 * At the same time, hardware can only support a maximum of
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00003571 * hw->mac.max_msix_vectors vectors. With features
3572 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
3573 * descriptor queues supported by our device. Thus, we cap it off in
3574 * those rare cases where the CPU count also exceeds our vector limit.
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003575 */
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00003576 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003577
3578 /* A failure in MSI-X entry allocation isn't fatal, but it does
3579 * mean we disable MSI-X capabilities of the adapter. */
3580 adapter->msix_entries = kcalloc(v_budget,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003581 sizeof(struct msix_entry), GFP_KERNEL);
Alexander Duyck7a921c92009-05-06 10:43:28 +00003582 if (adapter->msix_entries) {
3583 for (vector = 0; vector < v_budget; vector++)
3584 adapter->msix_entries[vector].entry = vector;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003585
Alexander Duyck7a921c92009-05-06 10:43:28 +00003586 ixgbe_acquire_msix_vectors(adapter, v_budget);
3587
3588 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3589 goto out;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003590 }
3591
Alexander Duyck7a921c92009-05-06 10:43:28 +00003592 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
3593 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003594 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3595 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3596 adapter->atr_sample_rate = 0;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003597 ixgbe_set_num_queues(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003598
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003599 err = pci_enable_msi(adapter->pdev);
3600 if (!err) {
3601 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
3602 } else {
3603 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003604 "falling back to legacy. Error: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003605 /* reset err */
3606 err = 0;
3607 }
3608
3609out:
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003610 return err;
3611}
3612
Alexander Duyck7a921c92009-05-06 10:43:28 +00003613/**
3614 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
3615 * @adapter: board private structure to initialize
3616 *
3617 * We allocate one q_vector per queue interrupt. If allocation fails we
3618 * return -ENOMEM.
3619 **/
3620static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
3621{
3622 int q_idx, num_q_vectors;
3623 struct ixgbe_q_vector *q_vector;
3624 int napi_vectors;
3625 int (*poll)(struct napi_struct *, int);
3626
3627 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3628 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3629 napi_vectors = adapter->num_rx_queues;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003630 poll = &ixgbe_clean_rxtx_many;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003631 } else {
3632 num_q_vectors = 1;
3633 napi_vectors = 1;
3634 poll = &ixgbe_poll;
3635 }
3636
3637 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3638 q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
3639 if (!q_vector)
3640 goto err_out;
3641 q_vector->adapter = adapter;
Nelson, Shannonf7554a22009-09-18 09:46:06 +00003642 if (q_vector->txr_count && !q_vector->rxr_count)
3643 q_vector->eitr = adapter->tx_eitr_param;
3644 else
3645 q_vector->eitr = adapter->rx_eitr_param;
Alexander Duyckfe49f042009-06-04 16:00:09 +00003646 q_vector->v_idx = q_idx;
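		/* 64 is the standard NAPI weight: the maximum number of
		 * packets one poll call may process */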
Alexander Duyck91281fd2009-06-04 16:00:27 +00003647 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
Alexander Duyck7a921c92009-05-06 10:43:28 +00003648 adapter->q_vector[q_idx] = q_vector;
3649 }
3650
3651 return 0;
3652
3653err_out:
3654 while (q_idx) {
3655 q_idx--;
3656 q_vector = adapter->q_vector[q_idx];
3657 netif_napi_del(&q_vector->napi);
3658 kfree(q_vector);
3659 adapter->q_vector[q_idx] = NULL;
3660 }
3661 return -ENOMEM;
3662}
3663
3664/**
3665 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
3666 * @adapter: board private structure to initialize
3667 *
3668 * This function frees the memory allocated to the q_vectors. In addition if
3669 * NAPI is enabled it will delete any references to the NAPI struct prior
3670 * to freeing the q_vector.
3671 **/
3672static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
3673{
3674 int q_idx, num_q_vectors;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003675
Alexander Duyck91281fd2009-06-04 16:00:27 +00003676 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
Alexander Duyck7a921c92009-05-06 10:43:28 +00003677 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003678 else
Alexander Duyck7a921c92009-05-06 10:43:28 +00003679 num_q_vectors = 1;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003680
3681 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3682 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
Alexander Duyck7a921c92009-05-06 10:43:28 +00003683 adapter->q_vector[q_idx] = NULL;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003684 netif_napi_del(&q_vector->napi);
Alexander Duyck7a921c92009-05-06 10:43:28 +00003685 kfree(q_vector);
3686 }
3687}
3688
Don Skidmore7b25cdb2009-08-25 04:47:32 +00003689static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003690{
3691 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3692 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3693 pci_disable_msix(adapter->pdev);
3694 kfree(adapter->msix_entries);
3695 adapter->msix_entries = NULL;
3696 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
3697 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
3698 pci_disable_msi(adapter->pdev);
3699 }
3701}
3702
3703/**
3704 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
3705 * @adapter: board private structure to initialize
3706 *
3707 * We determine which interrupt scheme to use based on...
3708 * - Kernel support (MSI, MSI-X)
3709 * - which can be user-defined (via MODULE_PARAM)
3710 * - Hardware queue count (num_*_queues)
3711 * - defined by miscellaneous hardware support/features (RSS, etc.)
3712 **/
Alexander Duyck2f90b862008-11-20 20:52:10 -08003713int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003714{
3715 int err;
3716
3717 /* Number of supported queues */
3718 ixgbe_set_num_queues(adapter);
3719
	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
		        "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
	        "Tx Queue count = %u\n",
	        (adapter->num_rx_queues > 1) ? "Enabled" :
	        "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_queues:
	ixgbe_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

/**
 * ixgbe_sfp_timer - timer to kick off the search for a missing SFP+ module
 * @data: pointer to our adapter struct
 **/
static void ixgbe_sfp_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;

	/*
	 * Do the sfp_timer outside of interrupt context due to the
	 * delays that sfp+ detection requires
	 */
	schedule_work(&adapter->sfp_task);
}

/**
 * ixgbe_sfp_task - worker thread to find a missing module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             sfp_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
			goto reschedule;
		ret = hw->phy.ops.reset(hw);
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			dev_err(&adapter->pdev->dev, "failed to initialize "
			        "because an unsupported SFP+ module type "
			        "was detected.\n"
			        "Reload the driver after installing a "
			        "supported module.\n");
			unregister_netdev(adapter->netdev);
		} else {
			DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
			        hw->phy.sfp_type);
		}
		/* don't need this routine any more */
		clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	}
	return;
reschedule:
	if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
		mod_timer(&adapter->sfp_timer,
		          round_jiffies(jiffies + (2 * HZ)));
}

/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
	int j;
	struct tc_configuration *tc;
#endif

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (hw->device_id == IXGBE_DEV_ID_82598AT)
			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
	} else if (hw->mac.type == ixgbe_mac_82599EB) {
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->ring_feature[RING_F_FDIR].indices =
		                                      IXGBE_MAX_FDIR_INDICES;
		adapter->atr_sample_rate = 20;
		adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
		adapter->ring_feature[RING_F_FCOE].indices = 0;
		/* Default traffic class to use for FCoE */
		adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
#endif /* IXGBE_FCOE */
	}

#ifdef CONFIG_IXGBE_DCB
	/* Configure DCB traffic classes */
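	/*
	 * Split Tx/Rx bandwidth evenly across the eight traffic classes:
	 * alternating 12%/13% shares (12 + (j & 1)) sum to exactly 100%.
	 */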
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}
	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.rx_pba_cfg = pba_equal;
	adapter->dcb_cfg.pfc_mode_enable = false;
	adapter->dcb_cfg.round_robin_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
	                   adapter->ring_feature[RING_F_DCB].indices);

#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
#ifdef CONFIG_DCB
	adapter->last_lfc_mode = hw->fc.current_mode;
#endif
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = false;

	/* enable itr by default in dynamic mode */
	adapter->rx_itr_setting = 1;
	adapter->rx_eitr_param = 20000;
	adapter->tx_itr_setting = 1;
	adapter->tx_eitr_param = 10000;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
	                                     &tx_ring->dma);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
	        "descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
		        "vmalloc allocation failed for the rx desc ring\n");
		goto alloc_failed;
	}
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);

	if (!rx_ring->desc) {
		DPRINTK(PROBE, ERR,
		        "Memory allocation failed for the rx desc ring\n");
		vfree(rx_ring->rx_buffer_info);
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

alloc_failed:
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
	        netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
err_setup_rx:
	ixgbe_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbe_free_all_tx_resources(adapter);
	ixgbe_reset(adapter);

	return err;
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
		       "suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
		       "device\n");
		return err;
	}

	ixgbe_reset(adapter);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	if (netif_running(netdev)) {
		err = ixgbe_open(adapter->netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, fctrl;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
	}
	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
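	/*
	 * wufc caches the Wake Up Filter Control bits requested via the
	 * user's WoL settings (adapter->wol); if any are set, arm the
	 * wake filters and leave the receiver configured so matching
	 * packets can wake the system from D3.
	 */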
	if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_MPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}

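		/*
		 * Setting GIO_DIS appears to ask the function to stop
		 * initiating new PCIe (GIO) master requests before the
		 * device is powered down.
		 */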
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		ctrl |= IXGBE_CTRL_GIO_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);

		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	}

	if (wufc && hw->mac.type == ixgbe_mac_82599EB)
		pci_wake_from_d3(pdev, true);
	else
		pci_wake_from_d3(pdev, false);

	*enable_wake = !!wufc;

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;

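	/*
	 * 82599 only: fold the per-ring RSC (receive side coalescing)
	 * counts into one total and accumulate packets dropped because
	 * no DMA resources were available.
	 */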
	if (hw->mac.type == ixgbe_mac_82599EB) {
		u64 rsc_count = 0;
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
			                    IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		for (i = 0; i < adapter->num_rx_queues; i++)
			rsc_count += adapter->rx_ring[i].rsc_count;
		adapter->rsc_count = rsc_count;
	}

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		adapter->stats.mpc[i] += mpc;
		total_mpc += adapter->stats.mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
		if (hw->mac.type == ixgbe_mac_82599EB) {
			adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
			                                   IXGBE_PXONRXCNT(i));
			adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
			                                  IXGBE_PXOFFRXCNT(i));
			adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
			                                     IXGBE_PXONRXC(i));
			adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
			                                    IXGBE_PXOFFRXC(i));
		}
		adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
		                                            IXGBE_PXONTXC(i));
		adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
		                                            IXGBE_PXOFFTXC(i));
	}
	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	adapter->stats.gprc -= missed_rx;

	/*
	 * 82598 only has a 32-bit byte counter in the high register;
	 * on 82599 GORC/GOTC are 36 bits wide, with the top 4 bits
	 * read from the high register.
	 */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		u64 tmp;
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
		adapter->stats.gorc += (tmp << 32);
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
		adapter->stats.gotc += (tmp << 32);
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
		adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
		adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
	} else {
		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		adapter->stats.mprc -= bprc;
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.lxofftxc += lxoff;
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	adapter->stats.gptc -= xon_off_tot;
	adapter->stats.mptc -= xon_off_tot;
	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.ptc64 -= xon_off_tot;
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;

	/* Rx Errors */
	netdev->stats.rx_errors = adapter->stats.crcerrs +
	                          adapter->stats.rlec;
	netdev->stats.rx_dropped = 0;
	netdev->stats.rx_length_errors = adapter->stats.rlec;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_missed_errors = total_mpc;
}

/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
		                (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		goto watchdog_reschedule;
	}

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbe_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= ((u64)1 << i);
	}

	/* Cause software interrupt to ensure rx rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);

watchdog_reschedule:
	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}

/**
 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_multispeed_fiber_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             multispeed_fiber_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiation;

	adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
}

/**
 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_config_module_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             sfp_config_module_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 err;

	adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;

	/* Time for electrical oscillations to settle down */
	msleep(100);
	err = hw->phy.ops.identify_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		dev_err(&adapter->pdev->dev, "failed to initialize because "
		        "an unsupported SFP+ module type was detected.\n"
		        "Reload the driver after installing a supported "
		        "module.\n");
		unregister_netdev(adapter->netdev);
		return;
	}
	hw->mac.ops.setup_sfp(hw);

	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
		/* This will also work for DA Twinax connections */
		schedule_work(&adapter->multispeed_fiber_task);
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
}

/**
 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_fdir_reinit_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             fdir_reinit_task);
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_FDIR_INIT_DONE,
			        &(adapter->tx_ring[i].reinit_state));
	} else {
		DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
		        "ignored adding FDIR ATR filters\n");
	}
	/* Done FDIR Re-initialization, enable transmits */
	netif_tx_start_all_queues(adapter->netdev);
}

/**
 * ixgbe_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_watchdog_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	int i;
	struct ixgbe_ring *tx_ring;
	int some_tx_pending = 0;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up) {
#ifdef CONFIG_DCB
			if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
				for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
					hw->mac.ops.fc_enable(hw, i);
			} else {
				hw->mac.ops.fc_enable(hw, 0);
			}
#else
			hw->mac.ops.fc_enable(hw, 0);
#endif
		}

		if (link_up ||
		    time_after(jiffies, (adapter->link_check_timeout +
		                         IXGBE_TRY_LINK_TIMEOUT))) {
			adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		}
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
	}

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			bool flow_rx, flow_tx;

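			/*
			 * Read back the flow control state actually
			 * programmed in the MAC (the registers differ
			 * between 82598 and 82599) for the link-up
			 * message below.
			 */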
			if (hw->mac.type == ixgbe_mac_82599EB) {
				u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
				u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
				flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
				flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
			} else {
				u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
				u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
				flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
				flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
			}

			printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
			        "10 Gbps" :
			        (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
			         "1 Gbps" : "unknown speed")),
			       ((flow_rx && flow_tx) ? "RX/TX" :
			        (flow_rx ? "RX" :
			         (flow_tx ? "TX" : "None"))));

			netif_carrier_on(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);
		}
	}

	if (!netif_carrier_ok(netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			tx_ring = &adapter->tx_ring[i];
			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
				some_tx_pending = 1;
				break;
			}
		}

		if (some_tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			schedule_work(&adapter->reset_task);
		}
	}

	ixgbe_update_stats(adapter);
	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

static int ixgbe_tso(struct ixgbe_adapter *adapter,
                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
                     u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
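		/*
		 * The IP and TCP headers are rewritten below, so a shared
		 * (cloned) header block must be made private first.
		 */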
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
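			/*
			 * Seed the TCP checksum with the pseudo-header
			 * sum, length left as zero; the hardware fills
			 * in the final checksum for each segment it
			 * carves out.
			 */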
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
			                                         iph->daddr, 0,
			                                         IPPROTO_TCP,
			                                         0);
			adapter->hw_tso_ctxt++;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			                     &ipv6_hdr(skb)->daddr,
			                     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
		                   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring,
                          struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
			                    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
		                    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
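			/*
			 * The case labels use cpu_to_be16() so the
			 * constants are byte-swapped at compile time and
			 * skb->protocol is compared in network byte order.
			 */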
			case cpu_to_be16(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					DPRINTK(PROBE, WARNING,
					        "partial checksum but proto=%x!\n",
					        skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        struct ixgbe_ring *tx_ring,
                        struct sk_buff *skb, u32 tx_flags,
                        unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

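	/*
	 * skb_dma_map() maps the linear data and every fragment in one
	 * call, leaving the bus addresses in skb_shinfo(skb)->dma_head
	 * and ->dma_maps[] for the descriptor setup below.
	 */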
	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
		/* excluding fcoe_crc_eof for FCoE */
		total -= sizeof(struct fcoe_crc_eof);

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;

		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = 0;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = map[f] + offset;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
		}
		if (total == 0)
			break;
	}

	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}
5024
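/*
 * ixgbe_tx_queue turns the mapped buffers into advanced transmit
 * descriptors, folding the offload options (TSO, checksum, VLAN, FCoE)
 * into cmd_type_len/olinfo_status, then advances the tail register to
 * hand the new descriptors to hardware.
 */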
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
                           struct ixgbe_ring *tx_ring,
                           int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
		                 IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
			                 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
		                 IXGBE_ADVTXD_POPTS_SHIFT;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		olinfo_status |= IXGBE_ADVTXD_CC;
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_FSO)
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	}

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}

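/*
 * ixgbe_atr programs an 82599 flow-director signature filter from an
 * outgoing TCP frame so that receive traffic for the same flow is
 * steered back to this queue.  Source and destination fields are
 * swapped deliberately: the filter must match the flow as the receiver
 * sees it.
 */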
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
                      int queue, u32 tx_flags)
{
	/* Right now, we support IPv4 only */
	struct ixgbe_atr_input atr_input;
	struct tcphdr *th;
	struct iphdr *iph = ip_hdr(skb);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 vlan_id, src_port, dst_port, flex_bytes;
	u32 src_ipv4_addr, dst_ipv4_addr;
	u8 l4type = 0;

	/* check if we're TCP; anything else is unsupported for now */
	if (iph->protocol == IPPROTO_TCP) {
		th = tcp_hdr(skb);
		src_port = th->source;
		dst_port = th->dest;
		l4type |= IXGBE_ATR_L4TYPE_TCP;
		/* l4type IPv4 type is 0, no need to assign */
	} else {
		/* Unsupported L4 header, just bail here */
		return;
	}

	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));

	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
	          IXGBE_TX_FLAGS_VLAN_SHIFT;
	src_ipv4_addr = iph->saddr;
	dst_ipv4_addr = iph->daddr;
	flex_bytes = eth->h_proto;

	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
	ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
	ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
	ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
	ixgbe_atr_set_l4type_82599(&atr_input, l4type);
	/* src and dst are inverted, think how the receiver sees them */
	ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
	ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
}

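/*
 * Transmit queue flow control: stop the subqueue when descriptors run
 * short, re-check after the barrier in case another CPU freed ring
 * entries in the meantime, and restart the subqueue if it did.
 */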
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                 struct ixgbe_ring *tx_ring, int size)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct net_device *netdev,
                               struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}

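/*
 * Queue selection: with flow director (ATR) hashing the queue follows
 * the submitting CPU; under DCB the 802.1p priority (the top three bits
 * of the VLAN tag, hence the shift by 13) picks the queue; otherwise
 * the core's skb_tx_hash() decides.
 */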
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
		return smp_processor_id();

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;

	return skb_tx_hash(dev, skb);
}

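/*
 * ixgbe_xmit_frame is the transmit entry point: it builds tx_flags
 * (VLAN/DCB priority, FCoE), reserves enough descriptors or returns
 * NETDEV_TX_BUSY, performs TSO/checksum (or FCoE sequence offload)
 * setup, DMA-maps the skb, samples the flow for ATR when enabled, and
 * finally posts the descriptors.
 */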
static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;
	unsigned int f;

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
			tx_flags |= (skb->queue_mapping << 13);
		}
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		if (skb->priority != TC_PRIO_CONTROL) {
			tx_flags |= (skb->queue_mapping << 13);
			tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
			tx_flags |= IXGBE_TX_FLAGS_VLAN;
		} else {
			skb->queue_mapping =
				adapter->ring_feature[RING_F_DCB].indices - 1;
		}
	}

	r_idx = skb->queue_mapping;
	tx_ring = &adapter->tx_ring[r_idx];

	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
	    (skb->protocol == htons(ETH_P_FCOE))) {
		tx_flags |= IXGBE_TX_FLAGS_FCOE;
#ifdef IXGBE_FCOE
		r_idx = smp_processor_id();
		r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
		r_idx += adapter->ring_feature[RING_F_FCOE].mask;
		tx_ring = &adapter->tx_ring[r_idx];
#endif
	}
	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
#ifdef IXGBE_FCOE
		/* setup tx offload for FCoE */
		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
	} else {
		if (skb->protocol == htons(ETH_P_IP))
			tx_flags |= IXGBE_TX_FLAGS_IPV4;
		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_TSO;
		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		         (skb->ip_summed == CHECKSUM_PARTIAL))
			tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
	if (count) {
		/* add the ATR filter if ATR is on */
		if (tx_ring->atr_sample_rate) {
			++tx_ring->atr_count;
			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
			    test_bit(__IXGBE_FDIR_INIT_DONE,
			             &tx_ring->reinit_state)) {
				ixgbe_atr(adapter, skb, tx_ring->queue_index,
				          tx_flags);
				tx_ring->atr_count = 0;
			}
		}
		ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
		               hdr_len);
		ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->tx_buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}

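/*
 * MDIO pass-through: these helpers let the generic mdio/mii ioctl layer
 * (see ixgbe_ioctl below) reach the PHY registers through the ixgbe
 * read_reg/write_reg ops, rejecting accesses to any port address other
 * than the one the driver probed.
 */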
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
                            u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}

/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Remove the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		for (i = 0; i < num_q_vectors; i++) {
			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
			ixgbe_msix_clean_many(0, q_vector);
		}
	} else {
		ixgbe_intr(adapter->pdev->irq, netdev);
	}
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
}
#endif

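/*
 * ixgbe_netdev_ops is what plugs the routines above into the net core;
 * the FCoE entries are only compiled in when IXGBE_FCOE is defined.
 */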
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_get_stats		= ixgbe_get_stats,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_set_multicast_list	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_register	= ixgbe_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
#endif /* IXGBE_FCOE */
};

/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 part_num, eec;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
				        "configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
		        "pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
	                      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

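	/*
	 * Note: this walk of BARs 1-5 is effectively a no-op (each
	 * iteration just continues); it appears to be a leftover
	 * placeholder, since only BAR 0 is mapped above.
	 */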
	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strcpy(netdev->name, pci_name(pdev));

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	/* set up this timer and work struct before calling get_invariants
	 * which might start the timer
	 */
	init_timer(&adapter->sfp_timer);
	adapter->sfp_timer.function = &ixgbe_sfp_timer;
	adapter->sfp_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);

	/* multispeed fiber has its own tasklet, called from GPI SDP1 context */
	INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);

	/* a new SFP+ module arrival, called from GPI SDP2 context */
	INIT_WORK(&adapter->sfp_config_module_task,
	          ixgbe_sfp_config_module_task);

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/*
	 * If there is a fan on this device and it has failed log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			DPRINTK(PROBE, CRIT,
			        "Fan has stopped, replace the adapter\n");
	}

	/* reset_hw fills in the perm_addr as well */
	err = hw->mac.ops.reset_hw(hw);
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * Start a kernel thread to watch for a module to arrive.
		 * Only do this for 82598, since 82599 will generate
		 * interrupts on module arrival.
		 */
		set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
		mod_timer(&adapter->sfp_timer,
		          round_jiffies(jiffies + (2 * HZ)));
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		dev_err(&adapter->pdev->dev, "failed to initialize because "
		        "an unsupported SFP+ module type was detected.\n"
		        "Reload the driver after installing a supported "
		        "module.\n");
		goto err_sw_init;
	} else if (err) {
		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
		goto err_sw_init;
	}

	netdev->features = NETIF_F_SG |
	                   NETIF_F_IP_CSUM |
	                   NETIF_F_HW_VLAN_TX |
	                   NETIF_F_HW_VLAN_RX |
	                   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		netdev->features |= NETIF_F_SCTP_CSUM;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "invalid MAC address\n");
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

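	/*
	 * Only the 82599 KX4 device advertises Wake-on-LAN here; every
	 * other part leaves adapter->wol at 0 (disabled).
	 */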
	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
		                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		/* Enable ACPI wakeup in GRC */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		                (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
		break;
	default:
		adapter->wol = 0;
		break;
	}
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
	         ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s" :
	          (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s" : "Unknown"),
	         ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	          (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	          (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	          "Unknown"),
	         netdev->dev_addr);
	ixgbe_read_pba_num_generic(hw, &part_num);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
		         hw->mac.type, hw->phy.type, hw->phy.sfp_type,
		         (part_num >> 8), (part_num & 0xff));
	else
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
		         hw->mac.type, hw->phy.type,
		         (part_num >> 8), (part_num & 0xff));

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
		         "this card is not sufficient for optimal "
		         "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
		         "PCI-Express slot is required.\n");
	}

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);

	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		dev_warn(&pdev->dev, "This device is a pre-production "
		         "adapter/LOM.  Please be aware there may be issues "
		         "associated with your hardware.  If you are "
		         "experiencing problems please contact your Intel or "
		         "hardware representative who provided you with this "
		         "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBE_DOWN, &adapter->state);
	/* clear the module not found bit to make sure the worker won't
	 * reschedule
	 */
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		cancel_work_sync(&adapter->fdir_reinit_task);
	flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);

#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));

	DPRINTK(PROBE, INFO, "complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

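/*
 * PCI AER (Advanced Error Reporting) recovery callbacks: detach and
 * quiesce the device on error, re-enable and reset it after the slot
 * reset, then attach again once the core says traffic may resume.
 */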
/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		DPRINTK(PROBE, ERR,
		        "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
		        "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}

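/*
 * ixgbe_notify_dca fans a DCA (Direct Cache Access) event out to every
 * bound ixgbe device via driver_for_each_device(); a non-zero result
 * from any device turns into NOTIFY_BAD.
 */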
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
                            void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
	                                 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */
#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbe_exit_module);

/* ixgbe_main.c */