/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
27
28#include <linux/types.h>
29#include <linux/module.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/vmalloc.h>
33#include <linux/string.h>
34#include <linux/in.h>
35#include <linux/ip.h>
36#include <linux/tcp.h>
Lucy Liu60127862009-07-22 14:07:33 +000037#include <linux/pkt_sched.h>
Auke Kok9a799d72007-09-15 14:07:45 -070038#include <linux/ipv6.h>
39#include <net/checksum.h>
40#include <net/ip6_checksum.h>
41#include <linux/ethtool.h>
42#include <linux/if_vlan.h>
Yi Zoueacd73f2009-05-13 13:11:06 +000043#include <scsi/fc/fc_fcoe.h>
Auke Kok9a799d72007-09-15 14:07:45 -070044
45#include "ixgbe.h"
46#include "ixgbe_common.h"
47
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.44-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
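		/*
		 * 82598: Rx causes occupy IVAR entries 0-63 and Tx causes
		 * 64-127; each 32-bit IVAR register holds four 8-bit
		 * entries, hence the >> 2 for the register index and
		 * (queue & 0x3) for the byte lane.
		 */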
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
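			/*
			 * 82599: each IVAR register holds the Rx and Tx
			 * entries for a pair of queues, so the bit offset is
			 * 16 * (queue & 1) + 8 * direction and the register
			 * index is queue >> 1.
			 */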
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
	u32 mask;

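	/*
	 * 82598 exposes a single 32-bit EICS register; 82599 spreads the
	 * 64-bit queue mask across the two EICS_EX registers.
	 */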
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
	tx_buffer_info->dma = 0;
	if (tx_buffer_info->skb) {
		skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
		              DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Detect a transmit hang in hardware; this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	adapter->detect_tx_hung = false;
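	/* only flag a hang if the descriptor is old enough and the Tx unit
	 * is not merely paused by flow control (TXOFF) */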
	if (tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, tx_ring->head),
			IXGBE_READ_REG(hw, tx_ring->tail),
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
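/*
 * TXD_USE_COUNT(S) is ceil(S / 16K), the number of descriptors a buffer of
 * S bytes consumes.  Example, assuming 4K pages: TXD_USE_COUNT(PAGE_SIZE)
 * is 1, so DESC_NEEDED is 1 (head) + MAX_SKB_FRAGS + 1 (context).
 */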

static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;
				unsigned int hlen = skb_headlen(skb);

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
#ifdef IXGBE_FCOE
				/* adjust for FCoE Sequence Offload */
				if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
				    && (skb->protocol == htons(ETH_P_FCOE)) &&
				    skb_is_gso(skb)) {
					hlen = skb_transport_offset(skb) +
					       sizeof(struct fc_frame_header) +
					       sizeof(struct fcoe_crc_eof);
					segs = DIV_ROUND_UP(skb->len - hlen,
					                    skb_shinfo(skb)->gso_size);
				}
#endif /* IXGBE_FCOE */
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * hlen) + skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
			                                 tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
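	/* wake the queue only once two worst-case packets' worth of
	 * descriptors are free, to avoid an immediate re-stop */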
	if (unlikely(count && netif_carrier_ok(netdev) &&
	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			DPRINTK(PROBE, INFO,
			        "tx hang %d detected, resetting adapter\n",
			        adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if (count >= tx_ring->work_limit)
		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (count < tx_ring->work_limit);
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring - adapter->rx_ring;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
			rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
			rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		}
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring - adapter->tx_ring;

	if (tx_ring->cpu != cpu) {
		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
			txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
			txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
		}
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	skb_record_rx_queue(skb, ring->queue_index);
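	/*
	 * Outside of netpoll, deliver through NAPI GRO; the netpoll path
	 * cannot use GRO, so it falls back to vlan_hwaccel_rx()/netif_rx().
	 */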
	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
			vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			netif_rx(skb);
	}
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
{
	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
                                         struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = pci_map_page(pdev, bi->page,
			                            bi->page_offset,
			                            (PAGE_SIZE / 2),
			                            PCI_DMA_FROMDEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb;
			skb = netdev_alloc_skb(adapter->netdev,
			                       (rx_ring->rx_buf_len +
			                        NET_IP_ALIGN));

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
			bi->dma = pci_map_single(pdev, skb->data,
			                         rx_ring->rx_buf_len,
			                         PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
	        IXGBE_RXDADV_RSCCNT_MASK) >>
	        IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
	unsigned int frag_list_size = 0;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	return skb;
}

static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i, rsc_count = 0;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
			                 rx_ring->rx_buf_len,
			                 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			                   rx_buffer_info->page,
			                   rx_buffer_info->page_offset,
			                   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
			rsc_count = ixgbe_get_rsc_count(rx_desc);

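		/*
		 * Hardware RSC: a nonzero coalesce count means NEXTP points
		 * at the descriptor holding the next buffer of this flow,
		 * rather than the next descriptor in ring order.
		 */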
		if (rsc_count) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
			            IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
			rx_ring->rsc_count += (rsc_count - 1);
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}

		if (staterr & IXGBE_RXD_STAT_EOP) {
			if (skb->prev)
				skb = ixgbe_transform_rsc_queue(skb);
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, rx_desc, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			if (!ddp_bytes)
				goto next_desc;
		}
#endif /* IXGBE_FCOE */
		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
	/* include DDPed FCoE data */
	if (ddp_bytes > 0) {
		unsigned int mss;

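		/*
		 * Approximate the FC payload per frame: the largest payload
		 * that fits the MTU, rounded down to a 512-byte multiple,
		 * likely because FC data is carried in 512-byte blocks.
		 */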
		mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
		      sizeof(struct fc_frame_header) -
		      sizeof(struct fcoe_crc_eof);
		if (mss > 512)
			mss &= ~511;
		total_rx_bytes += ddp_bytes;
		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
	}
#endif /* IXGBE_FCOE */

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
		                       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
			                      adapter->num_rx_queues,
			                      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
		                       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
			                      adapter->num_tx_queues,
			                      r_idx + 1);
		}

		if (q_vector->txr_count && !q_vector->rxr_count)
			/* tx only */
			q_vector->eitr = adapter->tx_eitr_param;
		else if (q_vector->rxr_count)
			/* rx or mixed */
			q_vector->eitr = adapter->rx_eitr_param;

		ixgbe_write_eitr(q_vector);
	}

944 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
945 v_idx);
946 else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
947 ixgbe_set_ivar(adapter, -1, 1, v_idx);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800948 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
Auke Kok9a799d72007-09-15 14:07:45 -0700949
Jesse Brandeburg41fb9242008-09-11 19:55:58 -0700950 /* set up to autoclear timer, and the vectors */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800951 mask = IXGBE_EIMS_ENABLE_MASK;
Jesse Brandeburg41fb9242008-09-11 19:55:58 -0700952 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800953 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
Auke Kok9a799d72007-09-15 14:07:45 -0700954}
955
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * this functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;


	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */
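	/*
	 * Example: at eitr = 20000 ints/s the timeslice is 50us, so 1000
	 * bytes in one interval gives bytes_perint = 20, i.e. ~20MB/s.
	 */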

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx;
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->tx_itr,
		                           tx_ring->total_packets,
		                           tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
		                    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->rx_itr,
		                           rx_ring->total_packets,
		                           rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
		                    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
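		/* the 90/10 weighting moves toward the new target gradually,
		 * damping oscillation between latency classes */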

		/* save the smoothed value as the starting point next time */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}

	return;
}

static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}

static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		schedule_work(&adapter->multispeed_fiber_task);
	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		schedule_work(&adapter->sfp_config_module_task);
	} else {
		/* Interrupt isn't for us... */
		return;
	}
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		schedule_work(&adapter->watchdog_task);
	}
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/*
	 * Workaround for Silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which is later done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (hw->mac.type == ixgbe_mac_82598EB)
		ixgbe_check_fan_failure(adapter, eicr);

	if (hw->mac.type == ixgbe_mac_82599EB) {
		ixgbe_check_sfp_event(adapter, eicr);

		/* Handle Flow Director Full threshold interrupt */
		if (eicr & IXGBE_EICR_FLOW_DIR) {
			int i;
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
			/* Disable transmits before FDIR Re-initialization */
			netif_tx_stop_all_queues(netdev);
			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *tx_ring =
				                           &adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
				                       &tx_ring->reinit_state))
					schedule_work(&adapter->fdir_reinit_task);
			}
		}
	}
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}

static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
	}
	/* skip the flush */
}

static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
                                            u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
	}
	/* skip the flush */
}

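/**
 * ixgbe_msix_clean_tx - single unshared vector tx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/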
static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

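/**
 * ixgbe_msix_clean_many - vector handler for a mix of tx and rx queues
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 *
 * Resets the byte/packet counters on every ring this vector services,
 * then masks the vector and hands the real cleanup to NAPI.
 **/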
static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring;
	int r_idx;
	int i;

	if (!q_vector->txr_count && !q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = &(adapter->tx_ring[r_idx]);
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = &(adapter->rx_ring[r_idx]);
		ring->total_bytes = 0;
		ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	/* disable interrupts on this vector only */
	ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rx_ring);
#endif

	ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->rx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
			                        ((u64)1 << q_vector->v_idx));
	}

	return work_done;
}

/**
 * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one tx/rx queue associated with a
 * q_vector.
 **/
static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *ring = NULL;
	int work_done = 0, i;
	long r_idx;
	bool tx_clean_complete = true;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, ring);
#endif
		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_rx_dca(adapter, ring);
#endif
		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	ring = &(adapter->rx_ring[r_idx]);
	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->rx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter,
			                        ((u64)1 << q_vector->v_idx));
		return 0;
	}

	return work_done;
}

/**
 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	tx_ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_tx_dca(adapter, tx_ring);
#endif

	if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
		work_done = budget;

	/* If all Tx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->tx_itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
	}

	return work_done;
}

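/*
 * Each q_vector tracks the rings it services with a bitmap (rxr_idx /
 * txr_idx) and a count; the MSI-X handlers above walk these bitmaps with
 * find_first_bit()/find_next_bit().  The two helpers below record one
 * ring-to-vector assignment each.
 */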
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int t_idx)
{
	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
}

/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
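 *
 * For example, with 4 vectors and 8 Rx queues the grouping loop gives
 * each vector DIV_ROUND_UP(remaining queues, remaining vectors) = 2 Rx
 * queues, so the rings are spread evenly even without a 1-to-1 mapping.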
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
                                      int vectors)
{
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	/* No mapping required if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		goto out;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);

		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* Map the Tx/Rx rings to the vectors we were allotted. */
	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
	if (err)
		goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                         &ixgbe_msix_clean_many)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbe_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
		} else if (handler == &ixgbe_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
		} else {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);
		}

		err = request_irq(adapter->msix_entries[vector].vector,
		                  handler, 0, adapter->name[vector],
		                  adapter->q_vector[vector]);
		if (err) {
			DPRINTK(PROBE, ERR,
			        "request_irq failed for MSIX interrupt "
			        "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
	                  &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
			"request_irq for msix_lsc failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[i].vector,
		         adapter->q_vector[i]);
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
out:
	return err;
}

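/**
 * ixgbe_set_itr - update the ITR value for the legacy/MSI vector
 * @adapter: board private structure
 *
 * Derives a new interrupt throttle rate from the tx/rx byte and packet
 * counts of queue 0, then applies exponential smoothing
 * (new = 90% * old + 10% * target) before writing EITR.
 **/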
static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u8 current_itr;
	u32 new_itr = q_vector->eitr;
	struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
	struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
	                                    q_vector->tx_itr,
	                                    tx_ring->total_packets,
	                                    tx_ring->total_bytes);
	q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
	                                    q_vector->rx_itr,
	                                    rx_ring->total_packets,
	                                    rx_ring->total_bytes);

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 8000;
		break;
	default:
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

		/* save the smoothed value; it seeds the next adjustment */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
	u32 mask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
		mask |= IXGBE_EIMS_GPI_SDP1;
	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		mask |= IXGBE_EIMS_ECC;
		mask |= IXGBE_EIMS_GPI_SDP1;
		mask |= IXGBE_EIMS_GPI_SDP2;
	}
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		mask |= IXGBE_EIMS_FLOW_DIR;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	ixgbe_irq_enable_queues(adapter, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
	u32 eicr;

	/*
	 * Workaround for silicon errata.  Mask the interrupts
	 * before the read of EICR.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* for NAPI, EIAM auto-masks tx/rx interrupt bits on read,
	 * therefore no explicit interrupt disable is necessary */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
	if (!eicr) {
		/* shared interrupt alert!
		 * make sure interrupts are enabled because the read will
		 * have disabled interrupts due to EIAM */
		ixgbe_irq_enable(adapter);
		return IRQ_NONE; /* Not our interrupt */
	}

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_check_sfp_event(adapter, eicr);

	ixgbe_check_fan_failure(adapter, eicr);

	if (napi_schedule_prep(&(q_vector->napi))) {
		adapter->tx_ring[0].total_packets = 0;
		adapter->tx_ring[0].total_bytes = 0;
		adapter->rx_ring[0].total_packets = 0;
		adapter->rx_ring[0].total_bytes = 0;
		/* would disable interrupts here but EIAM disabled it */
		__napi_schedule(&(q_vector->napi));
	}

	return IRQ_HANDLED;
}

static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
	}
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		err = ixgbe_request_msix_irqs(adapter);
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
		                  netdev->name, netdev);
	} else {
		err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
		                  netdev->name, netdev);
	}

	if (err)
		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i, q_vectors;

		q_vectors = adapter->num_msix_vectors;

		i = q_vectors - 1;
		free_irq(adapter->msix_entries[i].vector, netdev);

		i--;
		for (; i >= 0; i--) {
			free_irq(adapter->msix_entries[i].vector,
			         adapter->q_vector[i]);
		}

		ixgbe_reset_q_vectors(adapter);
	} else {
		free_irq(adapter->pdev->irq, netdev);
	}
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	} else {
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
	}
	IXGBE_WRITE_FLUSH(&adapter->hw);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;
		for (i = 0; i < adapter->num_msix_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
	                EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));

	ixgbe_set_ivar(adapter, 0, 0, 0);
	ixgbe_set_ivar(adapter, 1, 0, 0);

	map_vector_to_rxq(adapter, 0, 0);
	map_vector_to_txq(adapter, 0, 0);

	DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbe_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
		                (tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_TDH(j);
		adapter->tx_ring[i].tail = IXGBE_TDT(j);
		/*
		 * Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
			break;
		case ixgbe_mac_82599EB:
		default:
			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
			break;
		}
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
			break;
		case ixgbe_mac_82599EB:
		default:
			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
			break;
		}
	}
	if (hw->mac.type == ixgbe_mac_82599EB) {
		/* We enable 8 traffic classes, DCB only */
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
			IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
			                IXGBE_MTQC_8TC_8TQ));
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

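/**
 * ixgbe_configure_srrctl - program the split receive control for a ring
 * @adapter: board private structure
 * @rx_ring: ring to configure
 *
 * Selects the descriptor type (header split vs. one buffer) and encodes
 * the header and packet buffer sizes into the ring's SRRCTL register.
 **/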
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring)
{
	u32 srrctl;
	int index;
	struct ixgbe_ring_feature *feature = adapter->ring_feature;

	index = rx_ring->reg_idx;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		unsigned long mask;
		mask = (unsigned long) feature[RING_F_RSS].mask;
		index = index & mask;
	}
	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));

	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
	          IXGBE_SRRCTL_BSIZEHDR_MASK;

	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
		srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
		srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#endif
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}

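/*
 * Pick the MRQC (multiple receive queues command) value from the enabled
 * feature flags.  Only the 82599 is programmed this way; for the 82598
 * the RSS enable bit is OR'ed in later in ixgbe_configure_rx().
 */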
static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
	u32 mrqc = 0;
	int mask;

	if (!(adapter->hw.mac.type == ixgbe_mac_82599EB))
		return mrqc;

	mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
#ifdef CONFIG_IXGBE_DCB
				 | IXGBE_FLAG_DCB_ENABLED
#endif
				);

	switch (mask) {
	case (IXGBE_FLAG_RSS_ENABLED):
		mrqc = IXGBE_MRQC_RSSEN;
		break;
#ifdef CONFIG_IXGBE_DCB
	case (IXGBE_FLAG_DCB_ENABLED):
		mrqc = IXGBE_MRQC_RT8TCEN;
		break;
#endif /* CONFIG_IXGBE_DCB */
	default:
		break;
	}

	return mrqc;
}

/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @index: index of ring to set
 * @rx_buf_len: rx buffer length
 **/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index,
                                   int rx_buf_len)
{
	struct ixgbe_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	int j;
	u32 rscctrl;

	rx_ring = &adapter->rx_ring[index];
	j = rx_ring->reg_idx;
	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
	rscctrl |= IXGBE_RSCCTL_RSCEN;
	/*
	 * we must limit the number of descriptors so that the
	 * total size of max desc * buf_len is not greater
	 * than 65535
	 */
	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
#if (MAX_SKB_FRAGS > 16)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
#elif (MAX_SKB_FRAGS > 4)
		rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#else
		rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
#endif
	} else {
		if (rx_buf_len < IXGBE_RXBUFFER_4096)
			rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
		else if (rx_buf_len < IXGBE_RXBUFFER_8192)
			rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
		else
			rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
	}
	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
}

/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring *rx_ring;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen, rxctrl, rxcsum;
	static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
	                  0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
	                  0x6A3E67EA, 0x14364D17, 0x3BED200D};
	u32 fctrl, hlreg0;
	u32 reta = 0, mrqc = 0;
	u32 rdrxctl;
	int rx_buf_len;

	/* Decide whether to use packet split mode or not */
	adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		rx_buf_len = IXGBE_RX_HDR_SIZE;
		if (hw->mac.type == ixgbe_mac_82599EB) {
			/* PSRTYPE must be initialized in 82599 */
			u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			              IXGBE_PSRTYPE_UDPHDR |
			              IXGBE_PSRTYPE_IPV4HDR |
			              IXGBE_PSRTYPE_IPV6HDR |
			              IXGBE_PSRTYPE_L2HDR;
			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
		}
	} else {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
	fctrl |= IXGBE_FCTRL_PMCF;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
	else
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
#ifdef IXGBE_FCOE
	if (netdev->features & NETIF_F_FCOE_MTU)
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
#endif
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_ring = &adapter->rx_ring[i];
		rdba = rx_ring->dma;
		j = rx_ring->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
		rx_ring->head = IXGBE_RDH(j);
		rx_ring->tail = IXGBE_RDT(j);
		rx_ring->rx_buf_len = rx_buf_len;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
			rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
		else
			rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;

#ifdef IXGBE_FCOE
		if (netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;
			f = &adapter->ring_feature[RING_F_FCOE];
			if ((i >= f->mask) && (i < f->mask + f->indices)) {
				rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
				if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
					rx_ring->rx_buf_len =
					        IXGBE_FCOE_JUMBO_FRAME_SIZE;
			}
		}

#endif /* IXGBE_FCOE */
		ixgbe_configure_srrctl(adapter, rx_ring);
	}

	if (hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * For VMDq support of different descriptor types or
		 * buffer sizes through the use of multiple SRRCTL
		 * registers, RDRXCTL.MVMEN must be set to 1
		 *
		 * also, the manual doesn't mention it clearly but DCA hints
		 * will only use queue 0's tags unless this bit is set.  Side
		 * effects of setting this bit are only that SRRCTL must be
		 * fully programmed [0..15]
		 */
		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
	}

	/* Program MRQC for the distribution of queues */
	mrqc = ixgbe_setup_mrqc(adapter);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
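		/*
		 * The 128-entry redirection table is written four entries
		 * at a time; with e.g. 4 RSS queues its bytes cycle
		 * 0x00, 0x11, 0x22, 0x33, 0x00, ... so an RSS hash picks
		 * queues in a repeating 0..(indices-1) pattern.
		 */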
		/* Fill out redirection table */
		for (i = 0, j = 0; i < 128; i++, j++) {
			if (j == adapter->ring_feature[RING_F_RSS].indices)
				j = 0;
			/* reta = 4-byte sliding window of
			 * 0x00..(indices-1)(indices-1)00..etc. */
			reta = (reta << 8) | (j * 0x11);
			if ((i & 3) == 3)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
		}

		/* Fill out hash function seeds */
		for (i = 0; i < 10; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);

		if (hw->mac.type == ixgbe_mac_82598EB)
			mrqc |= IXGBE_MRQC_RSSEN;
		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
	    adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
		/* Disable indicating checksum in descriptor, enables
		 * RSS hash */
		rxcsum |= IXGBE_RXCSUM_PCSD;
	}
	if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
		/* Enable IPv4 payload checksum for UDP fragments
		 * if PCSD is not set */
		rxcsum |= IXGBE_RXCSUM_IPPCSE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

	if (hw->mac.type == ixgbe_mac_82599EB) {
		rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
		rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
	}

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		/* Enable 82599 HW-RSC */
		for (i = 0; i < adapter->num_rx_queues; i++)
			ixgbe_configure_rscctl(adapter, i, rx_buf_len);

		/* Disable RSC for ACK packets */
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
		   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
	}
}

static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);

	/* remove VID from filter table */
	hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
}

static void ixgbe_vlan_rx_register(struct net_device *netdev,
                                   struct vlan_group *grp)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl;
	int i, j;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);
	adapter->vlgrp = grp;

	/*
	 * For a DCB driver, always enable VLAN tag stripping so we can
	 * still receive traffic from a DCB-enabled host even if we're
	 * not in DCB mode.
	 */
	ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		ctrl |= IXGBE_VLNCTRL_VFE;
		/* enable VLAN tag insert/strip */
		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
		for (i = 0; i < adapter->num_rx_queues; i++) {
			j = adapter->rx_ring[i].reg_idx;
			ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
			ctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
		}
	}
	ixgbe_vlan_rx_add_vid(netdev, 0);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

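/**
 * ixgbe_addr_list_itr - iterator for the multicast address list
 * @hw: unused
 * @mc_addr_ptr: pointer into the current dev_mc_list entry's address
 * @vmdq: VMDq pool index, always 0 here
 *
 * Returns the current address and advances *mc_addr_ptr to the next
 * list entry; used as the callback for update_mc_addr_list below.
 **/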
static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
{
	struct dev_mc_list *mc_ptr;
	u8 *addr = *mc_addr_ptr;
	*vmdq = 0;

	mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
	if (mc_ptr->next)
		*mc_addr_ptr = mc_ptr->next->dmi_addr;
	else
		*mc_addr_ptr = NULL;

	return addr;
}

/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
static void ixgbe_set_rx_mode(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 fctrl, vlnctrl;
	u8 *addr_list = NULL;
	int addr_count = 0;

	/* Check for Promiscuous and All Multicast modes */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

	if (netdev->flags & IFF_PROMISC) {
		hw->addr_ctrl.user_set_promisc = 1;
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		vlnctrl &= ~IXGBE_VLNCTRL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			fctrl |= IXGBE_FCTRL_MPE;
			fctrl &= ~IXGBE_FCTRL_UPE;
		} else {
			fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
		}
		vlnctrl |= IXGBE_VLNCTRL_VFE;
		hw->addr_ctrl.user_set_promisc = 0;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

	/* reprogram secondary unicast list */
	hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);

	/* reprogram multicast list */
	addr_count = netdev->mc_count;
	if (addr_count)
		addr_list = netdev->mc_list->dmi_addr;
	hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
	                                ixgbe_addr_list_itr);
}

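/*
 * Before enabling NAPI, point any vector that services only tx or only
 * rx rings at the lighter single-purpose poll routine; mixed vectors
 * keep their existing rx/tx handler.
 */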
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = adapter->q_vector[q_idx];
		napi = &q_vector->napi;
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			if (!q_vector->rxr_count || !q_vector->txr_count) {
				if (q_vector->txr_count == 1)
					napi->poll = &ixgbe_clean_txonly;
				else if (q_vector->rxr_count == 1)
					napi->poll = &ixgbe_clean_rxonly;
			}
		}

		napi_enable(napi);
	}
}

static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
	int q_idx;
	struct ixgbe_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/* legacy and MSI only use one vector */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

#ifdef CONFIG_IXGBE_DCB
/*
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * This is called by the driver on open to configure the DCB hardware.
 * This is also called by the gennetlink interface when reconfiguring
 * the DCB state.
 */
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl, vlnctrl;
	int i, j;

	ixgbe_dcb_check_config(&adapter->dcb_cfg);
	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);

	/* reconfigure the hardware */
	ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
		/* PThresh workaround for Tx hang with DFP enabled. */
		txdctl |= 32;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
	}
	/* Enable VLAN tag insert/strip */
	vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB) {
		vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
	} else if (hw->mac.type == ixgbe_mac_82599EB) {
		vlnctrl |= IXGBE_VLNCTRL_VFE;
		vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
		for (i = 0; i < adapter->num_rx_queues; i++) {
			j = adapter->rx_ring[i].reg_idx;
			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
			vlnctrl |= IXGBE_RXDCTL_VME;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
		}
	}
	hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
}

#endif
Auke Kok9a799d72007-09-15 14:07:45 -07002463static void ixgbe_configure(struct ixgbe_adapter *adapter)
2464{
2465 struct net_device *netdev = adapter->netdev;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002466 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07002467 int i;
2468
Christopher Leech2c5645c2008-08-26 04:27:02 -07002469 ixgbe_set_rx_mode(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002470
2471 ixgbe_restore_vlan(adapter);
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08002472#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08002473 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
Yi Zoub352e402009-11-06 12:55:38 +00002474 if (hw->mac.type == ixgbe_mac_82598EB)
2475 netif_set_gso_max_size(netdev, 32768);
2476 else
2477 netif_set_gso_max_size(netdev, 65536);
Alexander Duyck2f90b862008-11-20 20:52:10 -08002478 ixgbe_configure_dcb(adapter);
2479 } else {
2480 netif_set_gso_max_size(netdev, 65536);
2481 }
2482#else
2483 netif_set_gso_max_size(netdev, 65536);
2484#endif
Auke Kok9a799d72007-09-15 14:07:45 -07002485
Yi Zoueacd73f2009-05-13 13:11:06 +00002486#ifdef IXGBE_FCOE
2487 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
2488 ixgbe_configure_fcoe(adapter);
2489
2490#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002491 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2492 for (i = 0; i < adapter->num_tx_queues; i++)
2493 adapter->tx_ring[i].atr_sample_rate =
2494 adapter->atr_sample_rate;
2495 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
2496 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
2497 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
2498 }
2499
Auke Kok9a799d72007-09-15 14:07:45 -07002500 ixgbe_configure_tx(adapter);
2501 ixgbe_configure_rx(adapter);
2502 for (i = 0; i < adapter->num_rx_queues; i++)
2503 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002504 (adapter->rx_ring[i].count - 1));
Auke Kok9a799d72007-09-15 14:07:45 -07002505}
2506
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002507static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2508{
2509 switch (hw->phy.type) {
2510 case ixgbe_phy_sfp_avago:
2511 case ixgbe_phy_sfp_ftl:
2512 case ixgbe_phy_sfp_intel:
2513 case ixgbe_phy_sfp_unknown:
2514 case ixgbe_phy_tw_tyco:
2515 case ixgbe_phy_tw_unknown:
2516 return true;
2517 default:
2518 return false;
2519 }
2520}
2521
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002522/**
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002523 * ixgbe_sfp_link_config - set up SFP+ link
2524 * @adapter: pointer to private adapter struct
2525 **/
2526static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
2527{
2528 struct ixgbe_hw *hw = &adapter->hw;
2529
2530 if (hw->phy.multispeed_fiber) {
2531 /*
2532 * In multispeed fiber setups, the device may not have
2533 * had a physical connection when the driver loaded.
2534 * If that's the case, the initial link configuration
2535 * couldn't get the MAC into 10G or 1G mode, so we'll
2536 * never have a link status change interrupt fire.
2537 * We need to try to force an autonegotiation
2538 * session, then bring up link.
2539 */
2540 hw->mac.ops.setup_sfp(hw);
2541 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
2542 schedule_work(&adapter->multispeed_fiber_task);
2543 } else {
2544 /*
2545 * Direct Attach Cu and non-multispeed fiber modules
2546 * still need to be configured properly prior to
2547 * attempting link.
2548 */
2549 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
2550 schedule_work(&adapter->sfp_config_module_task);
2551 }
2552}
2553
2554/**
2555 * ixgbe_non_sfp_link_config - set up non-SFP+ link
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002556 * @hw: pointer to private hardware struct
2557 *
2558 * Returns 0 on success, negative on failure
2559 **/
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002560static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002561{
2562 u32 autoneg;
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00002563 bool negotiation, link_up = false;
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002564 u32 ret = IXGBE_ERR_LINK_SETUP;
2565
2566 if (hw->mac.ops.check_link)
2567 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
2568
2569 if (ret)
2570 goto link_cfg_out;
2571
2572 if (hw->mac.ops.get_link_capabilities)
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00002573 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002574 if (ret)
2575 goto link_cfg_out;
2576
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00002577 if (hw->mac.ops.setup_link)
2578 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002579link_cfg_out:
2580 return ret;
2581}
2582
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002583#define IXGBE_MAX_RX_DESC_POLL 10
2584static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2585 int rxr)
2586{
2587 int j = adapter->rx_ring[rxr].reg_idx;
2588 int k;
2589
2590 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
2591 if (IXGBE_READ_REG(&adapter->hw,
2592 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
2593 break;
2594 else
2595 msleep(1);
2596 }
2597 if (k >= IXGBE_MAX_RX_DESC_POLL) {
2598 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
2599 "not set within the polling period\n", rxr);
2600 }
2601 ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
2602 (adapter->rx_ring[rxr].count - 1));
2603}
2604
Auke Kok9a799d72007-09-15 14:07:45 -07002605static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2606{
2607 struct net_device *netdev = adapter->netdev;
Auke Kok9a799d72007-09-15 14:07:45 -07002608 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002609 int i, j = 0;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002610 int num_rx_rings = adapter->num_rx_queues;
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002611 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07002612 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002613 u32 txdctl, rxdctl, mhadd;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002614 u32 dmatxctl;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002615 u32 gpie;
Auke Kok9a799d72007-09-15 14:07:45 -07002616
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08002617 ixgbe_get_hw_control(adapter);
2618
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002619 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
2620 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
Auke Kok9a799d72007-09-15 14:07:45 -07002621 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2622 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002623 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
Auke Kok9a799d72007-09-15 14:07:45 -07002624 } else {
2625 /* MSI only */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002626 gpie = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002627 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002628 /* XXX: to interrupt immediately for EICS writes, enable this */
2629 /* gpie |= IXGBE_GPIE_EIMEN; */
2630 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2631 }
2632
2633 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2634 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
2635 * specifically only auto-mask Tx and Rx interrupts */
2636 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07002637 }
2638
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07002639 /* Enable fan failure interrupt if media type is copper */
2640 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2641 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2642 gpie |= IXGBE_SDP1_GPIEN;
2643 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2644 }
2645
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002646 if (hw->mac.type == ixgbe_mac_82599EB) {
2647 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2648 gpie |= IXGBE_SDP1_GPIEN;
2649 gpie |= IXGBE_SDP2_GPIEN;
2650 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2651 }
2652
Yi Zou63f39bd2009-05-17 12:34:35 +00002653#ifdef IXGBE_FCOE
2654 /* adjust max frame to be able to do baby jumbo for FCoE */
Yi Zouf34c5c82009-08-14 12:42:17 +00002655 if ((netdev->features & NETIF_F_FCOE_MTU) &&
Yi Zou63f39bd2009-05-17 12:34:35 +00002656 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2657 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
2658
2659#endif /* IXGBE_FCOE */
Auke Kok9a799d72007-09-15 14:07:45 -07002660 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
Auke Kok9a799d72007-09-15 14:07:45 -07002661 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2662 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2663 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2664
2665 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2666 }
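	/*
	 * Worked example (illustrative): with the default MTU of 1500,
	 * max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes,
	 * which lands in the MFS field in the upper 16 bits of MHADD.
	 */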
2667
2668 for (i = 0; i < adapter->num_tx_queues; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002669 j = adapter->tx_ring[i].reg_idx;
2670 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002671 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2672 txdctl |= (8 << 16);
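		/*
		 * Illustrative note: the WTHRESH field of TXDCTL starts at
		 * bit 16, so (8 << 16) places the value 8 in that field.
		 */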
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002673 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2674 }
2675
2676 if (hw->mac.type == ixgbe_mac_82599EB) {
2677 /* DMATXCTL.EN must be set after all Tx queue config is done */
2678 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2679 dmatxctl |= IXGBE_DMATXCTL_TE;
2680 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2681 }
2682 for (i = 0; i < adapter->num_tx_queues; i++) {
2683 j = adapter->tx_ring[i].reg_idx;
2684 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
Auke Kok9a799d72007-09-15 14:07:45 -07002685 txdctl |= IXGBE_TXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002686 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07002687 }
2688
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002689 for (i = 0; i < num_rx_rings; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002690 j = adapter->rx_ring[i].reg_idx;
2691 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2692 /* enable PTHRESH=32 descriptors (half the internal cache)
2693 * and HTHRESH=0 descriptors (to minimize latency on fetch);
2694 * this also removes a pesky rx_no_buffer_count increment */
2695 rxdctl |= 0x0020;
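		/*
		 * Illustrative note: PTHRESH occupies the low bits of RXDCTL,
		 * so 0x0020 == 32 descriptors, while HTHRESH (starting at
		 * bit 8) stays 0, matching the comment above.
		 */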
Auke Kok9a799d72007-09-15 14:07:45 -07002696 rxdctl |= IXGBE_RXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002697 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002698 if (hw->mac.type == ixgbe_mac_82599EB)
2699 ixgbe_rx_desc_queue_enable(adapter, i);
Auke Kok9a799d72007-09-15 14:07:45 -07002700 }
2701 /* enable all receives */
2702 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002703 if (hw->mac.type == ixgbe_mac_82598EB)
2704 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
2705 else
2706 rxdctl |= IXGBE_RXCTRL_RXEN;
2707 hw->mac.ops.enable_rx_dma(hw, rxdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07002708
2709 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2710 ixgbe_configure_msix(adapter);
2711 else
2712 ixgbe_configure_msi_and_legacy(adapter);
2713
2714 clear_bit(__IXGBE_DOWN, &adapter->state);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002715 ixgbe_napi_enable_all(adapter);
2716
2717 /* clear any pending interrupts, may auto mask */
2718 IXGBE_READ_REG(hw, IXGBE_EICR);
2719
Auke Kok9a799d72007-09-15 14:07:45 -07002720 ixgbe_irq_enable(adapter);
2721
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002722 /*
Don Skidmorebf069c92009-05-07 10:39:54 +00002723 * If this adapter has a fan, check to see if we had a failure
2724 * before we enabled the interrupt.
2725 */
2726 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2727 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2728 if (esdp & IXGBE_ESDP_SDP1)
2729 DPRINTK(DRV, CRIT,
2730 "Fan has stopped, replace the adapter\n");
2731 }
2732
2733 /*
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002734 * For hot-pluggable SFP+ devices, a new SFP+ module may have
Don Skidmore19343de2009-07-02 12:50:31 +00002735 * arrived before interrupts were enabled but after probe. Such
2736 * devices wouldn't have their type identified yet. We need to
2737 * kick off the SFP+ module setup first, then try to bring up link.
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002738 * If we're not hot-pluggable SFP+, we just need to configure link
2739 * and bring it up.
2740 */
Don Skidmore19343de2009-07-02 12:50:31 +00002741 if (hw->phy.type == ixgbe_phy_unknown) {
2742 err = hw->phy.ops.identify(hw);
2743 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
Don Skidmore5da43c12009-07-02 12:50:52 +00002744 /*
2745 * Take the device down and schedule the sfp config work item,
2746 * which will unregister_netdev and log it.
2747 */
Don Skidmore19343de2009-07-02 12:50:31 +00002748 ixgbe_down(adapter);
Don Skidmore5da43c12009-07-02 12:50:52 +00002749 schedule_work(&adapter->sfp_config_module_task);
Don Skidmore19343de2009-07-02 12:50:31 +00002750 return err;
2751 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002752 }
2753
2754 if (ixgbe_is_sfp(hw)) {
2755 ixgbe_sfp_link_config(adapter);
2756 } else {
2757 err = ixgbe_non_sfp_link_config(hw);
2758 if (err)
2759 DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
2760 }
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002761
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002762 for (i = 0; i < adapter->num_tx_queues; i++)
2763 set_bit(__IXGBE_FDIR_INIT_DONE,
2764 &(adapter->tx_ring[i].reinit_state));
2765
Peter P Waskiewicz Jr1da100b2009-01-19 16:55:03 -08002766 /* enable transmits */
2767 netif_tx_start_all_queues(netdev);
2768
Auke Kok9a799d72007-09-15 14:07:45 -07002769 /* bring the link up in the watchdog, this could race with our first
2770 * link up interrupt but shouldn't be a problem */
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002771 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2772 adapter->link_check_timeout = jiffies;
Auke Kok9a799d72007-09-15 14:07:45 -07002773 mod_timer(&adapter->watchdog_timer, jiffies);
2774 return 0;
2775}
2776
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002777void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
2778{
2779 WARN_ON(in_interrupt());
2780 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
2781 msleep(1);
2782 ixgbe_down(adapter);
2783 ixgbe_up(adapter);
2784 clear_bit(__IXGBE_RESETTING, &adapter->state);
2785}
2786
Auke Kok9a799d72007-09-15 14:07:45 -07002787int ixgbe_up(struct ixgbe_adapter *adapter)
2788{
2789 /* hardware has been reset, we need to reload some things */
2790 ixgbe_configure(adapter);
2791
2792 return ixgbe_up_complete(adapter);
2793}
2794
2795void ixgbe_reset(struct ixgbe_adapter *adapter)
2796{
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002797 struct ixgbe_hw *hw = &adapter->hw;
Don Skidmore8ca783a2009-05-26 20:40:47 -07002798 int err;
2799
2800 err = hw->mac.ops.init_hw(hw);
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00002801 switch (err) {
2802 case 0:
2803 case IXGBE_ERR_SFP_NOT_PRESENT:
2804 break;
2805 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
2806 dev_err(&adapter->pdev->dev, "master disable timed out\n");
2807 break;
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00002808 case IXGBE_ERR_EEPROM_VERSION:
2809 /* We are running on a pre-production device, log a warning */
2810 dev_warn(&adapter->pdev->dev, "This device is a pre-production "
2811 "adapter/LOM. Please be aware there may be issues "
2812 "associated with your hardware. If you are "
2813 "experiencing problems please contact your Intel or "
2814 "hardware representative who provided you with this "
2815 "hardware.\n");
2816 break;
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00002817 default:
2818 dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
2819 }
Auke Kok9a799d72007-09-15 14:07:45 -07002820
2821 /* reprogram the RAR[0] in case user changed it. */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002822 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
Auke Kok9a799d72007-09-15 14:07:45 -07002823}
2824
Auke Kok9a799d72007-09-15 14:07:45 -07002825/**
2826 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
2827 * @adapter: board private structure
2828 * @rx_ring: ring to free buffers from
2829 **/
2830static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002831 struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002832{
2833 struct pci_dev *pdev = adapter->pdev;
2834 unsigned long size;
2835 unsigned int i;
2836
2837 /* Free all the Rx ring sk_buffs */
2838
2839 for (i = 0; i < rx_ring->count; i++) {
2840 struct ixgbe_rx_buffer *rx_buffer_info;
2841
2842 rx_buffer_info = &rx_ring->rx_buffer_info[i];
2843 if (rx_buffer_info->dma) {
2844 pci_unmap_single(pdev, rx_buffer_info->dma,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002845 rx_ring->rx_buf_len,
2846 PCI_DMA_FROMDEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07002847 rx_buffer_info->dma = 0;
2848 }
2849 if (rx_buffer_info->skb) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00002850 struct sk_buff *skb = rx_buffer_info->skb;
Auke Kok9a799d72007-09-15 14:07:45 -07002851 rx_buffer_info->skb = NULL;
Alexander Duyckf8212f92009-04-27 22:42:37 +00002852 do {
2853 struct sk_buff *this = skb;
2854 skb = skb->prev;
2855 dev_kfree_skb(this);
2856 } while (skb);
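			/*
			 * Walking skb->prev above frees a whole chain: with
			 * hardware RSC on 82599, several received buffers of
			 * one flow can be linked through skb->prev, so freeing
			 * only the head skb would leak the rest.
			 */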
Auke Kok9a799d72007-09-15 14:07:45 -07002857 }
2858 if (!rx_buffer_info->page)
2859 continue;
Jesse Brandeburg4f57ca62009-06-30 11:44:56 +00002860 if (rx_buffer_info->page_dma) {
2861 pci_unmap_page(pdev, rx_buffer_info->page_dma,
2862 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
2863 rx_buffer_info->page_dma = 0;
2864 }
Auke Kok9a799d72007-09-15 14:07:45 -07002865 put_page(rx_buffer_info->page);
2866 rx_buffer_info->page = NULL;
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07002867 rx_buffer_info->page_offset = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002868 }
2869
2870 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2871 memset(rx_ring->rx_buffer_info, 0, size);
2872
2873 /* Zero out the descriptor ring */
2874 memset(rx_ring->desc, 0, rx_ring->size);
2875
2876 rx_ring->next_to_clean = 0;
2877 rx_ring->next_to_use = 0;
2878
Jesse Brandeburg9891ca72009-03-13 22:14:50 +00002879 if (rx_ring->head)
2880 writel(0, adapter->hw.hw_addr + rx_ring->head);
2881 if (rx_ring->tail)
2882 writel(0, adapter->hw.hw_addr + rx_ring->tail);
Auke Kok9a799d72007-09-15 14:07:45 -07002883}
2884
2885/**
2886 * ixgbe_clean_tx_ring - Free Tx Buffers
2887 * @adapter: board private structure
2888 * @tx_ring: ring to be cleaned
2889 **/
2890static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002891 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002892{
2893 struct ixgbe_tx_buffer *tx_buffer_info;
2894 unsigned long size;
2895 unsigned int i;
2896
2897 /* Free all the Tx ring sk_buffs */
2898
2899 for (i = 0; i < tx_ring->count; i++) {
2900 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2901 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2902 }
2903
2904 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2905 memset(tx_ring->tx_buffer_info, 0, size);
2906
2907 /* Zero out the descriptor ring */
2908 memset(tx_ring->desc, 0, tx_ring->size);
2909
2910 tx_ring->next_to_use = 0;
2911 tx_ring->next_to_clean = 0;
2912
Jesse Brandeburg9891ca72009-03-13 22:14:50 +00002913 if (tx_ring->head)
2914 writel(0, adapter->hw.hw_addr + tx_ring->head);
2915 if (tx_ring->tail)
2916 writel(0, adapter->hw.hw_addr + tx_ring->tail);
Auke Kok9a799d72007-09-15 14:07:45 -07002917}
2918
2919/**
Auke Kok9a799d72007-09-15 14:07:45 -07002920 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
2921 * @adapter: board private structure
2922 **/
2923static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
2924{
2925 int i;
2926
2927 for (i = 0; i < adapter->num_rx_queues; i++)
2928 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2929}
2930
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002931/**
2932 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
2933 * @adapter: board private structure
2934 **/
2935static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
2936{
2937 int i;
2938
2939 for (i = 0; i < adapter->num_tx_queues; i++)
2940 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2941}
2942
Auke Kok9a799d72007-09-15 14:07:45 -07002943void ixgbe_down(struct ixgbe_adapter *adapter)
2944{
2945 struct net_device *netdev = adapter->netdev;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002946 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07002947 u32 rxctrl;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002948 u32 txdctl;
2949 int i, j;
Auke Kok9a799d72007-09-15 14:07:45 -07002950
2951 /* signal that we are down to the interrupt handler */
2952 set_bit(__IXGBE_DOWN, &adapter->state);
2953
2954 /* disable receives */
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002955 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2956 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
Auke Kok9a799d72007-09-15 14:07:45 -07002957
2958 netif_tx_disable(netdev);
2959
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002960 IXGBE_WRITE_FLUSH(hw);
Auke Kok9a799d72007-09-15 14:07:45 -07002961 msleep(10);
2962
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002963 netif_tx_stop_all_queues(netdev);
2964
Auke Kok9a799d72007-09-15 14:07:45 -07002965 ixgbe_irq_disable(adapter);
2966
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002967 ixgbe_napi_disable_all(adapter);
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002968
Don Skidmore0a1f87c2009-09-18 09:45:43 +00002969 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
2970 del_timer_sync(&adapter->sfp_timer);
Auke Kok9a799d72007-09-15 14:07:45 -07002971 del_timer_sync(&adapter->watchdog_timer);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002972 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07002973
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002974 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2975 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
2976 cancel_work_sync(&adapter->fdir_reinit_task);
2977
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002978 /* disable transmits in the hardware now that interrupts are off */
2979 for (i = 0; i < adapter->num_tx_queues; i++) {
2980 j = adapter->tx_ring[i].reg_idx;
2981 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2982 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
2983 (txdctl & ~IXGBE_TXDCTL_ENABLE));
2984 }
PJ Waskiewicz88512532009-03-13 22:15:10 +00002985 /* Disable the Tx DMA engine on 82599 */
2986 if (hw->mac.type == ixgbe_mac_82599EB)
2987 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
2988 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
2989 ~IXGBE_DMATXCTL_TE));
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002990
Auke Kok9a799d72007-09-15 14:07:45 -07002991 netif_carrier_off(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002992
Paul Larson6f4a0e42008-06-24 17:00:56 -07002993 if (!pci_channel_offline(adapter->pdev))
2994 ixgbe_reset(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002995 ixgbe_clean_all_tx_rings(adapter);
2996 ixgbe_clean_all_rx_rings(adapter);
2997
Jeff Garzik5dd2d332008-10-16 05:09:31 -04002998#ifdef CONFIG_IXGBE_DCA
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07002999 /* since we reset the hardware DCA settings were cleared */
Alexander Duycke35ec122009-05-21 13:07:12 +00003000 ixgbe_setup_dca(adapter);
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07003001#endif
Auke Kok9a799d72007-09-15 14:07:45 -07003002}
3003
Auke Kok9a799d72007-09-15 14:07:45 -07003004/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003005 * ixgbe_poll - NAPI Rx polling callback
3006 * @napi: structure for representing this polling device
3007 * @budget: how many packets driver is allowed to clean
3008 *
3009 * This function is used for legacy and MSI, NAPI mode
Auke Kok9a799d72007-09-15 14:07:45 -07003010 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003011static int ixgbe_poll(struct napi_struct *napi, int budget)
Auke Kok9a799d72007-09-15 14:07:45 -07003012{
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00003013 struct ixgbe_q_vector *q_vector =
3014 container_of(napi, struct ixgbe_q_vector, napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003015 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00003016 int tx_clean_complete, work_done = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003017
Jeff Garzik5dd2d332008-10-16 05:09:31 -04003018#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08003019 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3020 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
3021 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
3022 }
3023#endif
3024
Alexander Duyckfe49f042009-06-04 16:00:09 +00003025 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
Herbert Xu78b6f4c2009-01-18 21:49:45 -08003026 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
Auke Kok9a799d72007-09-15 14:07:45 -07003027
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00003028 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08003029 work_done = budget;
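	/*
	 * Reporting work_done == budget keeps this vector in polled mode:
	 * the (work_done < budget) test below then fails, napi_complete()
	 * is skipped, and NAPI reschedules the poll until the Tx ring is
	 * fully cleaned.
	 */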
3030
David S. Miller53e52c72008-01-07 21:06:12 -08003031 /* If budget not fully consumed, exit the polling mode */
3032 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08003033 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00003034 if (adapter->rx_itr_setting & 1)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08003035 ixgbe_set_itr(adapter);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003036 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Nelson, Shannon835462f2009-04-27 22:42:54 +00003037 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07003038 }
Auke Kok9a799d72007-09-15 14:07:45 -07003039 return work_done;
3040}
3041
3042/**
3043 * ixgbe_tx_timeout - Respond to a Tx Hang
3044 * @netdev: network interface device structure
3045 **/
3046static void ixgbe_tx_timeout(struct net_device *netdev)
3047{
3048 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3049
3050 /* Do the reset outside of interrupt context */
3051 schedule_work(&adapter->reset_task);
3052}
3053
3054static void ixgbe_reset_task(struct work_struct *work)
3055{
3056 struct ixgbe_adapter *adapter;
3057 adapter = container_of(work, struct ixgbe_adapter, reset_task);
3058
Alexander Duyck2f90b862008-11-20 20:52:10 -08003059 /* If we're already down or resetting, just bail */
3060 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
3061 test_bit(__IXGBE_RESETTING, &adapter->state))
3062 return;
3063
Auke Kok9a799d72007-09-15 14:07:45 -07003064 adapter->tx_timeout_count++;
3065
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003066 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003067}
3068
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003069#ifdef CONFIG_IXGBE_DCB
3070static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003071{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003072 bool ret = false;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003073 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003074
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003075 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3076 return ret;
3077
3078 f->mask = 0x7 << 3;
3079 adapter->num_rx_queues = f->indices;
3080 adapter->num_tx_queues = f->indices;
3081 ret = true;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003082
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003083 return ret;
3084}
3085#endif
3086
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003087/**
3088 * ixgbe_set_rss_queues: Allocate queues for RSS
3089 * @adapter: board private structure to initialize
3090 *
3091 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
3092 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
3093 *
3094 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003095static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
3096{
3097 bool ret = false;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003098 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003099
3100 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003101 f->mask = 0xF;
3102 adapter->num_rx_queues = f->indices;
3103 adapter->num_tx_queues = f->indices;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003104 ret = true;
3105 } else {
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003106 ret = false;
3107 }
3108
3109 return ret;
3110}
3111
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003112/**
3113 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
3114 * @adapter: board private structure to initialize
3115 *
3116 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
3117 * to the original CPU that initiated the Tx session. This runs in addition
3118 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
3119 * Rx load across CPUs using RSS.
3120 *
3121 **/
3122static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3123{
3124 bool ret = false;
3125 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
3126
3127 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
3128 f_fdir->mask = 0;
3129
3130 /* Flow Director must have RSS enabled */
3131 if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
3132     ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3133      (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
3134 adapter->num_tx_queues = f_fdir->indices;
3135 adapter->num_rx_queues = f_fdir->indices;
3136 ret = true;
3137 } else {
3138 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3139 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3140 }
3141 return ret;
3142}
3143
Yi Zou0331a832009-05-17 12:33:52 +00003144#ifdef IXGBE_FCOE
3145/**
3146 * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
3147 * @adapter: board private structure to initialize
3148 *
3149 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
3150 * The ring feature mask is not used as a mask for FCoE, as it can take any 8
3151 * rx queues out of the max number of rx queues, instead, it is used as the
3152 * index of the first rx queue used by FCoE.
3153 *
3154 **/
3155static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3156{
3157 bool ret = false;
3158 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3159
3160 f->indices = min((int)num_online_cpus(), f->indices);
3161 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00003162 adapter->num_rx_queues = 1;
3163 adapter->num_tx_queues = 1;
Yi Zou0331a832009-05-17 12:33:52 +00003164#ifdef CONFIG_IXGBE_DCB
3165 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00003166			DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
Yi Zou0331a832009-05-17 12:33:52 +00003167 ixgbe_set_dcb_queues(adapter);
3168 }
3169#endif
3170 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00003171			DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
Yi Zou8faa2a72009-07-09 02:29:50 +00003172 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3173 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3174 ixgbe_set_fdir_queues(adapter);
3175 else
3176 ixgbe_set_rss_queues(adapter);
Yi Zou0331a832009-05-17 12:33:52 +00003177 }
3178 /* adding FCoE rx rings to the end */
3179 f->mask = adapter->num_rx_queues;
3180 adapter->num_rx_queues += f->indices;
Yi Zou8de8b2e2009-09-03 14:55:50 +00003181 adapter->num_tx_queues += f->indices;
Yi Zou0331a832009-05-17 12:33:52 +00003182
3183 ret = true;
3184 }
3185
3186 return ret;
3187}
3188
3189#endif /* IXGBE_FCOE */
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003190/**
3191 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
3192 * @adapter: board private structure to initialize
3193 *
3194 * This is the top level queue allocation routine. The order here is very
3195 * important, starting with the largest number of features turned on at once,
3196 * and ending with the smallest set of features. This way large combinations
3197 * can be allocated if they're turned on, and smaller combinations are the
3198 * fallthrough conditions.
3199 *
3200 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003201static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
3202{
Yi Zou0331a832009-05-17 12:33:52 +00003203#ifdef IXGBE_FCOE
3204 if (ixgbe_set_fcoe_queues(adapter))
3205 goto done;
3206
3207#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003208#ifdef CONFIG_IXGBE_DCB
3209 if (ixgbe_set_dcb_queues(adapter))
Wu Fengguangaf22ab12009-04-14 21:54:07 -07003210 goto done;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003211
3212#endif
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003213 if (ixgbe_set_fdir_queues(adapter))
3214 goto done;
3215
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003216 if (ixgbe_set_rss_queues(adapter))
Wu Fengguangaf22ab12009-04-14 21:54:07 -07003217 goto done;
3218
3219 /* fallback to base case */
3220 adapter->num_rx_queues = 1;
3221 adapter->num_tx_queues = 1;
3222
3223done:
3224 /* Notify the stack of the (possibly) reduced Tx Queue count. */
3225 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003226}
3227
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003228static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003229 int vectors)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003230{
3231 int err, vector_threshold;
3232
3233 /* We'll want at least 3 (vector_threshold):
3234 * 1) TxQ[0] Cleanup
3235 * 2) RxQ[0] Cleanup
3236 * 3) Other (Link Status Change, etc.)
3237 * 4) TCP Timer (optional)
3238 */
3239 vector_threshold = MIN_MSIX_COUNT;
3240
3241 /* The more we get, the more we will assign to Tx/Rx Cleanup
3242 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
3243 * Right now, we simply care about how many we'll get; we'll
3244 * set them up later while requesting irq's.
3245 */
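	/*
	 * Illustrative example: asking for 10 vectors when only 6 can be
	 * granted makes pci_enable_msix() return 6, so the loop retries
	 * with vectors = 6; a negative return gives up on MSI-X entirely.
	 */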
3246 while (vectors >= vector_threshold) {
3247 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003248 vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003249 if (!err) /* Success in acquiring all requested vectors. */
3250 break;
3251 else if (err < 0)
3252 vectors = 0; /* Nasty failure, quit now */
3253 else /* err == number of vectors we should try again with */
3254 vectors = err;
3255 }
3256
3257 if (vectors < vector_threshold) {
3258 /* Can't allocate enough MSI-X interrupts? Oh well.
3259 * This just means we'll go with either a single MSI
3260 * vector or fall back to legacy interrupts.
3261 */
3262 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
3263 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3264 kfree(adapter->msix_entries);
3265 adapter->msix_entries = NULL;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003266 } else {
3267 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
Peter P Waskiewicz Jreb7f1392009-02-01 01:18:58 -08003268 /*
3269 * Adjust for only the vectors we'll use, which is minimum
3270 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
3271 * vectors we were allocated.
3272 */
3273 adapter->num_msix_vectors = min(vectors,
3274 adapter->max_msix_q_vectors + NON_Q_VECTORS);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003275 }
3276}
3277
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003278/**
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003279 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003280 * @adapter: board private structure to initialize
3281 *
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003282 * Cache the descriptor ring offsets for RSS to the assigned rings.
3283 *
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003284 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003285static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003286{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003287 int i;
3288 bool ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003289
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003290 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3291 for (i = 0; i < adapter->num_rx_queues; i++)
3292 adapter->rx_ring[i].reg_idx = i;
3293 for (i = 0; i < adapter->num_tx_queues; i++)
3294 adapter->tx_ring[i].reg_idx = i;
3295 ret = true;
3296 } else {
3297 ret = false;
3298 }
3299
3300 return ret;
3301}
3302
3303#ifdef CONFIG_IXGBE_DCB
3304/**
3305 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
3306 * @adapter: board private structure to initialize
3307 *
3308 * Cache the descriptor ring offsets for DCB to the assigned rings.
3309 *
3310 **/
3311static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
3312{
3313 int i;
3314 bool ret = false;
3315 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
3316
3317 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3318 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
Alexander Duyck2f90b862008-11-20 20:52:10 -08003319 /* the number of queues is assumed to be symmetric */
3320 for (i = 0; i < dcb_i; i++) {
3321 adapter->rx_ring[i].reg_idx = i << 3;
3322 adapter->tx_ring[i].reg_idx = i << 2;
3323 }
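			/*
			 * Illustrative mapping for dcb_i == 8 on 82598:
			 * Rx rings land on register indices 0, 8, ..., 56
			 * (i << 3) and Tx rings on 0, 4, ..., 28 (i << 2).
			 */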
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003324 ret = true;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003325 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
PJ Waskiewiczf92ef202009-04-16 15:00:20 +00003326 if (dcb_i == 8) {
3327 /*
3328 * Tx TC0 starts at: descriptor queue 0
3329 * Tx TC1 starts at: descriptor queue 32
3330 * Tx TC2 starts at: descriptor queue 64
3331 * Tx TC3 starts at: descriptor queue 80
3332 * Tx TC4 starts at: descriptor queue 96
3333 * Tx TC5 starts at: descriptor queue 104
3334 * Tx TC6 starts at: descriptor queue 112
3335 * Tx TC7 starts at: descriptor queue 120
3336 *
3337 * Rx TC0-TC7 are offset by 16 queues each
3338 */
3339 for (i = 0; i < 3; i++) {
3340 adapter->tx_ring[i].reg_idx = i << 5;
3341 adapter->rx_ring[i].reg_idx = i << 4;
3342 }
3343 for ( ; i < 5; i++) {
3344 adapter->tx_ring[i].reg_idx =
3345 ((i + 2) << 4);
3346 adapter->rx_ring[i].reg_idx = i << 4;
3347 }
3348 for ( ; i < dcb_i; i++) {
3349 adapter->tx_ring[i].reg_idx =
3350 ((i + 8) << 3);
3351 adapter->rx_ring[i].reg_idx = i << 4;
3352 }
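				/*
				 * The three loops above reproduce the table in
				 * the comment: Tx reg_idx comes out as 0, 32,
				 * 64, 80, 96, 104, 112, 120, and Rx reg_idx as
				 * i << 4 (0, 16, ..., 112).
				 */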
3353
3354 ret = true;
3355 } else if (dcb_i == 4) {
3356 /*
3357 * Tx TC0 starts at: descriptor queue 0
3358 * Tx TC1 starts at: descriptor queue 64
3359 * Tx TC2 starts at: descriptor queue 96
3360 * Tx TC3 starts at: descriptor queue 112
3361 *
3362 * Rx TC0-TC3 are offset by 32 queues each
3363 */
3364 adapter->tx_ring[0].reg_idx = 0;
3365 adapter->tx_ring[1].reg_idx = 64;
3366 adapter->tx_ring[2].reg_idx = 96;
3367 adapter->tx_ring[3].reg_idx = 112;
3368 for (i = 0 ; i < dcb_i; i++)
3369 adapter->rx_ring[i].reg_idx = i << 5;
3370
3371 ret = true;
3372 } else {
3373 ret = false;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003374 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003375 } else {
3376 ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003377 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003378 } else {
3379 ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003380 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003381
3382 return ret;
3383}
3384#endif
3385
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003386/**
3387 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
3388 * @adapter: board private structure to initialize
3389 *
3390 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
3391 *
3392 **/
3393static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
3394{
3395 int i;
3396 bool ret = false;
3397
3398 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
3399 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3400 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
3401 for (i = 0; i < adapter->num_rx_queues; i++)
3402 adapter->rx_ring[i].reg_idx = i;
3403 for (i = 0; i < adapter->num_tx_queues; i++)
3404 adapter->tx_ring[i].reg_idx = i;
3405 ret = true;
3406 }
3407
3408 return ret;
3409}
3410
Yi Zou0331a832009-05-17 12:33:52 +00003411#ifdef IXGBE_FCOE
3412/**
3413 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
3414 * @adapter: board private structure to initialize
3415 *
3416 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
3417 *
3418 */
3419static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
3420{
Yi Zou8de8b2e2009-09-03 14:55:50 +00003421 int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
Yi Zou0331a832009-05-17 12:33:52 +00003422 bool ret = false;
3423 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3424
3425 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
3426#ifdef CONFIG_IXGBE_DCB
3427 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00003428 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
3429
Yi Zou0331a832009-05-17 12:33:52 +00003430 ixgbe_cache_ring_dcb(adapter);
Yi Zou8de8b2e2009-09-03 14:55:50 +00003431 /* find out queues in TC for FCoE */
3432 fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
3433 fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
3434 /*
3435 * In 82599, the number of Tx queues for each traffic
3436 * class for both 8-TC and 4-TC modes are:
3437 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
3438 * 8 TCs: 32 32 16 16 8 8 8 8
3439 * 4 TCs: 64 64 32 32
3440 * We have max 8 queues for FCoE, where 8 is the
3441 * FCoE redirection table size. If TC for FCoE is
3442 * less than or equal to TC3, we have enough queues
3443 * to add max of 8 queues for FCoE, so we start FCoE
3444 * tx descriptor from the next one, i.e., reg_idx + 1.
3445 * If TC for FCoE is above TC3, implying 8 TC mode,
3446 * and we need 8 for FCoE, we have to take all queues
3447 * in that traffic class for FCoE.
3448 */
3449 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
3450 fcoe_tx_i--;
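			/*
			 * Worked example (hypothetical config): fcoe->tc == 3
			 * in 8-TC mode gives rx_ring[3].reg_idx == 48 and
			 * tx_ring[3].reg_idx == 80, so FCoE rings start at
			 * fcoe_rx_i == 49 and fcoe_tx_i == 81; only a TC above
			 * TC3 triggers the fcoe_tx_i-- adjustment above.
			 */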
Yi Zou0331a832009-05-17 12:33:52 +00003451 }
3452#endif /* CONFIG_IXGBE_DCB */
3453 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Yi Zou8faa2a72009-07-09 02:29:50 +00003454 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3455 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3456 ixgbe_cache_ring_fdir(adapter);
3457 else
3458 ixgbe_cache_ring_rss(adapter);
3459
Yi Zou8de8b2e2009-09-03 14:55:50 +00003460 fcoe_rx_i = f->mask;
3461 fcoe_tx_i = f->mask;
Yi Zou0331a832009-05-17 12:33:52 +00003462 }
Yi Zou8de8b2e2009-09-03 14:55:50 +00003463 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
3464 adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
3465 adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
3466 }
Yi Zou0331a832009-05-17 12:33:52 +00003467 ret = true;
3468 }
3469 return ret;
3470}
3471
3472#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003473/**
3474 * ixgbe_cache_ring_register - Descriptor ring to register mapping
3475 * @adapter: board private structure to initialize
3476 *
3477 * Once we know the feature-set enabled for the device, we'll cache
3478 * the register offset the descriptor ring is assigned to.
3479 *
3480 * Note, the order the various feature calls is important. It must start with
3481 * the "most" features enabled at the same time, then trickle down to the
3482 * least amount of features turned on at once.
3483 **/
3484static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
3485{
3486 /* start with default case */
3487 adapter->rx_ring[0].reg_idx = 0;
3488 adapter->tx_ring[0].reg_idx = 0;
3489
Yi Zou0331a832009-05-17 12:33:52 +00003490#ifdef IXGBE_FCOE
3491 if (ixgbe_cache_ring_fcoe(adapter))
3492 return;
3493
3494#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003495#ifdef CONFIG_IXGBE_DCB
3496 if (ixgbe_cache_ring_dcb(adapter))
3497 return;
3498
3499#endif
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003500 if (ixgbe_cache_ring_fdir(adapter))
3501 return;
3502
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003503 if (ixgbe_cache_ring_rss(adapter))
3504 return;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003505}
3506
Auke Kok9a799d72007-09-15 14:07:45 -07003507/**
3508 * ixgbe_alloc_queues - Allocate memory for all rings
3509 * @adapter: board private structure to initialize
3510 *
3511 * We allocate one ring per queue at run-time since we don't know the
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003512 * number of queues at compile-time. The ring arrays are sized for
3513 * multiqueue operation, but should work fine with a single queue.
Auke Kok9a799d72007-09-15 14:07:45 -07003514 **/
Alexander Duyck2f90b862008-11-20 20:52:10 -08003515static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07003516{
3517 int i;
3518
3519 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003520 sizeof(struct ixgbe_ring), GFP_KERNEL);
Auke Kok9a799d72007-09-15 14:07:45 -07003521 if (!adapter->tx_ring)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003522 goto err_tx_ring_allocation;
Auke Kok9a799d72007-09-15 14:07:45 -07003523
3524 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003525 sizeof(struct ixgbe_ring), GFP_KERNEL);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003526 if (!adapter->rx_ring)
3527 goto err_rx_ring_allocation;
3528
3529 for (i = 0; i < adapter->num_tx_queues; i++) {
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003530 adapter->tx_ring[i].count = adapter->tx_ring_count;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003531 adapter->tx_ring[i].queue_index = i;
3532 }
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003533
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003534 for (i = 0; i < adapter->num_rx_queues; i++) {
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003535 adapter->rx_ring[i].count = adapter->rx_ring_count;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003536 adapter->rx_ring[i].queue_index = i;
Auke Kok9a799d72007-09-15 14:07:45 -07003537 }
3538
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003539 ixgbe_cache_ring_register(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003540
3541 return 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003542
3543err_rx_ring_allocation:
3544 kfree(adapter->tx_ring);
3545err_tx_ring_allocation:
3546 return -ENOMEM;
3547}
3548
3549/**
3550 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
3551 * @adapter: board private structure to initialize
3552 *
3553 * Attempt to configure the interrupts using the best available
3554 * capabilities of the hardware and the kernel.
3555 **/
Al Virofeea6a52008-11-27 15:34:07 -08003556static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003557{
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00003558 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003559 int err = 0;
3560 int vector, v_budget;
3561
3562 /*
3563 * It's easy to be greedy for MSI-X vectors, but it really
3564 * doesn't do us much good if we have a lot more vectors
3565 * than CPUs. So let's be conservative and only ask for
3566 * (roughly) twice as many vectors as there are CPUs.
3567 */
3568 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003569 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
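	/*
	 * Worked example (hypothetical box): 16 Rx + 16 Tx queues on an
	 * 8-CPU system gives v_budget = min(32, 16) + NON_Q_VECTORS, i.e.
	 * 16 queue vectors plus the non-queue (link, etc.) vectors.
	 */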
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003570
3571 /*
3572 * At the same time, hardware can only support a maximum of
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00003573 * hw->mac.max_msix_vectors vectors. With features
3574 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
3575 * descriptor queues supported by our device. Thus, we cap it off in
3576 * those rare cases where the CPU count also exceeds our vector limit.
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003577 */
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00003578 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003579
3580 /* A failure in MSI-X entry allocation isn't fatal, but it does
3581 * mean we disable MSI-X capabilities of the adapter. */
3582 adapter->msix_entries = kcalloc(v_budget,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003583 sizeof(struct msix_entry), GFP_KERNEL);
Alexander Duyck7a921c92009-05-06 10:43:28 +00003584 if (adapter->msix_entries) {
3585 for (vector = 0; vector < v_budget; vector++)
3586 adapter->msix_entries[vector].entry = vector;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003587
Alexander Duyck7a921c92009-05-06 10:43:28 +00003588 ixgbe_acquire_msix_vectors(adapter, v_budget);
3589
3590 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3591 goto out;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003592 }
3593
Alexander Duyck7a921c92009-05-06 10:43:28 +00003594 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
3595 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003596 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3597 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3598 adapter->atr_sample_rate = 0;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003599 ixgbe_set_num_queues(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003600
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003601 err = pci_enable_msi(adapter->pdev);
3602 if (!err) {
3603 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
3604 } else {
3605 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003606 "falling back to legacy. Error: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003607 /* reset err */
3608 err = 0;
3609 }
3610
3611out:
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003612 return err;
3613}
3614
Alexander Duyck7a921c92009-05-06 10:43:28 +00003615/**
3616 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
3617 * @adapter: board private structure to initialize
3618 *
3619 * We allocate one q_vector per queue interrupt. If allocation fails we
3620 * return -ENOMEM.
3621 **/
3622static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
3623{
3624 int q_idx, num_q_vectors;
3625 struct ixgbe_q_vector *q_vector;
3626 int napi_vectors;
3627 int (*poll)(struct napi_struct *, int);
3628
3629 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3630 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3631 napi_vectors = adapter->num_rx_queues;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003632 poll = &ixgbe_clean_rxtx_many;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003633 } else {
3634 num_q_vectors = 1;
3635 napi_vectors = 1;
3636 poll = &ixgbe_poll;
3637 }
3638
3639 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3640 q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
3641 if (!q_vector)
3642 goto err_out;
3643 q_vector->adapter = adapter;
Nelson, Shannonf7554a22009-09-18 09:46:06 +00003644 if (q_vector->txr_count && !q_vector->rxr_count)
3645 q_vector->eitr = adapter->tx_eitr_param;
3646 else
3647 q_vector->eitr = adapter->rx_eitr_param;
Alexander Duyckfe49f042009-06-04 16:00:09 +00003648 q_vector->v_idx = q_idx;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003649 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
Alexander Duyck7a921c92009-05-06 10:43:28 +00003650 adapter->q_vector[q_idx] = q_vector;
3651 }
3652
3653 return 0;
3654
3655err_out:
3656 while (q_idx) {
3657 q_idx--;
3658 q_vector = adapter->q_vector[q_idx];
3659 netif_napi_del(&q_vector->napi);
3660 kfree(q_vector);
3661 adapter->q_vector[q_idx] = NULL;
3662 }
3663 return -ENOMEM;
3664}
3665
3666/**
3667 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
3668 * @adapter: board private structure to initialize
3669 *
3670 * This function frees the memory allocated to the q_vectors. In addition if
3671 * NAPI is enabled it will delete any references to the NAPI struct prior
3672 * to freeing the q_vector.
3673 **/
3674static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
3675{
3676 int q_idx, num_q_vectors;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003677
Alexander Duyck91281fd2009-06-04 16:00:27 +00003678 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
Alexander Duyck7a921c92009-05-06 10:43:28 +00003679 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003680 else
Alexander Duyck7a921c92009-05-06 10:43:28 +00003681 num_q_vectors = 1;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003682
3683 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3684 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
Alexander Duyck7a921c92009-05-06 10:43:28 +00003685 adapter->q_vector[q_idx] = NULL;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003686 netif_napi_del(&q_vector->napi);
Alexander Duyck7a921c92009-05-06 10:43:28 +00003687 kfree(q_vector);
3688 }
3689}
3690
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	err = ixgbe_set_interrupt_capability(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
		        "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbe_alloc_queues(adapter);
	if (err) {
		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
	        "Tx Queue count = %u\n",
	        (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
	        adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_queues:
	ixgbe_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

/**
 * ixgbe_sfp_timer - timer that kicks off the search for a missing SFP+ module
 * @data: pointer to our adapter struct
 **/
static void ixgbe_sfp_timer(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;

	/*
	 * Do the sfp_timer outside of interrupt context due to the
	 * delays that sfp+ detection requires
	 */
	schedule_work(&adapter->sfp_task);
}

/**
 * ixgbe_sfp_task - worker thread to find a missing SFP+ module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             sfp_task);
	struct ixgbe_hw *hw = &adapter->hw;

	if ((hw->phy.type == ixgbe_phy_nl) &&
	    (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
		s32 ret = hw->phy.ops.identify_sfp(hw);
		if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
			goto reschedule;
		ret = hw->phy.ops.reset(hw);
		if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
			dev_err(&adapter->pdev->dev, "failed to initialize "
			        "because an unsupported SFP+ module type "
			        "was detected.\n"
			        "Reload the driver after installing a "
			        "supported module.\n");
			unregister_netdev(adapter->netdev);
		} else {
			DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
			        hw->phy.sfp_type);
		}
		/* don't need this routine any more */
		clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	}
	return;
reschedule:
	if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
		mod_timer(&adapter->sfp_timer,
		          round_jiffies(jiffies + (2 * HZ)));
}

/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
	int j;
	struct tc_configuration *tc;
#endif

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Set capability flags */
	rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
	adapter->ring_feature[RING_F_RSS].indices = rss;
	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
	if (hw->mac.type == ixgbe_mac_82598EB) {
		if (hw->device_id == IXGBE_DEV_ID_82598AT)
			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
	} else if (hw->mac.type == ixgbe_mac_82599EB) {
		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
		adapter->ring_feature[RING_F_FDIR].indices =
		                                         IXGBE_MAX_FDIR_INDICES;
		adapter->atr_sample_rate = 20;
		adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
		adapter->ring_feature[RING_F_FCOE].indices = 0;
		/* Default traffic class to use for FCoE */
		adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
#endif /* IXGBE_FCOE */
	}

#ifdef CONFIG_IXGBE_DCB
	/* Configure DCB traffic classes */
	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
		tc = &adapter->dcb_cfg.tc_config[j];
		tc->path[DCB_TX_CONFIG].bwg_id = 0;
		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->path[DCB_RX_CONFIG].bwg_id = 0;
		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
		tc->dcb_pfc = pfc_disabled;
	}
	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
	adapter->dcb_cfg.rx_pba_cfg = pba_equal;
	adapter->dcb_cfg.pfc_mode_enable = false;
	adapter->dcb_cfg.round_robin_enable = false;
	adapter->dcb_set_bitmap = 0x00;
	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
	                   adapter->ring_feature[RING_F_DCB].indices);

#endif

	/* default flow control settings */
	hw->fc.requested_mode = ixgbe_fc_full;
	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
#ifdef CONFIG_DCB
	adapter->last_lfc_mode = hw->fc.current_mode;
#endif
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
	hw->fc.send_xon = true;
	hw->fc.disable_fc_autoneg = false;

	/* enable itr by default in dynamic mode */
	adapter->rx_itr_setting = 1;
	adapter->rx_eitr_param = 20000;
	adapter->tx_itr_setting = 1;
	adapter->tx_eitr_param = 10000;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom_params_generic(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
	                                     &tx_ring->dma);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
	        "descriptor ring\n");
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
		        "vmalloc allocation failed for the rx desc ring\n");
		goto alloc_failed;
	}
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);

	if (!rx_ring->desc) {
		DPRINTK(PROBE, ERR,
		        "Memory allocation failed for the rx desc ring\n");
		vfree(rx_ring->rx_buffer_info);
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

alloc_failed:
	return -ENOMEM;
}

/**
 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbe_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
		return -EINVAL;

	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
	        netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}

/**
 * ixgbe_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbe_open(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBE_TESTING, &adapter->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = ixgbe_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbe_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbe_configure(adapter);

	err = ixgbe_request_irq(adapter);
	if (err)
		goto err_req_irq;

	err = ixgbe_up_complete(adapter);
	if (err)
		goto err_up;

	netif_tx_start_all_queues(netdev);

	return 0;

err_up:
	ixgbe_release_hw_control(adapter);
	ixgbe_free_irq(adapter);
err_req_irq:
err_setup_rx:
	ixgbe_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbe_free_all_tx_resources(adapter);
	ixgbe_reset(adapter);

	return err;
}

/**
 * ixgbe_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbe_close(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ixgbe_down(adapter);
	ixgbe_free_irq(adapter);

	ixgbe_free_all_tx_resources(adapter);
	ixgbe_free_all_rx_resources(adapter);

	ixgbe_release_hw_control(adapter);

	return 0;
}

#ifdef CONFIG_PM
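/**
 * ixgbe_resume - restore the device after a system suspend
 * @pdev: PCI device information struct
 *
 * Brings the device back to D0, re-enables bus mastering, rebuilds the
 * interrupt scheme, resets the hardware, clears any pending wake-up
 * status, and reopens the interface if it was running at suspend time.
 **/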
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
		       "suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_wake_from_d3(pdev, false);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
		       "device\n");
		return err;
	}

	ixgbe_reset(adapter);

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);

	if (netif_running(netdev)) {
		err = ixgbe_open(adapter->netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif /* CONFIG_PM */

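/**
 * __ixgbe_shutdown - common suspend/shutdown teardown
 * @pdev: PCI device information struct
 * @enable_wake: set to true if wake-up filters were armed
 *
 * Stops the interface if it is running, releases its resources, and
 * programs the wake-up filter registers according to adapter->wol
 * before disabling the device.  Shared by the suspend and shutdown
 * entry points.
 **/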
static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 ctrl, fctrl;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
		ixgbe_free_all_tx_resources(adapter);
		ixgbe_free_all_rx_resources(adapter);
	}
	ixgbe_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;

#endif
	if (wufc) {
		ixgbe_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IXGBE_WUFC_MC) {
			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
			fctrl |= IXGBE_FCTRL_MPE;
			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
		}

		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		ctrl |= IXGBE_CTRL_GIO_DIS;
		IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);

		IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
	}

	if (wufc && hw->mac.type == ixgbe_mac_82599EB)
		pci_wake_from_d3(pdev, true);
	else
		pci_wake_from_d3(pdev, false);

	*enable_wake = !!wufc;

	ixgbe_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
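/**
 * ixgbe_suspend - power the device down for system sleep
 * @pdev: PCI device information struct
 * @state: target sleep state (unused; wake-up policy comes from the
 *         adapter's WoL configuration)
 **/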
static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __ixgbe_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
#endif /* CONFIG_PM */

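/**
 * ixgbe_shutdown - called at system shutdown/reboot
 * @pdev: PCI device information struct
 *
 * Performs the common teardown and, if the system is powering off,
 * arms or disarms wake-up as appropriate and drops the device into
 * D3hot.
 **/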
static void ixgbe_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__ixgbe_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

/**
 * ixgbe_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbe_update_stats(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u64 total_mpc = 0;
	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;

	if (hw->mac.type == ixgbe_mac_82599EB) {
		u64 rsc_count = 0;
		for (i = 0; i < 16; i++)
			adapter->hw_rx_no_dma_resources +=
			                     IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		for (i = 0; i < adapter->num_rx_queues; i++)
			rsc_count += adapter->rx_ring[i].rsc_count;
		adapter->rsc_count = rsc_count;
	}

	adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	for (i = 0; i < 8; i++) {
		/* for packet buffers not used, the register should read 0 */
		mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
		missed_rx += mpc;
		adapter->stats.mpc[i] += mpc;
		total_mpc += adapter->stats.mpc[i];
		if (hw->mac.type == ixgbe_mac_82598EB)
			adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
		adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
		if (hw->mac.type == ixgbe_mac_82599EB) {
			adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
			                                    IXGBE_PXONRXCNT(i));
			adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
			                                   IXGBE_PXOFFRXCNT(i));
			adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
			                                      IXGBE_PXONRXC(i));
			adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
			                                     IXGBE_PXOFFRXC(i));
		}
		adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
		                                            IXGBE_PXONTXC(i));
		adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
		                                             IXGBE_PXOFFTXC(i));
	}
	adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
	/* work around hardware counting issue */
	adapter->stats.gprc -= missed_rx;

	/* 82598 hardware only has a 32 bit counter in the high register */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		u64 tmp;
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
		tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
							/* 4 high bits of GORC */
		adapter->stats.gorc += (tmp << 32);
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
		tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
							/* 4 high bits of GOTC */
		adapter->stats.gotc += (tmp << 32);
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
		adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
		adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
#ifdef IXGBE_FCOE
		adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
		adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
		adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
		adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
		adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
		adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
#endif /* IXGBE_FCOE */
	} else {
		adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
		adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
		adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
		adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
	}
	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
	adapter->stats.bprc += bprc;
	adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
	if (hw->mac.type == ixgbe_mac_82598EB)
		adapter->stats.mprc -= bprc;
	adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
	adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
	adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
	adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
	adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
	adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
	adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
	adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	adapter->stats.lxontxc += lxon;
	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	adapter->stats.lxofftxc += lxoff;
	/* RUC is clear-on-read; read it once per update (it was previously
	 * read a second time further down, which only added noise) */
	adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
	adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
	adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
	/*
	 * 82598 errata - tx of flow control packets is included in tx counters
	 */
	xon_off_tot = lxon + lxoff;
	adapter->stats.gptc -= xon_off_tot;
	adapter->stats.mptc -= xon_off_tot;
	adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
	adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
	adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
	adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
	adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
	adapter->stats.ptc64 -= xon_off_tot;
	adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
	adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
	adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
	adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
	adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
	adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Fill out the OS statistics structure */
	adapter->net_stats.multicast = adapter->stats.mprc;

	/* Rx Errors */
	adapter->net_stats.rx_errors = adapter->stats.crcerrs +
	                               adapter->stats.rlec;
	adapter->net_stats.rx_dropped = 0;
	adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_missed_errors = total_mpc;
}

/**
 * ixgbe_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbe_watchdog(unsigned long data)
{
	struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/*
		 * for legacy and MSI interrupts don't set any bits
		 * that are enabled for EIAM, because this operation
		 * would set *both* EIMS and EICS for any bit in EIAM
		 */
		IXGBE_WRITE_REG(hw, IXGBE_EICS,
		                (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
		goto watchdog_reschedule;
	}

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbe_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= ((u64)1 << i);
	}

	/* Cause software interrupt to ensure rx rings are cleaned */
	ixgbe_irq_rearm_queues(adapter, eics);

watchdog_reschedule:
	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}

/**
 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_multispeed_fiber_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             multispeed_fiber_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 autoneg;
	bool negotiation;

	adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
	autoneg = hw->phy.autoneg_advertised;
	if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
		hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
	if (hw->mac.ops.setup_link)
		hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
}

/**
 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_sfp_config_module_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             sfp_config_module_task);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 err;

	adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;

	/* Time for electrical oscillations to settle down */
	msleep(100);
	err = hw->phy.ops.identify_sfp(hw);

	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		dev_err(&adapter->pdev->dev, "failed to initialize because "
		        "an unsupported SFP+ module type was detected.\n"
		        "Reload the driver after installing a supported "
		        "module.\n");
		unregister_netdev(adapter->netdev);
		return;
	}
	hw->mac.ops.setup_sfp(hw);

	if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
		/* This will also work for DA Twinax connections */
		schedule_work(&adapter->multispeed_fiber_task);
	adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
}

/**
 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_fdir_reinit_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             fdir_reinit_task);
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			set_bit(__IXGBE_FDIR_INIT_DONE,
			        &(adapter->tx_ring[i].reinit_state));
	} else {
		DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
		        "ignored adding FDIR ATR filters\n");
	}
	/* Done FDIR Re-initialization, enable transmits */
	netif_tx_start_all_queues(adapter->netdev);
}

/**
 * ixgbe_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbe_watchdog_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter = container_of(work,
	                                             struct ixgbe_adapter,
	                                             watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;
	int i;
	struct ixgbe_ring *tx_ring;
	int some_tx_pending = 0;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up) {
#ifdef CONFIG_DCB
			if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
				for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
					hw->mac.ops.fc_enable(hw, i);
			} else {
				hw->mac.ops.fc_enable(hw, 0);
			}
#else
			hw->mac.ops.fc_enable(hw, 0);
#endif
		}

		if (link_up ||
		    time_after(jiffies, (adapter->link_check_timeout +
		                         IXGBE_TRY_LINK_TIMEOUT))) {
			adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
		}
		adapter->link_up = link_up;
		adapter->link_speed = link_speed;
	}

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			bool flow_rx, flow_tx;

			if (hw->mac.type == ixgbe_mac_82599EB) {
				u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
				u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
				flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
				flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
			} else {
				u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
				u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
				flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
				flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
			}

			printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
			       "Flow Control: %s\n",
			       netdev->name,
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
			        "10 Gbps" :
			        (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
			         "1 Gbps" : "unknown speed")),
			       ((flow_rx && flow_tx) ? "RX/TX" :
			        (flow_rx ? "RX" :
			         (flow_tx ? "TX" : "None"))));

			netif_carrier_on(netdev);
		} else {
			/* Force detection of hung controller */
			adapter->detect_tx_hung = true;
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);
		}
	}

	if (!netif_carrier_ok(netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			tx_ring = &adapter->tx_ring[i];
			if (tx_ring->next_to_use != tx_ring->next_to_clean) {
				some_tx_pending = 1;
				break;
			}
		}

		if (some_tx_pending) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			schedule_work(&adapter->reset_task);
		}
	}

	ixgbe_update_stats(adapter);
	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}

static int ixgbe_tso(struct ixgbe_adapter *adapter,
                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
                     u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
			                                         iph->daddr, 0,
			                                         IPPROTO_TCP,
			                                         0);
			adapter->hw_tso_ctxt++;
		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			                     &ipv6_hdr(skb)->daddr,
			                     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
		    (skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
		    (skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
		                   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
		    (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

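/**
 * ixgbe_tx_csum - set up a context descriptor for checksum offload
 * @adapter: board private structure
 * @tx_ring: ring on which the context descriptor will be placed
 * @skb: packet to be transmitted
 * @tx_flags: IXGBE_TX_FLAGS_* for this packet
 *
 * For CHECKSUM_PARTIAL (or VLAN-tagged) frames, encodes the header
 * layout and L4 protocol (TCP or SCTP, over IPv4 or IPv6) into context
 * index zero.  Returns true if a context descriptor was queued.
 **/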
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring,
                          struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
		                    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
			                    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
		                    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case cpu_to_be16(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					type_tucmd_mlhl |=
					        IXGBE_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					DPRINTK(PROBE, WARNING,
					        "partial checksum but proto=%x!\n",
					        skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}

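/**
 * ixgbe_tx_map - map skb data with skb_dma_map() and fill tx_buffer_info
 * @adapter: board private structure
 * @tx_ring: ring the buffers belong to
 * @skb: packet to be mapped
 * @tx_flags: IXGBE_TX_FLAGS_* for this packet
 * @first: index of the first descriptor used by this packet
 *
 * Splits the linear head and each page fragment into chunks of at most
 * IXGBE_MAX_DATA_PER_TXD bytes; for FCoE frames the trailing CRC/EOF
 * bytes are excluded from the mapped total.  Returns the number of
 * buffers used, or 0 if DMA mapping failed.
 **/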
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        struct ixgbe_ring *tx_ring,
                        struct sk_buff *skb, u32 tx_flags,
                        unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	dma_addr_t *map;

	i = tx_ring->next_to_use;

	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
		return 0;
	}

	map = skb_shinfo(skb)->dma_maps;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE)
		/* excluding fcoe_crc_eof for FCoE */
		total -= sizeof(struct fcoe_crc_eof);

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;

		if (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)frag->size, total);
		offset = 0;

		while (len) {
			i++;
			if (i == tx_ring->count)
				i = 0;

			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = map[f] + offset;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
		}
		if (total == 0)
			break;
	}

	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}

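/**
 * ixgbe_tx_queue - write the data descriptors and hand them to the hardware
 * @adapter: board private structure
 * @tx_ring: ring to place the descriptors on
 * @tx_flags: IXGBE_TX_FLAGS_* for this packet
 * @count: number of descriptors to write
 * @paylen: total frame length
 * @hdr_len: header length already accounted for by TSO
 *
 * Builds the cmd_type_len/olinfo_status words from tx_flags, writes one
 * advanced data descriptor per mapped buffer, marks the last one
 * EOP/RS, and bumps the tail register after a write barrier.
 **/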
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
                           struct ixgbe_ring *tx_ring,
                           int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
		                 IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
			                 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
		                 IXGBE_ADVTXD_POPTS_SHIFT;

	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
		olinfo_status |= IXGBE_ADVTXD_CC;
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_FSO)
			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
	}

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}

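/**
 * ixgbe_atr - sample a transmit flow for the 82599 Flow Director
 * @adapter: board private structure
 * @skb: transmitted buffer whose headers describe the flow
 * @queue: queue index that matching receive traffic should be steered to
 * @tx_flags: IXGBE_TX_FLAGS_* (used to recover the VLAN id)
 *
 * Builds a signature filter from the TCP/IPv4 headers so receive traffic
 * belonging to the same flow lands on @queue; non-TCP frames are ignored.
 **/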
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
                      int queue, u32 tx_flags)
{
	/* Right now, we support IPv4 only */
	struct ixgbe_atr_input atr_input;
	struct tcphdr *th;
	struct iphdr *iph = ip_hdr(skb);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 vlan_id, src_port, dst_port, flex_bytes;
	u32 src_ipv4_addr, dst_ipv4_addr;
	u8 l4type = 0;

	/* check if we're UDP or TCP */
	if (iph->protocol == IPPROTO_TCP) {
		th = tcp_hdr(skb);
		src_port = th->source;
		dst_port = th->dest;
		l4type |= IXGBE_ATR_L4TYPE_TCP;
		/* l4type IPv4 type is 0, no need to assign */
	} else {
		/* Unsupported L4 header, just bail here */
		return;
	}

	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));

	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
	          IXGBE_TX_FLAGS_VLAN_SHIFT;
	src_ipv4_addr = iph->saddr;
	dst_ipv4_addr = iph->daddr;
	flex_bytes = eth->h_proto;

	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
	ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
	ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
	ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
	ixgbe_atr_set_l4type_82599(&atr_input, l4type);
	/* src and dst are inverted, think how the receiver sees them */
	ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
	ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);

	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
}

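/*
 * Queue flow control: ixgbe_maybe_stop_tx() is the fast-path check that
 * enough descriptors are free, and __ixgbe_maybe_stop_tx() is the slow
 * path that stops the subqueue and re-checks after the memory barrier to
 * close the race with descriptors being freed concurrently on cleanup.
 */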
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                                 struct ixgbe_ring *tx_ring, int size)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbe_maybe_stop_tx(struct net_device *netdev,
                               struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}

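/**
 * ixgbe_select_queue - pick the transmit queue for an skb
 * @dev: network interface device structure
 * @skb: buffer to transmit
 *
 * With Flow Director (ATR) enabled, queue by the current CPU so sampled
 * filters steer receives back to the same core; with DCB enabled, queue
 * by VLAN priority; otherwise fall back to skb_tx_hash().
 **/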
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
		return smp_processor_id();

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;

	return skb_tx_hash(dev, skb);
}

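/**
 * ixgbe_xmit_frame - transmit entry point
 * @skb: buffer to transmit
 * @netdev: network interface device structure
 *
 * Resolves VLAN/DCB priority and the target ring, reserves descriptors
 * (returning NETDEV_TX_BUSY if the ring is too full), performs TSO,
 * checksum, or FCoE offload setup, maps the buffer, and posts the
 * descriptors to hardware.
 **/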
static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
                                    struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;
	unsigned int f;

	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
			tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
			tx_flags |= (skb->queue_mapping << 13);
		}
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		if (skb->priority != TC_PRIO_CONTROL) {
			tx_flags |= (skb->queue_mapping << 13);
			tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
			tx_flags |= IXGBE_TX_FLAGS_VLAN;
		} else {
			skb->queue_mapping =
				adapter->ring_feature[RING_F_DCB].indices - 1;
		}
	}

	r_idx = skb->queue_mapping;
	tx_ring = &adapter->tx_ring[r_idx];

	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
	    (skb->protocol == htons(ETH_P_FCOE))) {
		tx_flags |= IXGBE_TX_FLAGS_FCOE;
#ifdef IXGBE_FCOE
		r_idx = smp_processor_id();
		r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
		r_idx += adapter->ring_feature[RING_F_FCOE].mask;
		tx_ring = &adapter->tx_ring[r_idx];
#endif
	}
	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
	    (tx_flags & IXGBE_TX_FLAGS_FCOE))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;
	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
#ifdef IXGBE_FCOE
		/* setup tx offload for FCoE */
		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
	} else {
		if (skb->protocol == htons(ETH_P_IP))
			tx_flags |= IXGBE_TX_FLAGS_IPV4;
		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		if (tso)
			tx_flags |= IXGBE_TX_FLAGS_TSO;
		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
			 (skb->ip_summed == CHECKSUM_PARTIAL))
			tx_flags |= IXGBE_TX_FLAGS_CSUM;
	}

	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
	if (count) {
		/* add the ATR filter if ATR is on */
		if (tx_ring->atr_sample_rate) {
			++tx_ring->atr_count;
			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
			    test_bit(__IXGBE_FDIR_INIT_DONE,
				     &tx_ring->reinit_state)) {
				ixgbe_atr(adapter, skb, tx_ring->queue_index,
					  tx_flags);
				tx_ring->atr_count = 0;
			}
		}
		ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
			       hdr_len);
		ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	} else {
		dev_kfree_skb_any(skb);
		tx_ring->tx_buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}

/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}

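/*
 * Clause 45 MDIO read/write callbacks for the generic mdio layer; accesses
 * for any port address other than the PHY's own are rejected.
 */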
static int
ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 value;
	int rc;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
	if (!rc)
		rc = value;
	return rc;
}

static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
                            u16 addr, u16 value)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (prtad != hw->phy.mdio.prtad)
		return -EINVAL;
	return hw->phy.ops.write_reg(hw, addr, devad, value);
}

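/* MII ioctls are serviced by the generic mdio layer via the hooks above */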
static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}

/**
 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

/**
 * ixgbe_del_sanmac_netdev - Remove the SAN MAC address from the corresponding
 * netdev->dev_addrs
 * @dev: network interface device structure
 *
 * Returns non-zero on failure
 **/
static int ixgbe_del_sanmac_netdev(struct net_device *dev)
{
	int err = 0;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	if (is_valid_ether_addr(mac->san_addr)) {
		rtnl_lock();
		err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;

	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
		for (i = 0; i < num_q_vectors; i++) {
			struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
			ixgbe_msix_clean_many(0, q_vector);
		}
	} else {
		ixgbe_intr(adapter->pdev->irq, netdev);
	}
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
}
#endif

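/* net_device_ops: the entry points the network stack uses to drive us */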
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbe_open,
	.ndo_stop		= ixgbe_close,
	.ndo_start_xmit		= ixgbe_xmit_frame,
	.ndo_select_queue	= ixgbe_select_queue,
	.ndo_get_stats		= ixgbe_get_stats,
	.ndo_set_rx_mode	= ixgbe_set_rx_mode,
	.ndo_set_multicast_list	= ixgbe_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbe_set_mac,
	.ndo_change_mtu		= ixgbe_change_mtu,
	.ndo_tx_timeout		= ixgbe_tx_timeout,
	.ndo_vlan_rx_register	= ixgbe_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ixgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbe_vlan_rx_kill_vid,
	.ndo_do_ioctl		= ixgbe_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ixgbe_netpoll,
#endif
#ifdef IXGBE_FCOE
	.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
	.ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
	.ndo_fcoe_enable = ixgbe_fcoe_enable,
	.ndo_fcoe_disable = ixgbe_fcoe_disable,
#endif /* IXGBE_FCOE */
};

/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	static int cards_found;
	int i, err, pci_using_dac;
#ifdef IXGBE_FCOE
	u16 device_caps;
#endif
	u32 part_num, eec;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

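	/* prefer 64-bit DMA for descriptors and buffers, fall back to 32-bit */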
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
				        "configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM), ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev,
		        "pci_request_selected_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
	                      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	/* BAR 0 holds all of the device registers; the remaining BARs are
	 * walked but left unmapped */
	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->netdev_ops = &ixgbe_netdev_ops;
	ixgbe_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	strcpy(netdev->name, pci_name(pdev));

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type  = ii->mac;

	/* EEPROM */
	memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);
	/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
	if (!(eec & (1 << 8)))
		hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

	/* PHY */
	memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
	hw->phy.sfp_type = ixgbe_sfp_type_unknown;
	/* ixgbe_identify_phy_generic will set prtad and mmds properly */
	hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
	hw->phy.mdio.mmds = 0;
	hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	hw->phy.mdio.dev = netdev;
	hw->phy.mdio.mdio_read = ixgbe_mdio_read;
	hw->phy.mdio.mdio_write = ixgbe_mdio_write;

	/* set up this timer and work struct before calling get_invariants
	 * which might start the timer
	 */
	init_timer(&adapter->sfp_timer);
	adapter->sfp_timer.function = &ixgbe_sfp_timer;
	adapter->sfp_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);

	/* multispeed fiber has its own tasklet, called from GPI SDP1 context */
	INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);

	/* a new SFP+ module arrival, called from GPI SDP2 context */
	INIT_WORK(&adapter->sfp_config_module_task,
	          ixgbe_sfp_config_module_task);

	ii->get_invariants(hw);

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/*
	 * If there is a fan on this device and it has failed, log the
	 * failure.
	 */
	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		if (esdp & IXGBE_ESDP_SDP1)
			DPRINTK(PROBE, CRIT,
			        "Fan has stopped, replace the adapter\n");
	}

	/* reset_hw fills in the perm_addr as well */
	err = hw->mac.ops.reset_hw(hw);
	if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
	    hw->mac.type == ixgbe_mac_82598EB) {
		/*
		 * Start a kernel thread to watch for a module to arrive.
		 * Only do this for 82598, since 82599 will generate
		 * interrupts on module arrival.
		 */
		set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
		mod_timer(&adapter->sfp_timer,
		          round_jiffies(jiffies + (2 * HZ)));
		err = 0;
	} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
		dev_err(&adapter->pdev->dev, "failed to initialize because "
		        "an unsupported SFP+ module type was detected.\n"
		        "Reload the driver after installing a supported "
		        "module.\n");
		goto err_sw_init;
	} else if (err) {
		dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
		goto err_sw_init;
	}

	netdev->features = NETIF_F_SG |
	                   NETIF_F_IP_CSUM |
	                   NETIF_F_HW_VLAN_TX |
	                   NETIF_F_HW_VLAN_RX |
	                   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		netdev->features |= NETIF_F_SCTP_CSUM;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;

#ifdef CONFIG_IXGBE_DCB
	netdev->dcbnl_ops = &dcbnl_ops;
#endif

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
		if (hw->mac.ops.get_device_caps) {
			hw->mac.ops.get_device_caps(hw, &device_caps);
			if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
				adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
		}
	}
#endif /* IXGBE_FCOE */
	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
		netdev->features |= NETIF_F_LRO;

	/* make sure the EEPROM is good */
	if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "invalid MAC address\n");
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	switch (pdev->device) {
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
		                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		/* Enable ACPI wakeup in GRC */
		IXGBE_WRITE_REG(hw, IXGBE_GRC,
		                (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
		break;
	default:
		adapter->wol = 0;
		break;
	}
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* pick up the PCI bus settings for reporting later */
	hw->mac.ops.get_bus_info(hw);

	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
	         ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s" :
	          (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s" : "Unknown"),
	         ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
	          (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
	          (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
	          "Unknown"),
	         netdev->dev_addr);
	ixgbe_read_pba_num_generic(hw, &part_num);
	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
		         hw->mac.type, hw->phy.type, hw->phy.sfp_type,
		         (part_num >> 8), (part_num & 0xff));
	else
		dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
		         hw->mac.type, hw->phy.type,
		         (part_num >> 8), (part_num & 0xff));

	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
		         "this card is not sufficient for optimal "
		         "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
		         "PCI-Express slot is required.\n");
	}

	/* save off EEPROM version number */
	hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);

	/* reset the hardware with the new settings */
	err = hw->mac.ops.start_hw(hw);

	if (err == IXGBE_ERR_EEPROM_VERSION) {
		/* We are running on a pre-production device, log a warning */
		dev_warn(&pdev->dev, "This device is a pre-production "
		         "adapter/LOM. Please be aware there may be issues "
		         "associated with your hardware. If you are "
		         "experiencing problems please contact your Intel or "
		         "hardware representative who provided you with this "
		         "hardware.\n");
	}
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);

#ifdef CONFIG_IXGBE_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		ixgbe_setup_dca(adapter);
	}
#endif
	/* add san mac addr to netdev */
	ixgbe_add_sanmac_netdev(netdev);

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
	ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBE_DOWN, &adapter->state);
	/* clear the module not found bit to make sure the worker won't
	 * reschedule
	 */
	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	del_timer_sync(&adapter->sfp_timer);
	cancel_work_sync(&adapter->watchdog_task);
	cancel_work_sync(&adapter->sfp_task);
	cancel_work_sync(&adapter->multispeed_fiber_task);
	cancel_work_sync(&adapter->sfp_config_module_task);
	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
		cancel_work_sync(&adapter->fdir_reinit_task);
	flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		ixgbe_cleanup_fcoe(adapter);

#endif /* IXGBE_FCOE */

	/* remove the added san mac */
	ixgbe_del_sanmac_netdev(netdev);

	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
	                             IORESOURCE_MEM));

	DPRINTK(PROBE, INFO, "complete\n");

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		DPRINTK(PROBE, ERR,
		        "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_wake_from_d3(pdev, false);

		ixgbe_reset(adapter);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
		        "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

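/* PCI driver glue: probe/remove, power management, and AER error recovery */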
static struct pci_driver ixgbe_driver = {
	.name     = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe    = ixgbe_probe,
	.remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend  = ixgbe_suspend,
	.resume   = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}

#ifdef CONFIG_IXGBE_DCA
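/*
 * DCA notifier callback: fans the event out to every bound device via
 * __ixgbe_notify_dca() and reports failure back to the notifier chain.
 */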
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
                            void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
	                                 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */
#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

#endif
module_exit(ixgbe_exit_module);

/* ixgbe_main.c */