/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.44-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

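/*
 * ixgbe_release_hw_control/ixgbe_get_hw_control - hand device ownership
 * between firmware and driver by clearing or setting the DRV_LOAD bit in
 * the CTRL_EXT register.
 */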
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

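/*
 * ixgbe_irq_rearm_queues - force another interrupt for the given queues
 * @adapter: board private structure
 * @qmask: bitmask of the MSI-X vectors to rearm
 *
 * Writes the mask to EICS (EICS_EX on 82599) so the hardware raises the
 * corresponding queue interrupts again, e.g. when cleanup hit its work limit.
 */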
static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
	}
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
	tx_buffer_info->dma = 0;
	if (tx_buffer_info->skb) {
		skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
		              DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

/**
 * ixgbe_tx_is_paused - check if the tx ring is paused
 * @adapter: the ixgbe adapter
 * @tx_ring: the corresponding tx_ring
 *
 * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
 * corresponding TC of this tx_ring when checking TFCS.
 *
 * Returns : true if paused
 */
static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
                                      struct ixgbe_ring *tx_ring)
{
	int tc;
	u32 txoff = IXGBE_TFCS_TXOFF;

#ifdef CONFIG_IXGBE_DCB
	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
		int reg_idx = tx_ring->reg_idx;
		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;

		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			tc = reg_idx >> 2;
			txoff = IXGBE_TFCS_TXOFF0;
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			tc = 0;
			txoff = IXGBE_TFCS_TXOFF;
			if (dcb_i == 8) {
				/* TC0, TC1 */
				tc = reg_idx >> 5;
				if (tc == 2) /* TC2, TC3 */
					tc += (reg_idx - 64) >> 4;
				else if (tc == 3) /* TC4, TC5, TC6, TC7 */
					tc += 1 + ((reg_idx - 96) >> 3);
			} else if (dcb_i == 4) {
				/* TC0, TC1 */
				tc = reg_idx >> 6;
				if (tc == 1) {
					tc += (reg_idx - 64) >> 5;
					if (tc == 2) /* TC2, TC3 */
						tc += (reg_idx - 96) >> 4;
				}
			}
		}
		txoff <<= tc;
	}
#endif
	return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !ixgbe_tx_is_paused(adapter, tx_ring)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  Tx Queue             <%d>\n"
			"  TDH, TDT             <%x>, <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  jiffies              <%lx>\n",
			tx_ring->queue_index,
			IXGBE_READ_REG(hw, tx_ring->head),
			IXGBE_READ_REG(hw, tx_ring->tail),
			tx_ring->next_to_use, eop,
			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */

static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;
				unsigned int hlen = skb_headlen(skb);

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
#ifdef IXGBE_FCOE
				/* adjust for FCoE Sequence Offload */
				if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
				    && (skb->protocol == htons(ETH_P_FCOE)) &&
				    skb_is_gso(skb)) {
					hlen = skb_transport_offset(skb) +
						sizeof(struct fc_frame_header) +
						sizeof(struct fcoe_crc_eof);
					segs = DIV_ROUND_UP(skb->len - hlen,
						skb_shinfo(skb)->gso_size);
				}
#endif /* IXGBE_FCOE */
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * hlen) + skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
			                                 tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			DPRINTK(PROBE, INFO,
			        "tx hang %d detected, resetting adapter\n",
			        adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if (count >= tx_ring->work_limit)
		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (count < tx_ring->work_limit);
}

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring - adapter->rx_ring;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
			rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
			rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		}
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring - adapter->tx_ring;

	if (tx_ring->cpu != cpu) {
		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
			txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
			txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
		}
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	/* always use CB2 mode, difference is masked in the CB driver */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
	}
}

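/*
 * __ixgbe_notify_dca - handle DCA provider add/remove events
 * @dev: device the notification refers to
 * @data: pointer to the DCA event code
 *
 * On DCA_PROVIDER_ADD the driver registers as a DCA requester and programs
 * the DCA control registers; on DCA_PROVIDER_REMOVE (or a failed add) it
 * unregisters and turns DCA back off.
 */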
static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	skb_record_rx_queue(skb, ring->queue_index);
	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
			vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			netif_rx(skb);
	}
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: rx descriptor carrying the hardware status/error fields
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
{
	u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

		/*
		 * 82599 errata, UDP frames with a 0 checksum can be marked as
		 * checksum errors.
		 */
		if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
		    (adapter->hw.mac.type == ixgbe_mac_82599EB))
			return;

		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
                                         struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = pci_map_page(pdev, bi->page,
			                            bi->page_offset,
			                            (PAGE_SIZE / 2),
			                            PCI_DMA_FROMDEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb;
			skb = netdev_alloc_skb(adapter->netdev,
			                       (rx_ring->rx_buf_len +
			                        NET_IP_ALIGN));

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
			bi->dma = pci_map_single(pdev, skb->data,
			                         rx_ring->rx_buf_len,
			                         PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
	        IXGBE_RXDADV_RSCCNT_MASK) >>
	        IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
	unsigned int frag_list_size = 0;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	return skb;
}

static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i, rsc_count = 0;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
	int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
			                 rx_ring->rx_buf_len,
			                 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			                   rx_buffer_info->page,
			                   rx_buffer_info->page_offset,
			                   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
			rsc_count = ixgbe_get_rsc_count(rx_desc);

		if (rsc_count) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
			             IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
			rx_ring->rsc_count += (rsc_count - 1);
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}

		if (staterr & IXGBE_RXD_STAT_EOP) {
			if (skb->prev)
				skb = ixgbe_transform_rsc_queue(skb);
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, rx_desc, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifdef IXGBE_FCOE
		/* if ddp, not passing to ULD unless for FCP_RSP or error */
		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
			ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
			if (!ddp_bytes)
				goto next_desc;
		}
#endif /* IXGBE_FCOE */
		ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
	/* include DDPed FCoE data */
	if (ddp_bytes > 0) {
		unsigned int mss;

		mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
			sizeof(struct fc_frame_header) -
			sizeof(struct fcoe_crc_eof);
		if (mss > 512)
			mss &= ~511;
		total_rx_bytes += ddp_bytes;
		total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
	}
#endif /* IXGBE_FCOE */

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
		                       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
			                      adapter->num_rx_queues,
			                      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
		                       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
			                      adapter->num_tx_queues,
			                      r_idx + 1);
		}

		if (q_vector->txr_count && !q_vector->rxr_count)
			/* tx only */
			q_vector->eitr = adapter->tx_eitr_param;
		else if (q_vector->rxr_count)
			/* rx or mixed */
			q_vector->eitr = adapter->rx_eitr_param;

		ixgbe_write_eitr(q_vector);
	}

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
		               v_idx);
	else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      this functionality is controlled by the InterruptThrottleRate module
 *      parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;


	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

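/*
 * ixgbe_set_itr_msix - update the interrupt throttle rate for one vector
 * @q_vector: vector whose tx and rx rings are examined
 *
 * Runs ixgbe_update_itr() over every ring owned by the vector, takes the
 * larger (more bulk-oriented) of the tx and rx latency classes, maps it to
 * a target interrupt rate and, if that changed, applies exponential
 * smoothing before writing it out with ixgbe_write_eitr().
 */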
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx;
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->tx_itr,
		                           tx_ring->total_packets,
		                           tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
		                    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->rx_itr,
		                           rx_ring->total_packets,
		                           rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
		                    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;

		ixgbe_write_eitr(q_vector);
	}

	return;
}

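/*
 * ixgbe_check_fan_failure - on fan-fail capable boards a GPI SDP1 interrupt
 * means the fan has stopped; log the failure and write the bit back to
 * clear the cause.
 */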
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}

static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		schedule_work(&adapter->multispeed_fiber_task);
	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		schedule_work(&adapter->sfp_config_module_task);
	} else {
		/* Interrupt isn't for us... */
		return;
	}
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		schedule_work(&adapter->watchdog_task);
	}
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/*
	 * Workaround for Silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which will later be done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (hw->mac.type == ixgbe_mac_82598EB)
		ixgbe_check_fan_failure(adapter, eicr);

	if (hw->mac.type == ixgbe_mac_82599EB) {
		ixgbe_check_sfp_event(adapter, eicr);

		/* Handle Flow Director Full threshold interrupt */
		if (eicr & IXGBE_EICR_FLOW_DIR) {
			int i;
			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
			/* Disable transmits before FDIR Re-initialization */
			netif_tx_stop_all_queues(netdev);
			for (i = 0; i < adapter->num_tx_queues; i++) {
				struct ixgbe_ring *tx_ring =
				                           &adapter->tx_ring[i];
				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
				                       &tx_ring->reinit_state))
					schedule_work(&adapter->fdir_reinit_task);
			}
		}
	}
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}

Alexander Duyckfe49f042009-06-04 16:00:09 +00001257static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1258 u64 qmask)
1259{
1260 u32 mask;
1261
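	/*
	 * 82598 keeps its queue interrupt bits in the low half of EIMS
	 * (IXGBE_EIMS_RTX_QUEUE), while 82599 spreads the 64-bit queue
	 * mask across the two EIMS_EX registers, 32 bits at a time.
	 */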
1262 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1263 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1264 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1265 } else {
1266 mask = (qmask & 0xFFFFFFFF);
1267 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
1268 mask = (qmask >> 32);
1269 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
1270 }
1271 /* skip the flush */
1272}
1273
1274static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
1275 u64 qmask)
1276{
1277 u32 mask;
1278
1279 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1280 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1281 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
1282 } else {
1283 mask = (qmask & 0xFFFFFFFF);
1284 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
1285 mask = (qmask >> 32);
1286 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
1287 }
1288 /* skip the flush */
1289}
1290
Auke Kok9a799d72007-09-15 14:07:45 -07001291static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
1292{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001293 struct ixgbe_q_vector *q_vector = data;
1294 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001295 struct ixgbe_ring *tx_ring;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001296 int i, r_idx;
Auke Kok9a799d72007-09-15 14:07:45 -07001297
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001298 if (!q_vector->txr_count)
1299 return IRQ_HANDLED;
1300
1301 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1302 for (i = 0; i < q_vector->txr_count; i++) {
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001303 tx_ring = &(adapter->tx_ring[r_idx]);
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001304 tx_ring->total_bytes = 0;
1305 tx_ring->total_packets = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001306 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001307 r_idx + 1);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001308 }
1309
Alexander Duyck91281fd2009-06-04 16:00:27 +00001310 /* disable interrupts on this vector only */
1311 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
1312 napi_schedule(&q_vector->napi);
1313
Auke Kok9a799d72007-09-15 14:07:45 -07001314 return IRQ_HANDLED;
1315}
1316
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001317/**
1318 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
1319 * @irq: unused
1320 * @data: pointer to our q_vector struct for this interrupt vector
1321 **/
Auke Kok9a799d72007-09-15 14:07:45 -07001322static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1323{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001324 struct ixgbe_q_vector *q_vector = data;
1325 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001326 struct ixgbe_ring *rx_ring;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001327 int r_idx;
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001328 int i;
Auke Kok9a799d72007-09-15 14:07:45 -07001329
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001330 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001331 for (i = 0; i < q_vector->rxr_count; i++) {
1332 rx_ring = &(adapter->rx_ring[r_idx]);
1333 rx_ring->total_bytes = 0;
1334 rx_ring->total_packets = 0;
1335 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1336 r_idx + 1);
1337 }
1338
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001339 if (!q_vector->rxr_count)
1340 return IRQ_HANDLED;
1341
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001342 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001343 rx_ring = &(adapter->rx_ring[r_idx]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001344 /* disable interrupts on this vector only */
Alexander Duyckfe49f042009-06-04 16:00:09 +00001345 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
Ben Hutchings288379f2009-01-19 16:43:59 -08001346 napi_schedule(&q_vector->napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001347
Auke Kok9a799d72007-09-15 14:07:45 -07001348 return IRQ_HANDLED;
1349}
1350
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001351static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
1352{
Alexander Duyck91281fd2009-06-04 16:00:27 +00001353 struct ixgbe_q_vector *q_vector = data;
1354 struct ixgbe_adapter *adapter = q_vector->adapter;
1355 struct ixgbe_ring *ring;
1356 int r_idx;
1357 int i;
1358
1359 if (!q_vector->txr_count && !q_vector->rxr_count)
1360 return IRQ_HANDLED;
1361
1362 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1363 for (i = 0; i < q_vector->txr_count; i++) {
1364 ring = &(adapter->tx_ring[r_idx]);
1365 ring->total_bytes = 0;
1366 ring->total_packets = 0;
1367 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1368 r_idx + 1);
1369 }
1370
1371 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1372 for (i = 0; i < q_vector->rxr_count; i++) {
1373 ring = &(adapter->rx_ring[r_idx]);
1374 ring->total_bytes = 0;
1375 ring->total_packets = 0;
1376 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1377 r_idx + 1);
1378 }
1379
1380 /* disable interrupts on this vector only */
1381 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
1382 napi_schedule(&q_vector->napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001383
1384 return IRQ_HANDLED;
1385}
1386
1387/**
1388 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
1389 * @napi: napi struct with our device's info in it
1390 * @budget: amount of work driver is allowed to do this pass, in packets
1391 *
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001392 * This function is optimized for cleaning one queue only on a single
1393 * q_vector.
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001394 **/
Auke Kok9a799d72007-09-15 14:07:45 -07001395static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1396{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001397 struct ixgbe_q_vector *q_vector =
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001398 container_of(napi, struct ixgbe_q_vector, napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001399 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001400 struct ixgbe_ring *rx_ring = NULL;
Auke Kok9a799d72007-09-15 14:07:45 -07001401 int work_done = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001402 long r_idx;
Auke Kok9a799d72007-09-15 14:07:45 -07001403
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001404 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001405 rx_ring = &(adapter->rx_ring[r_idx]);
Jeff Garzik5dd2d332008-10-16 05:09:31 -04001406#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08001407 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001408 ixgbe_update_rx_dca(adapter, rx_ring);
Jeb Cramerbd0362d2008-03-03 15:04:02 -08001409#endif
Auke Kok9a799d72007-09-15 14:07:45 -07001410
Herbert Xu78b6f4c2009-01-18 21:49:45 -08001411 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
Auke Kok9a799d72007-09-15 14:07:45 -07001412
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001413 /* If all Rx work done, exit the polling mode */
1414 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08001415 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00001416 if (adapter->rx_itr_setting & 1)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001417 ixgbe_set_itr_msix(q_vector);
Auke Kok9a799d72007-09-15 14:07:45 -07001418 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Alexander Duyckfe49f042009-06-04 16:00:09 +00001419 ixgbe_irq_enable_queues(adapter,
1420 ((u64)1 << q_vector->v_idx));
Auke Kok9a799d72007-09-15 14:07:45 -07001421 }
1422
1423 return work_done;
1424}
1425
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001426/**
Alexander Duyck91281fd2009-06-04 16:00:27 +00001427 * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001428 * @napi: napi struct with our device's info in it
1429 * @budget: amount of work driver is allowed to do this pass, in packets
1430 *
1431 * This function will clean more than one rx and tx queue associated with a
1432 * q_vector.
1433 **/
Alexander Duyck91281fd2009-06-04 16:00:27 +00001434static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001435{
1436 struct ixgbe_q_vector *q_vector =
1437 container_of(napi, struct ixgbe_q_vector, napi);
1438 struct ixgbe_adapter *adapter = q_vector->adapter;
Alexander Duyck91281fd2009-06-04 16:00:27 +00001439 struct ixgbe_ring *ring = NULL;
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001440 int work_done = 0, i;
1441 long r_idx;
Alexander Duyck91281fd2009-06-04 16:00:27 +00001442 bool tx_clean_complete = true;
1443
1444 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1445 for (i = 0; i < q_vector->txr_count; i++) {
1446 ring = &(adapter->tx_ring[r_idx]);
1447#ifdef CONFIG_IXGBE_DCA
1448 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1449 ixgbe_update_tx_dca(adapter, ring);
1450#endif
1451 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1452 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1453 r_idx + 1);
1454 }
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001455
1456 /* attempt to distribute budget to each queue fairly, but don't allow
1457 * the budget to go below 1 because we'll exit polling */
1458 budget /= (q_vector->rxr_count ?: 1);
1459 budget = max(budget, 1);
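	/* e.g. a 64-packet budget shared by 4 Rx rings gives each ring
	 * a 16-packet cleanup limit for this poll */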
1460 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1461 for (i = 0; i < q_vector->rxr_count; i++) {
Alexander Duyck91281fd2009-06-04 16:00:27 +00001462 ring = &(adapter->rx_ring[r_idx]);
Jeff Garzik5dd2d332008-10-16 05:09:31 -04001463#ifdef CONFIG_IXGBE_DCA
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001464 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
Alexander Duyck91281fd2009-06-04 16:00:27 +00001465 ixgbe_update_rx_dca(adapter, ring);
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001466#endif
Alexander Duyck91281fd2009-06-04 16:00:27 +00001467 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001468 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1469 r_idx + 1);
1470 }
1471
1472 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
Alexander Duyck91281fd2009-06-04 16:00:27 +00001473 ring = &(adapter->rx_ring[r_idx]);
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001474 /* If all Rx work done, exit the polling mode */
Jesse Brandeburg7f821872008-09-11 20:00:16 -07001475 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08001476 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00001477 if (adapter->rx_itr_setting & 1)
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001478 ixgbe_set_itr_msix(q_vector);
1479 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Alexander Duyckfe49f042009-06-04 16:00:09 +00001480 ixgbe_irq_enable_queues(adapter,
1481 ((u64)1 << q_vector->v_idx));
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001482 return 0;
1483 }
1484
1485 return work_done;
1486}
Alexander Duyck91281fd2009-06-04 16:00:27 +00001487
1488/**
1489 * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
1490 * @napi: napi struct with our device's info in it
1491 * @budget: amount of work driver is allowed to do this pass, in packets
1492 *
1493 * This function is optimized for cleaning one queue only on a single
1494 * q_vector.
1495 **/
1496static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
1497{
1498 struct ixgbe_q_vector *q_vector =
1499 container_of(napi, struct ixgbe_q_vector, napi);
1500 struct ixgbe_adapter *adapter = q_vector->adapter;
1501 struct ixgbe_ring *tx_ring = NULL;
1502 int work_done = 0;
1503 long r_idx;
1504
1505 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1506 tx_ring = &(adapter->tx_ring[r_idx]);
1507#ifdef CONFIG_IXGBE_DCA
1508 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1509 ixgbe_update_tx_dca(adapter, tx_ring);
1510#endif
1511
1512 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
1513 work_done = budget;
1514
Nelson, Shannonf7554a22009-09-18 09:46:06 +00001515 /* If all Tx work done, exit the polling mode */
Alexander Duyck91281fd2009-06-04 16:00:27 +00001516 if (work_done < budget) {
1517 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00001518 if (adapter->tx_itr_setting & 1)
Alexander Duyck91281fd2009-06-04 16:00:27 +00001519 ixgbe_set_itr_msix(q_vector);
1520 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1521 ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
1522 }
1523
1524 return work_done;
1525}
1526
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001527static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001528 int r_idx)
Auke Kok9a799d72007-09-15 14:07:45 -07001529{
Alexander Duyck7a921c92009-05-06 10:43:28 +00001530 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
1531
1532 set_bit(r_idx, q_vector->rxr_idx);
1533 q_vector->rxr_count++;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001534}
Auke Kok9a799d72007-09-15 14:07:45 -07001535
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001536static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
Alexander Duyck7a921c92009-05-06 10:43:28 +00001537 int t_idx)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001538{
Alexander Duyck7a921c92009-05-06 10:43:28 +00001539 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
1540
1541 set_bit(t_idx, q_vector->txr_idx);
1542 q_vector->txr_count++;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001543}
Auke Kok9a799d72007-09-15 14:07:45 -07001544
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001545/**
1546 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
1547 * @adapter: board private structure to initialize
1548 * @vectors: allotted vector count for descriptor rings
1549 *
1550 * This function maps descriptor rings to the queue-specific vectors
1551 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1552 * one vector per ring/queue, but on a constrained vector budget, we
1553 * group the rings as "efficiently" as possible. You would add new
1554 * mapping configurations in here.
1555 **/
1556static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001557 int vectors)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001558{
1559 int v_start = 0;
1560 int rxr_idx = 0, txr_idx = 0;
1561 int rxr_remaining = adapter->num_rx_queues;
1562 int txr_remaining = adapter->num_tx_queues;
1563 int i, j;
1564 int rqpv, tqpv;
1565 int err = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001566
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001567 /* No mapping required if MSI-X is disabled. */
1568 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
Auke Kok9a799d72007-09-15 14:07:45 -07001569 goto out;
1570
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001571 /*
1572 * The ideal configuration...
1573 * We have enough vectors to map one per queue.
1574 */
1575 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1576 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1577 map_vector_to_rxq(adapter, v_start, rxr_idx);
1578
1579 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1580 map_vector_to_txq(adapter, v_start, txr_idx);
1581
1582 goto out;
1583 }
1584
1585 /*
1586 * If we don't have enough vectors for a 1-to-1
1587 * mapping, we'll have to group them so there are
1588 * multiple queues per vector.
1589 */
1590 /* Re-adjusting *qpv takes care of the remainder. */
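	/* e.g. 10 Rx rings over 4 vectors assigns 3, 3, 2 and 2 rings
	 * per vector as rxr_remaining shrinks each pass */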
1591 for (i = v_start; i < vectors; i++) {
1592 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
1593 for (j = 0; j < rqpv; j++) {
1594 map_vector_to_rxq(adapter, i, rxr_idx);
1595 rxr_idx++;
1596 rxr_remaining--;
Auke Kok9a799d72007-09-15 14:07:45 -07001597 }
Auke Kok9a799d72007-09-15 14:07:45 -07001598 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001599 for (i = v_start; i < vectors; i++) {
1600 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
1601 for (j = 0; j < tqpv; j++) {
1602 map_vector_to_txq(adapter, i, txr_idx);
1603 txr_idx++;
1604 txr_remaining--;
Auke Kok9a799d72007-09-15 14:07:45 -07001605 }
Auke Kok9a799d72007-09-15 14:07:45 -07001606 }
1607
Auke Kok9a799d72007-09-15 14:07:45 -07001608out:
Auke Kok9a799d72007-09-15 14:07:45 -07001609 return err;
1610}
1611
1612/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001613 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
1614 * @adapter: board private structure
1615 *
1616 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
1617 * interrupts from the kernel.
1618 **/
1619static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1620{
1621 struct net_device *netdev = adapter->netdev;
1622 irqreturn_t (*handler)(int, void *);
1623 int i, vector, q_vectors, err;
Robert Olssoncb13fc22008-11-25 16:43:52 -08001624 int ri = 0, ti = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001625
1626 /* Decrement for Other and TCP Timer vectors */
1627 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1628
1629 /* Map the Tx/Rx rings to the vectors we were allotted. */
1630 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
1631 if (err)
1632 goto out;
1633
1634#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001635 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
1636 &ixgbe_msix_clean_many)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001637 for (vector = 0; vector < q_vectors; vector++) {
Alexander Duyck7a921c92009-05-06 10:43:28 +00001638 handler = SET_HANDLER(adapter->q_vector[vector]);
Robert Olssoncb13fc22008-11-25 16:43:52 -08001639
1640 if (handler == &ixgbe_msix_clean_rx) {
1641 sprintf(adapter->name[vector], "%s-%s-%d",
1642 netdev->name, "rx", ri++);
1643 }
1644 else if (handler == &ixgbe_msix_clean_tx) {
1645 sprintf(adapter->name[vector], "%s-%s-%d",
1646 netdev->name, "tx", ti++);
1647 }
1648 else
1649 sprintf(adapter->name[vector], "%s-%s-%d",
1650 netdev->name, "TxRx", vector);
1651
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001652 err = request_irq(adapter->msix_entries[vector].vector,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001653 handler, 0, adapter->name[vector],
Alexander Duyck7a921c92009-05-06 10:43:28 +00001654 adapter->q_vector[vector]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001655 if (err) {
1656 DPRINTK(PROBE, ERR,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001657 "request_irq failed for MSIX interrupt "
1658 "Error: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001659 goto free_queue_irqs;
1660 }
1661 }
1662
1663 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
1664 err = request_irq(adapter->msix_entries[vector].vector,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001665 &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001666 if (err) {
1667 DPRINTK(PROBE, ERR,
1668 "request_irq for msix_lsc failed: %d\n", err);
1669 goto free_queue_irqs;
1670 }
1671
1672 return 0;
1673
1674free_queue_irqs:
1675 for (i = vector - 1; i >= 0; i--)
1676 free_irq(adapter->msix_entries[i].vector,
Alexander Duyck7a921c92009-05-06 10:43:28 +00001677 adapter->q_vector[i]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001678 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
1679 pci_disable_msix(adapter->pdev);
1680 kfree(adapter->msix_entries);
1681 adapter->msix_entries = NULL;
1682out:
1683 return err;
1684}
1685
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001686static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1687{
Alexander Duyck7a921c92009-05-06 10:43:28 +00001688 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001689 u8 current_itr;
1690 u32 new_itr = q_vector->eitr;
1691 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
1692 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
1693
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001694 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001695 q_vector->tx_itr,
1696 tx_ring->total_packets,
1697 tx_ring->total_bytes);
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001698 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001699 q_vector->rx_itr,
1700 rx_ring->total_packets,
1701 rx_ring->total_bytes);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001702
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001703 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001704
1705 switch (current_itr) {
1706 /* counts and packets in update_itr are dependent on these numbers */
1707 case lowest_latency:
1708 new_itr = 100000;
1709 break;
1710 case low_latency:
1711 new_itr = 20000; /* aka hwitr = ~200 */
1712 break;
1713 case bulk_latency:
1714 new_itr = 8000;
1715 break;
1716 default:
1717 break;
1718 }
1719
1720 if (new_itr != q_vector->eitr) {
Alexander Duyckfe49f042009-06-04 16:00:09 +00001721 /* do an exponential smoothing */
1722 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001723
1724 /* save the algorithm value here, not the smoothed one */
1725 q_vector->eitr = new_itr;
Alexander Duyckfe49f042009-06-04 16:00:09 +00001726
1727 ixgbe_write_eitr(q_vector);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001728 }
1729
1730 return;
1731}
1732
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08001733/**
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08001734 * ixgbe_irq_enable - Enable default interrupt generation settings
1735 * @adapter: board private structure
1736 **/
1737static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1738{
1739 u32 mask;
Nelson, Shannon835462f2009-04-27 22:42:54 +00001740
1741 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
David S. Miller6ab33d52008-11-20 16:44:00 -08001742 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
1743 mask |= IXGBE_EIMS_GPI_SDP1;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001744 if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
Jesse Brandeburg2a41ff82009-03-13 22:14:30 +00001745 mask |= IXGBE_EIMS_ECC;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001746 mask |= IXGBE_EIMS_GPI_SDP1;
1747 mask |= IXGBE_EIMS_GPI_SDP2;
1748 }
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00001749 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
1750 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
1751 mask |= IXGBE_EIMS_FLOW_DIR;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001752
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08001753 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
Nelson, Shannon835462f2009-04-27 22:42:54 +00001754 ixgbe_irq_enable_queues(adapter, ~0);
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08001755 IXGBE_WRITE_FLUSH(&adapter->hw);
1756}
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001757
1758/**
1759 * ixgbe_intr - legacy mode Interrupt Handler
Auke Kok9a799d72007-09-15 14:07:45 -07001760 * @irq: interrupt number
1761 * @data: pointer to a network interface device structure
Auke Kok9a799d72007-09-15 14:07:45 -07001762 **/
1763static irqreturn_t ixgbe_intr(int irq, void *data)
1764{
1765 struct net_device *netdev = data;
1766 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1767 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck7a921c92009-05-06 10:43:28 +00001768 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
Auke Kok9a799d72007-09-15 14:07:45 -07001769 u32 eicr;
1770
Don Skidmore54037502009-02-21 15:42:56 -08001771 /*
1772 * Workaround for silicon errata. Mask the interrupts
1773 * before the read of EICR.
1774 */
1775 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1776
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001777 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
1778 * therefore no explicit interrupt disable is necessary */
1779 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
Jesse Brandeburgf47cf662008-09-11 19:56:14 -07001780 if (!eicr) {
1781 /* shared interrupt alert!
1782 * make sure interrupts are enabled because the read will
1783 * have disabled interrupts due to EIAM */
1784 ixgbe_irq_enable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001785 return IRQ_NONE; /* Not our interrupt */
Jesse Brandeburgf47cf662008-09-11 19:56:14 -07001786 }
Auke Kok9a799d72007-09-15 14:07:45 -07001787
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07001788 if (eicr & IXGBE_EICR_LSC)
1789 ixgbe_check_lsc(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001790
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001791 if (hw->mac.type == ixgbe_mac_82599EB)
1792 ixgbe_check_sfp_event(adapter, eicr);
1793
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07001794 ixgbe_check_fan_failure(adapter, eicr);
1795
Alexander Duyck7a921c92009-05-06 10:43:28 +00001796 if (napi_schedule_prep(&(q_vector->napi))) {
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001797 adapter->tx_ring[0].total_packets = 0;
1798 adapter->tx_ring[0].total_bytes = 0;
1799 adapter->rx_ring[0].total_packets = 0;
1800 adapter->rx_ring[0].total_bytes = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001801 /* would disable interrupts here but EIAM disabled it */
Alexander Duyck7a921c92009-05-06 10:43:28 +00001802 __napi_schedule(&(q_vector->napi));
Auke Kok9a799d72007-09-15 14:07:45 -07001803 }
1804
1805 return IRQ_HANDLED;
1806}
1807
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001808static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
1809{
1810 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1811
1812 for (i = 0; i < q_vectors; i++) {
Alexander Duyck7a921c92009-05-06 10:43:28 +00001813 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001814 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1815 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1816 q_vector->rxr_count = 0;
1817 q_vector->txr_count = 0;
1818 }
1819}
1820
Auke Kok9a799d72007-09-15 14:07:45 -07001821/**
1822 * ixgbe_request_irq - initialize interrupts
1823 * @adapter: board private structure
1824 *
1825 * Attempts to configure interrupts using the best available
1826 * capabilities of the hardware and kernel.
1827 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001828static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07001829{
1830 struct net_device *netdev = adapter->netdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001831 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07001832
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001833 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1834 err = ixgbe_request_msix_irqs(adapter);
1835 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1836 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001837 netdev->name, netdev);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001838 } else {
1839 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001840 netdev->name, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07001841 }
1842
Auke Kok9a799d72007-09-15 14:07:45 -07001843 if (err)
1844 DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
1845
Auke Kok9a799d72007-09-15 14:07:45 -07001846 return err;
1847}
1848
1849static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
1850{
1851 struct net_device *netdev = adapter->netdev;
1852
1853 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001854 int i, q_vectors;
Auke Kok9a799d72007-09-15 14:07:45 -07001855
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001856 q_vectors = adapter->num_msix_vectors;
1857
1858 i = q_vectors - 1;
Auke Kok9a799d72007-09-15 14:07:45 -07001859 free_irq(adapter->msix_entries[i].vector, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07001860
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001861 i--;
1862 for (; i >= 0; i--) {
1863 free_irq(adapter->msix_entries[i].vector,
Alexander Duyck7a921c92009-05-06 10:43:28 +00001864 adapter->q_vector[i]);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001865 }
1866
1867 ixgbe_reset_q_vectors(adapter);
1868 } else {
1869 free_irq(adapter->pdev->irq, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07001870 }
1871}
1872
1873/**
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00001874 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
1875 * @adapter: board private structure
1876 **/
1877static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1878{
Nelson, Shannon835462f2009-04-27 22:42:54 +00001879 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1880 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1881 } else {
1882 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
1883 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00001884 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00001885 }
1886 IXGBE_WRITE_FLUSH(&adapter->hw);
1887 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1888 int i;
1889 for (i = 0; i < adapter->num_msix_vectors; i++)
1890 synchronize_irq(adapter->msix_entries[i].vector);
1891 } else {
1892 synchronize_irq(adapter->pdev->irq);
1893 }
1894}
1895
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00001896/**
Auke Kok9a799d72007-09-15 14:07:45 -07001897 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
1898 *
1899 **/
1900static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1901{
Auke Kok9a799d72007-09-15 14:07:45 -07001902 struct ixgbe_hw *hw = &adapter->hw;
1903
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001904 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
Nelson, Shannonf7554a22009-09-18 09:46:06 +00001905 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));
Auke Kok9a799d72007-09-15 14:07:45 -07001906
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001907 ixgbe_set_ivar(adapter, 0, 0, 0);
1908 ixgbe_set_ivar(adapter, 1, 0, 0);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001909
1910 map_vector_to_rxq(adapter, 0, 0);
1911 map_vector_to_txq(adapter, 0, 0);
1912
1913 DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
Auke Kok9a799d72007-09-15 14:07:45 -07001914}
1915
1916/**
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001917 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
Auke Kok9a799d72007-09-15 14:07:45 -07001918 * @adapter: board private structure
1919 *
1920 * Configure the Tx unit of the MAC after a reset.
1921 **/
1922static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1923{
Peter P Waskiewicz Jr12207e42009-02-06 21:47:24 -08001924 u64 tdba;
Auke Kok9a799d72007-09-15 14:07:45 -07001925 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001926 u32 i, j, tdlen, txctrl;
Auke Kok9a799d72007-09-15 14:07:45 -07001927
1928 /* Setup the HW Tx Head and Tail descriptor pointers */
1929 for (i = 0; i < adapter->num_tx_queues; i++) {
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001930 struct ixgbe_ring *ring = &adapter->tx_ring[i];
1931 j = ring->reg_idx;
1932 tdba = ring->dma;
1933 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001934 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
Yang Hongyang284901a2009-04-06 19:01:15 -07001935 (tdba & DMA_BIT_MASK(32)));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001936 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
1937 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
1938 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
1939 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1940 adapter->tx_ring[i].head = IXGBE_TDH(j);
1941 adapter->tx_ring[i].tail = IXGBE_TDT(j);
Peter P Waskiewicz Jr84f62d42009-09-30 12:07:16 +00001942 /*
1943 * Disable Tx Head Writeback RO bit, since this hoses
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001944 * bookkeeping if things aren't delivered in order.
1945 */
Peter P Waskiewicz Jr84f62d42009-09-30 12:07:16 +00001946 switch (hw->mac.type) {
1947 case ixgbe_mac_82598EB:
1948 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
1949 break;
1950 case ixgbe_mac_82599EB:
1951 default:
1952 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
1953 break;
1954 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001955 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
Peter P Waskiewicz Jr84f62d42009-09-30 12:07:16 +00001956 switch (hw->mac.type) {
1957 case ixgbe_mac_82598EB:
1958 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
1959 break;
1960 case ixgbe_mac_82599EB:
1961 default:
1962 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
1963 break;
1964 }
Auke Kok9a799d72007-09-15 14:07:45 -07001965 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001966 if (hw->mac.type == ixgbe_mac_82599EB) {
1967 /* We enable 8 traffic classes, DCB only */
1968 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
1969 IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
1970 IXGBE_MTQC_8TC_8TQ));
1971 }
Auke Kok9a799d72007-09-15 14:07:45 -07001972}
1973
PJ Waskiewicze8e26352009-02-27 15:45:05 +00001974#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
Auke Kok9a799d72007-09-15 14:07:45 -07001975
Yi Zoua6616b42009-08-06 13:05:23 +00001976static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
1977 struct ixgbe_ring *rx_ring)
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001978{
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001979 u32 srrctl;
Yi Zoua6616b42009-08-06 13:05:23 +00001980 int index;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00001981 struct ixgbe_ring_feature *feature = adapter->ring_feature;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001982
Yi Zoua6616b42009-08-06 13:05:23 +00001983 index = rx_ring->reg_idx;
1984 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1985 unsigned long mask;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00001986 mask = (unsigned long) feature[RING_F_RSS].mask;
Alexander Duyck3be1adf2008-08-30 00:29:10 -07001987 index = index & mask;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001988 }
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001989 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
1990
1991 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1992 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1993
Alexander Duyckafafd5b2009-05-07 10:38:56 +00001994 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1995 IXGBE_SRRCTL_BSIZEHDR_MASK;
1996
Yi Zou6e455b892009-08-06 13:05:44 +00001997 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
Alexander Duyckafafd5b2009-05-07 10:38:56 +00001998#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
1999 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2000#else
2001 srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2002#endif
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002003 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002004 } else {
Alexander Duyckafafd5b2009-05-07 10:38:56 +00002005 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
2006 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002007 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002008 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002009
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002010 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
2011}
2012
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002013static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2014{
2015 u32 mrqc = 0;
2016 int mask;
2017
2018 if (!(adapter->hw.mac.type == ixgbe_mac_82599EB))
2019 return mrqc;
2020
2021 mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
2022#ifdef CONFIG_IXGBE_DCB
2023 | IXGBE_FLAG_DCB_ENABLED
2024#endif
2025 );
2026
2027 switch (mask) {
2028 case (IXGBE_FLAG_RSS_ENABLED):
2029 mrqc = IXGBE_MRQC_RSSEN;
2030 break;
2031#ifdef CONFIG_IXGBE_DCB
2032 case (IXGBE_FLAG_DCB_ENABLED):
2033 mrqc = IXGBE_MRQC_RT8TCEN;
2034 break;
2035#endif /* CONFIG_IXGBE_DCB */
2036 default:
2037 break;
2038 }
2039
2040 return mrqc;
2041}
2042
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002043/**
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002044 * ixgbe_configure_rscctl - enable RSC for the indicated ring
2045 * @adapter: address of board private structure
2046 * @index: index of ring to set
2047 * @rx_buf_len: rx buffer length
2048 **/
2049static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index,
2050 int rx_buf_len)
2051{
2052 struct ixgbe_ring *rx_ring;
2053 struct ixgbe_hw *hw = &adapter->hw;
2054 int j;
2055 u32 rscctrl;
2056
2057 rx_ring = &adapter->rx_ring[index];
2058 j = rx_ring->reg_idx;
2059 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
2060 rscctrl |= IXGBE_RSCCTL_RSCEN;
2061 /*
2062 * we must limit the number of descriptors so that the
2063 * total size of max desc * buf_len is not greater
2064 * than 65535
2065 */
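	/* e.g. 16 descriptors of a 4095-byte buffer total 65520 bytes,
	 * just under the limit; larger buffers are capped at 8 or 4
	 * descriptors below */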
2066 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2067#if (MAX_SKB_FRAGS > 16)
2068 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2069#elif (MAX_SKB_FRAGS > 8)
2070 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2071#elif (MAX_SKB_FRAGS > 4)
2072 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2073#else
2074 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2075#endif
2076 } else {
2077 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2078 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2079 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2080 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2081 else
2082 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2083 }
2084 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
2085}
2086
2087/**
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002088 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
Auke Kok9a799d72007-09-15 14:07:45 -07002089 * @adapter: board private structure
2090 *
2091 * Configure the Rx unit of the MAC after a reset.
2092 **/
2093static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2094{
2095 u64 rdba;
2096 struct ixgbe_hw *hw = &adapter->hw;
Yi Zoua6616b42009-08-06 13:05:23 +00002097 struct ixgbe_ring *rx_ring;
Auke Kok9a799d72007-09-15 14:07:45 -07002098 struct net_device *netdev = adapter->netdev;
2099 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002100 int i, j;
Auke Kok9a799d72007-09-15 14:07:45 -07002101 u32 rdlen, rxctrl, rxcsum;
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07002102 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
2103 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
2104 0x6A3E67EA, 0x14364D17, 0x3BED200D};
Auke Kok9a799d72007-09-15 14:07:45 -07002105 u32 fctrl, hlreg0;
Jesse Brandeburg509ee932009-03-13 22:13:28 +00002106 u32 reta = 0, mrqc = 0;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002107 u32 rdrxctl;
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07002108 int rx_buf_len;
Auke Kok9a799d72007-09-15 14:07:45 -07002109
2110 /* Decide whether to use packet split mode or not */
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07002111 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
Auke Kok9a799d72007-09-15 14:07:45 -07002112
2113 /* Set the RX buffer length according to the mode */
2114 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07002115 rx_buf_len = IXGBE_RX_HDR_SIZE;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002116 if (hw->mac.type == ixgbe_mac_82599EB) {
2117 /* PSRTYPE must be initialized in 82599 */
2118 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2119 IXGBE_PSRTYPE_UDPHDR |
2120 IXGBE_PSRTYPE_IPV4HDR |
Yi Zoudfa12f02009-05-07 10:39:35 +00002121 IXGBE_PSRTYPE_IPV6HDR |
2122 IXGBE_PSRTYPE_L2HDR;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002123 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2124 }
Auke Kok9a799d72007-09-15 14:07:45 -07002125 } else {
Peter P Waskiewicz Jr0c19d6a2009-07-30 12:25:28 +00002126 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
Alexander Duyckf8212f92009-04-27 22:42:37 +00002127 (netdev->mtu <= ETH_DATA_LEN))
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07002128 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Auke Kok9a799d72007-09-15 14:07:45 -07002129 else
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07002130 rx_buf_len = ALIGN(max_frame, 1024);
Auke Kok9a799d72007-09-15 14:07:45 -07002131 }
2132
2133 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2134 fctrl |= IXGBE_FCTRL_BAM;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002135 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002136 fctrl |= IXGBE_FCTRL_PMCF;
Auke Kok9a799d72007-09-15 14:07:45 -07002137 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2138
2139 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2140 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2141 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
2142 else
2143 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
Yi Zou63f39bd2009-05-17 12:34:35 +00002144#ifdef IXGBE_FCOE
Yi Zouf34c5c82009-08-14 12:42:17 +00002145 if (netdev->features & NETIF_F_FCOE_MTU)
Yi Zou63f39bd2009-05-17 12:34:35 +00002146 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2147#endif
Auke Kok9a799d72007-09-15 14:07:45 -07002148 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2149
Auke Kok9a799d72007-09-15 14:07:45 -07002150 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
2151 /* disable receives while setting up the descriptors */
2152 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2153 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2154
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002155 /*
2156 * Setup the HW Rx Head and Tail Descriptor Pointers and
2157 * the Base and Length of the Rx Descriptor Ring
2158 */
Auke Kok9a799d72007-09-15 14:07:45 -07002159 for (i = 0; i < adapter->num_rx_queues; i++) {
Yi Zoua6616b42009-08-06 13:05:23 +00002160 rx_ring = &adapter->rx_ring[i];
2161 rdba = rx_ring->dma;
2162 j = rx_ring->reg_idx;
Yang Hongyang284901a2009-04-06 19:01:15 -07002163 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07002164 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
2165 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
2166 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
2167 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
Yi Zoua6616b42009-08-06 13:05:23 +00002168 rx_ring->head = IXGBE_RDH(j);
2169 rx_ring->tail = IXGBE_RDT(j);
2170 rx_ring->rx_buf_len = rx_buf_len;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002171
Yi Zou6e455b892009-08-06 13:05:44 +00002172 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
2173 rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
Peter P Waskiewicz Jr1b3ff022009-09-14 07:47:27 +00002174 else
2175 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07002176
Yi Zou63f39bd2009-05-17 12:34:35 +00002177#ifdef IXGBE_FCOE
Yi Zouf34c5c82009-08-14 12:42:17 +00002178 if (netdev->features & NETIF_F_FCOE_MTU) {
Yi Zou63f39bd2009-05-17 12:34:35 +00002179 struct ixgbe_ring_feature *f;
2180 f = &adapter->ring_feature[RING_F_FCOE];
Yi Zou6e455b892009-08-06 13:05:44 +00002181 if ((i >= f->mask) && (i < f->mask + f->indices)) {
2182 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2183 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2184 rx_ring->rx_buf_len =
2185 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2186 }
Yi Zou63f39bd2009-05-17 12:34:35 +00002187 }
2188
2189#endif /* IXGBE_FCOE */
Yi Zoua6616b42009-08-06 13:05:23 +00002190 ixgbe_configure_srrctl(adapter, rx_ring);
Auke Kok9a799d72007-09-15 14:07:45 -07002191 }
2192
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002193 if (hw->mac.type == ixgbe_mac_82598EB) {
2194 /*
2195 * For VMDq support of different descriptor types or
2196 * buffer sizes through the use of multiple SRRCTL
2197 * registers, RDRXCTL.MVMEN must be set to 1
2198 *
2199 * also, the manual doesn't mention it clearly but DCA hints
2200 * will only use queue 0's tags unless this bit is set. Side
2201 * effects of setting this bit are only that SRRCTL must be
2202 * fully programmed [0..15]
2203 */
Jesse Brandeburg2a41ff82009-03-13 22:14:30 +00002204 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2205 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2206 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
Alexander Duyck2f90b862008-11-20 20:52:10 -08002207 }
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002208
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002209 /* Program MRQC for the distribution of queues */
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00002210 mrqc = ixgbe_setup_mrqc(adapter);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002211
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002212 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Auke Kok9a799d72007-09-15 14:07:45 -07002213 /* Fill out redirection table */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002214 for (i = 0, j = 0; i < 128; i++, j++) {
2215 if (j == adapter->ring_feature[RING_F_RSS].indices)
2216 j = 0;
2217 /* reta = 4-byte sliding window of
2218 * 0x00..(indices-1)(indices-1)00..etc. */
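			/* e.g. with 4 RSS queues the bytes cycle 0x00,
			 * 0x11, 0x22, 0x33, packing four table entries
			 * into each 32-bit RETA register write */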
2219 reta = (reta << 8) | (j * 0x11);
2220 if ((i & 3) == 3)
2221 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
Auke Kok9a799d72007-09-15 14:07:45 -07002222 }
2223
2224 /* Fill out hash function seeds */
2225 for (i = 0; i < 10; i++)
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07002226 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07002227
Jesse Brandeburg2a41ff82009-03-13 22:14:30 +00002228 if (hw->mac.type == ixgbe_mac_82598EB)
2229 mrqc |= IXGBE_MRQC_RSSEN;
Auke Kok9a799d72007-09-15 14:07:45 -07002230 /* Perform hash on these packet types */
Jesse Brandeburg2a41ff82009-03-13 22:14:30 +00002231 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2232 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2233 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2234 | IXGBE_MRQC_RSS_FIELD_IPV6
2235 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2236 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
Auke Kok9a799d72007-09-15 14:07:45 -07002237 }
Jesse Brandeburg2a41ff82009-03-13 22:14:30 +00002238 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002239
2240 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2241
2242 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
2243 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
2244 /* Disable checksum indication in the descriptor; this
2245 * enables reporting the RSS hash instead */
2246 rxcsum |= IXGBE_RXCSUM_PCSD;
2247 }
2248 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
2249 /* Enable IPv4 payload checksum for UDP fragments
2250 * if PCSD is not set */
2251 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2252 }
2253
2254 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002255
2256 if (hw->mac.type == ixgbe_mac_82599EB) {
2257 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2258 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
Alexander Duyckf8212f92009-04-27 22:42:37 +00002259 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002260 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2261 }
Alexander Duyckf8212f92009-04-27 22:42:37 +00002262
Peter P Waskiewicz Jr0c19d6a2009-07-30 12:25:28 +00002263 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00002264 /* Enable 82599 HW-RSC */
Nelson, Shannonbb5a9ad2009-09-18 09:46:27 +00002265 for (i = 0; i < adapter->num_rx_queues; i++)
2266 ixgbe_configure_rscctl(adapter, i, rx_buf_len);
2267
Alexander Duyckf8212f92009-04-27 22:42:37 +00002268 /* Disable RSC for ACK packets */
2269 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
2270 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
2271 }
Auke Kok9a799d72007-09-15 14:07:45 -07002272}
2273
Auke Kok9a799d72007-09-15 14:07:45 -07002274static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2275{
2276 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002277 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07002278
2279 /* add VID to filter table */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002280 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
Auke Kok9a799d72007-09-15 14:07:45 -07002281}
2282
2283static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2284{
2285 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002286 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07002287
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002288 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2289 ixgbe_irq_disable(adapter);
2290
Auke Kok9a799d72007-09-15 14:07:45 -07002291 vlan_group_set_device(adapter->vlgrp, vid, NULL);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002292
2293 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2294 ixgbe_irq_enable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002295
2296 /* remove VID from filter table */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002297 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
Auke Kok9a799d72007-09-15 14:07:45 -07002298}
2299
Don Skidmore068c89b2009-01-19 16:54:36 -08002300static void ixgbe_vlan_rx_register(struct net_device *netdev,
2301 struct vlan_group *grp)
2302{
2303 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2304 u32 ctrl;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002305 int i, j;
Don Skidmore068c89b2009-01-19 16:54:36 -08002306
2307 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2308 ixgbe_irq_disable(adapter);
2309 adapter->vlgrp = grp;
2310
2311 /*
2312 * For a DCB driver, always enable VLAN tag stripping so we can
2313 * still receive traffic from a DCB-enabled host even if we're
2314 * not in DCB mode.
2315 */
2316 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002317 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2318 ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
Don Skidmore068c89b2009-01-19 16:54:36 -08002319 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2320 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002321 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2322 ctrl |= IXGBE_VLNCTRL_VFE;
2323 /* enable VLAN tag insert/strip */
2324 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
2325 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2326 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
2327 for (i = 0; i < adapter->num_rx_queues; i++) {
2328 j = adapter->rx_ring[i].reg_idx;
2329 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
2330 ctrl |= IXGBE_RXDCTL_VME;
2331 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
2332 }
Don Skidmore068c89b2009-01-19 16:54:36 -08002333 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002334 ixgbe_vlan_rx_add_vid(netdev, 0);
Don Skidmore068c89b2009-01-19 16:54:36 -08002335
2336 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2337 ixgbe_irq_enable(adapter);
2338}
2339
Auke Kok9a799d72007-09-15 14:07:45 -07002340static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
2341{
2342 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2343
2344 if (adapter->vlgrp) {
2345 u16 vid;
2346 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2347 if (!vlan_group_get_device(adapter->vlgrp, vid))
2348 continue;
2349 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
2350 }
2351 }
2352}
2353
Christopher Leech2c5645c2008-08-26 04:27:02 -07002354static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
2355{
2356 struct dev_mc_list *mc_ptr;
2357 u8 *addr = *mc_addr_ptr;
2358 *vmdq = 0;
2359
2360 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
2361 if (mc_ptr->next)
2362 *mc_addr_ptr = mc_ptr->next->dmi_addr;
2363 else
2364 *mc_addr_ptr = NULL;
2365
2366 return addr;
2367}
2368
Auke Kok9a799d72007-09-15 14:07:45 -07002369/**
Christopher Leech2c5645c2008-08-26 04:27:02 -07002370 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
Auke Kok9a799d72007-09-15 14:07:45 -07002371 * @netdev: network interface device structure
2372 *
Christopher Leech2c5645c2008-08-26 04:27:02 -07002373 * The set_rx_mode entry point is called whenever the unicast/multicast
2374 * address list or the network interface flags are updated. This routine is
2375 * responsible for configuring the hardware for proper unicast, multicast and
2376 * promiscuous mode.
Auke Kok9a799d72007-09-15 14:07:45 -07002377 **/
Christopher Leech2c5645c2008-08-26 04:27:02 -07002378static void ixgbe_set_rx_mode(struct net_device *netdev)
Auke Kok9a799d72007-09-15 14:07:45 -07002379{
2380 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2381 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck3d016252008-08-26 18:30:04 -07002382 u32 fctrl, vlnctrl;
Christopher Leech2c5645c2008-08-26 04:27:02 -07002383 u8 *addr_list = NULL;
2384 int addr_count = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002385
2386 /* Check for Promiscuous and All Multicast modes */
2387
2388 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
Alexander Duyck3d016252008-08-26 18:30:04 -07002389 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
Auke Kok9a799d72007-09-15 14:07:45 -07002390
2391 if (netdev->flags & IFF_PROMISC) {
Christopher Leech2c5645c2008-08-26 04:27:02 -07002392 hw->addr_ctrl.user_set_promisc = 1;
Auke Kok9a799d72007-09-15 14:07:45 -07002393 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
Alexander Duyck3d016252008-08-26 18:30:04 -07002394 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
Auke Kok9a799d72007-09-15 14:07:45 -07002395 } else {
Patrick McHardy746b9f02008-07-16 20:15:45 -07002396 if (netdev->flags & IFF_ALLMULTI) {
2397 fctrl |= IXGBE_FCTRL_MPE;
2398 fctrl &= ~IXGBE_FCTRL_UPE;
2399 } else {
2400 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2401 }
Alexander Duyck3d016252008-08-26 18:30:04 -07002402 vlnctrl |= IXGBE_VLNCTRL_VFE;
Christopher Leech2c5645c2008-08-26 04:27:02 -07002403 hw->addr_ctrl.user_set_promisc = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002404 }
2405
2406 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
Alexander Duyck3d016252008-08-26 18:30:04 -07002407 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
Auke Kok9a799d72007-09-15 14:07:45 -07002408
Christopher Leech2c5645c2008-08-26 04:27:02 -07002409 /* reprogram secondary unicast list */
Jiri Pirko31278e72009-06-17 01:12:19 +00002410 hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);
Auke Kok9a799d72007-09-15 14:07:45 -07002411
Christopher Leech2c5645c2008-08-26 04:27:02 -07002412 /* reprogram multicast list */
2413 addr_count = netdev->mc_count;
2414 if (addr_count)
2415 addr_list = netdev->mc_list->dmi_addr;
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002416 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
2417 ixgbe_addr_list_itr);
Auke Kok9a799d72007-09-15 14:07:45 -07002418}
2419
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002420static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
2421{
2422 int q_idx;
2423 struct ixgbe_q_vector *q_vector;
2424 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2425
2426 /* legacy and MSI only use one vector */
2427 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2428 q_vectors = 1;
2429
2430 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002431 struct napi_struct *napi;
Alexander Duyck7a921c92009-05-06 10:43:28 +00002432 q_vector = adapter->q_vector[q_idx];
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002433 napi = &q_vector->napi;
Alexander Duyck91281fd2009-06-04 16:00:27 +00002434 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2435 if (!q_vector->rxr_count || !q_vector->txr_count) {
2436 if (q_vector->txr_count == 1)
2437 napi->poll = &ixgbe_clean_txonly;
2438 else if (q_vector->rxr_count == 1)
2439 napi->poll = &ixgbe_clean_rxonly;
2440 }
2441 }
Jesse Brandeburgf0848272008-09-11 19:59:42 -07002442
2443 napi_enable(napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002444 }
2445}
2446
2447static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
2448{
2449 int q_idx;
2450 struct ixgbe_q_vector *q_vector;
2451 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2452
2453 /* legacy and MSI only use one vector */
2454 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2455 q_vectors = 1;
2456
2457 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
Alexander Duyck7a921c92009-05-06 10:43:28 +00002458 q_vector = adapter->q_vector[q_idx];
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002459 napi_disable(&q_vector->napi);
2460 }
2461}
2462
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08002463#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08002464/**
2465 * ixgbe_configure_dcb - Configure DCB hardware
2466 * @adapter: ixgbe adapter struct
2467 *
2468 * This is called by the driver on open to configure the DCB hardware.
2469 * This is also called by the DCB netlink interface when reconfiguring
2470 * the DCB state.
2471 **/
2472static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2473{
2474 struct ixgbe_hw *hw = &adapter->hw;
2475 u32 txdctl, vlnctrl;
2476 int i, j;
2477
2478 ixgbe_dcb_check_config(&adapter->dcb_cfg);
2479 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
2480 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
2481
2482 /* reconfigure the hardware */
2483 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
2484
2485 for (i = 0; i < adapter->num_tx_queues; i++) {
2486 j = adapter->tx_ring[i].reg_idx;
2487 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2488 /* PThresh workaround for Tx hang with DFP enabled. */
2489 txdctl |= 32;
2490 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2491 }
2492 /* Enable VLAN tag insert/strip */
2493 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002494 if (hw->mac.type == ixgbe_mac_82598EB) {
2495 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2496 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2497 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2498 } else if (hw->mac.type == ixgbe_mac_82599EB) {
2499 vlnctrl |= IXGBE_VLNCTRL_VFE;
2500 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2501 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2502 for (i = 0; i < adapter->num_rx_queues; i++) {
2503 j = adapter->rx_ring[i].reg_idx;
2504 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2505 vlnctrl |= IXGBE_RXDCTL_VME;
2506 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2507 }
2508 }
Alexander Duyck2f90b862008-11-20 20:52:10 -08002509 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
2510}
2511
2512#endif
Auke Kok9a799d72007-09-15 14:07:45 -07002513static void ixgbe_configure(struct ixgbe_adapter *adapter)
2514{
2515 struct net_device *netdev = adapter->netdev;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002516 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07002517 int i;
2518
Christopher Leech2c5645c2008-08-26 04:27:02 -07002519 ixgbe_set_rx_mode(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002520
2521 ixgbe_restore_vlan(adapter);
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08002522#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08002523 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
Yi Zoub352e402009-11-06 12:55:38 +00002524 if (hw->mac.type == ixgbe_mac_82598EB)
2525 netif_set_gso_max_size(netdev, 32768);
2526 else
2527 netif_set_gso_max_size(netdev, 65536);
Alexander Duyck2f90b862008-11-20 20:52:10 -08002528 ixgbe_configure_dcb(adapter);
2529 } else {
2530 netif_set_gso_max_size(netdev, 65536);
2531 }
2532#else
2533 netif_set_gso_max_size(netdev, 65536);
2534#endif
Auke Kok9a799d72007-09-15 14:07:45 -07002535
Yi Zoueacd73f2009-05-13 13:11:06 +00002536#ifdef IXGBE_FCOE
2537 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
2538 ixgbe_configure_fcoe(adapter);
2539
2540#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002541 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2542 for (i = 0; i < adapter->num_tx_queues; i++)
2543 adapter->tx_ring[i].atr_sample_rate =
2544 adapter->atr_sample_rate;
2545 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
2546 } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
2547 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
2548 }
2549
Auke Kok9a799d72007-09-15 14:07:45 -07002550 ixgbe_configure_tx(adapter);
2551 ixgbe_configure_rx(adapter);
2552 for (i = 0; i < adapter->num_rx_queues; i++)
2553 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002554 (adapter->rx_ring[i].count - 1));
Auke Kok9a799d72007-09-15 14:07:45 -07002555}
2556
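/**
 * ixgbe_is_sfp - check whether the PHY is a pluggable SFP+ or twinax (tw_*) module
 * @hw: pointer to hardware structure
 **/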
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002557static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2558{
2559 switch (hw->phy.type) {
2560 case ixgbe_phy_sfp_avago:
2561 case ixgbe_phy_sfp_ftl:
2562 case ixgbe_phy_sfp_intel:
2563 case ixgbe_phy_sfp_unknown:
2564 case ixgbe_phy_tw_tyco:
2565 case ixgbe_phy_tw_unknown:
2566 return true;
2567 default:
2568 return false;
2569 }
2570}
2571
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002572/**
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002573 * ixgbe_sfp_link_config - set up SFP+ link
2574 * @adapter: pointer to private adapter struct
2575 **/
2576static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
2577{
2578 struct ixgbe_hw *hw = &adapter->hw;
2579
2580 if (hw->phy.multispeed_fiber) {
2581 /*
2582 * In multispeed fiber setups, the device may not have
2583 * had a physical connection when the driver loaded.
2584 * If that's the case, the initial link configuration
2585 * couldn't get the MAC into 10G or 1G mode, so we'll
2586 * never have a link status change interrupt fire.
2587 * We need to try to force an autonegotiation
2588 * session, then bring up link.
2589 */
2590 hw->mac.ops.setup_sfp(hw);
2591 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
2592 schedule_work(&adapter->multispeed_fiber_task);
2593 } else {
2594 /*
2595 * Direct Attach Cu and non-multispeed fiber modules
2596 * still need to be configured properly prior to
2597 * attempting link.
2598 */
2599 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
2600 schedule_work(&adapter->sfp_config_module_task);
2601 }
2602}
2603
2604/**
2605 * ixgbe_non_sfp_link_config - set up non-SFP+ link
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002606 * @hw: pointer to private hardware struct
2607 *
2608 * Returns 0 on success, negative on failure
2609 **/
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002610static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002611{
2612 u32 autoneg;
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00002613 bool negotiation, link_up = false;
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002614 u32 ret = IXGBE_ERR_LINK_SETUP;
2615
2616 if (hw->mac.ops.check_link)
2617 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
2618
2619 if (ret)
2620 goto link_cfg_out;
2621
2622 if (hw->mac.ops.get_link_capabilities)
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00002623 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002624 if (ret)
2625 goto link_cfg_out;
2626
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00002627 if (hw->mac.ops.setup_link)
2628 ret = hw->mac.ops.setup_link(hw, autoneg, negotiation, link_up);
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002629link_cfg_out:
2630 return ret;
2631}
2632
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002633#define IXGBE_MAX_RX_DESC_POLL 10
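/**
 * ixgbe_rx_desc_queue_enable - wait for an Rx queue to be enabled by hardware
 * @adapter: board private structure
 * @rxr: index of the Rx ring to poll
 *
 * Polls RXDCTL.ENABLE up to IXGBE_MAX_RX_DESC_POLL times (sleeping 1 ms
 * between reads), warns if the bit never sets, then releases the ring's
 * descriptors to hardware.
 **/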
2634static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2635 int rxr)
2636{
2637 int j = adapter->rx_ring[rxr].reg_idx;
2638 int k;
2639
2640 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
2641 if (IXGBE_READ_REG(&adapter->hw,
2642 IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
2643 break;
2644 else
2645 msleep(1);
2646 }
2647 if (k >= IXGBE_MAX_RX_DESC_POLL) {
2648 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
2649 "not set within the polling period\n", rxr);
2650 }
2651 ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
2652 (adapter->rx_ring[rxr].count - 1));
2653}
2654
Auke Kok9a799d72007-09-15 14:07:45 -07002655static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2656{
2657 struct net_device *netdev = adapter->netdev;
Auke Kok9a799d72007-09-15 14:07:45 -07002658 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002659 int i, j = 0;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002660 int num_rx_rings = adapter->num_rx_queues;
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002661 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07002662 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002663 u32 txdctl, rxdctl, mhadd;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002664 u32 dmatxctl;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002665 u32 gpie;
Auke Kok9a799d72007-09-15 14:07:45 -07002666
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08002667 ixgbe_get_hw_control(adapter);
2668
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002669 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
2670 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
Auke Kok9a799d72007-09-15 14:07:45 -07002671 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2672 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002673 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
Auke Kok9a799d72007-09-15 14:07:45 -07002674 } else {
2675 /* MSI only */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002676 gpie = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002677 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002678 /* XXX: to interrupt immediately for EICS writes, enable this */
2679 /* gpie |= IXGBE_GPIE_EIMEN; */
2680 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2681 }
2682
2683 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2684 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
2685 * specifically only auto mask tx and rx interrupts */
2686 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07002687 }
2688
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07002689 /* Enable fan failure interrupt if media type is copper */
2690 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2691 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2692 gpie |= IXGBE_SDP1_GPIEN;
2693 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2694 }
2695
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002696 if (hw->mac.type == ixgbe_mac_82599EB) {
2697 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2698 gpie |= IXGBE_SDP1_GPIEN;
2699 gpie |= IXGBE_SDP2_GPIEN;
2700 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2701 }
2702
Yi Zou63f39bd2009-05-17 12:34:35 +00002703#ifdef IXGBE_FCOE
2704 /* adjust max frame to be able to do baby jumbo for FCoE */
Yi Zouf34c5c82009-08-14 12:42:17 +00002705 if ((netdev->features & NETIF_F_FCOE_MTU) &&
Yi Zou63f39bd2009-05-17 12:34:35 +00002706 (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2707 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
2708
2709#endif /* IXGBE_FCOE */
Auke Kok9a799d72007-09-15 14:07:45 -07002710 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
Auke Kok9a799d72007-09-15 14:07:45 -07002711 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2712 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2713 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2714
2715 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2716 }
2717
2718 for (i = 0; i < adapter->num_tx_queues; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002719 j = adapter->tx_ring[i].reg_idx;
2720 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002721 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2722 txdctl |= (8 << 16);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002723 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2724 }
2725
2726 if (hw->mac.type == ixgbe_mac_82599EB) {
2727 /* DMATXCTL.EN must be set after all Tx queue config is done */
2728 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2729 dmatxctl |= IXGBE_DMATXCTL_TE;
2730 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2731 }
2732 for (i = 0; i < adapter->num_tx_queues; i++) {
2733 j = adapter->tx_ring[i].reg_idx;
2734 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
Auke Kok9a799d72007-09-15 14:07:45 -07002735 txdctl |= IXGBE_TXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002736 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07002737 }
2738
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002739 for (i = 0; i < num_rx_rings; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002740 j = adapter->rx_ring[i].reg_idx;
2741 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2742 /* enable PTHRESH=32 descriptors (half the internal cache)
2743 * and HTHRESH=0 descriptors (to minimize latency on fetch);
2744 * this also removes a pesky rx_no_buffer_count increment */
2745 rxdctl |= 0x0020;
Auke Kok9a799d72007-09-15 14:07:45 -07002746 rxdctl |= IXGBE_RXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002747 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002748 if (hw->mac.type == ixgbe_mac_82599EB)
2749 ixgbe_rx_desc_queue_enable(adapter, i);
Auke Kok9a799d72007-09-15 14:07:45 -07002750 }
2751 /* enable all receives */
2752 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002753 if (hw->mac.type == ixgbe_mac_82598EB)
2754 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
2755 else
2756 rxdctl |= IXGBE_RXCTRL_RXEN;
2757 hw->mac.ops.enable_rx_dma(hw, rxdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07002758
2759 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2760 ixgbe_configure_msix(adapter);
2761 else
2762 ixgbe_configure_msi_and_legacy(adapter);
2763
2764 clear_bit(__IXGBE_DOWN, &adapter->state);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002765 ixgbe_napi_enable_all(adapter);
2766
2767 /* clear any pending interrupts, may auto mask */
2768 IXGBE_READ_REG(hw, IXGBE_EICR);
2769
Auke Kok9a799d72007-09-15 14:07:45 -07002770 ixgbe_irq_enable(adapter);
2771
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002772 /*
Don Skidmorebf069c92009-05-07 10:39:54 +00002773 * If this adapter has a fan, check to see if we had a failure
2774 * before we enabled the interrupt.
2775 */
2776 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2777 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2778 if (esdp & IXGBE_ESDP_SDP1)
2779 DPRINTK(DRV, CRIT,
2780 "Fan has stopped, replace the adapter\n");
2781 }
2782
2783 /*
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002784 * For hot-pluggable SFP+ devices, a new SFP+ module may have
Don Skidmore19343de2009-07-02 12:50:31 +00002785 * arrived before interrupts were enabled but after probe. Such
2786 * devices wouldn't have their type identified yet. We need to
2787 * kick off the SFP+ module setup first, then try to bring up link.
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002788 * If we're not hot-pluggable SFP+, we just need to configure link
2789 * and bring it up.
2790 */
Don Skidmore19343de2009-07-02 12:50:31 +00002791 if (hw->phy.type == ixgbe_phy_unknown) {
2792 err = hw->phy.ops.identify(hw);
2793 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
Don Skidmore5da43c12009-07-02 12:50:52 +00002794 /*
2795 * Take the device down and schedule the SFP config module
2796 * work item, which will unregister_netdev and log it.
2797 */
Don Skidmore19343de2009-07-02 12:50:31 +00002798 ixgbe_down(adapter);
Don Skidmore5da43c12009-07-02 12:50:52 +00002799 schedule_work(&adapter->sfp_config_module_task);
Don Skidmore19343de2009-07-02 12:50:31 +00002800 return err;
2801 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002802 }
2803
2804 if (ixgbe_is_sfp(hw)) {
2805 ixgbe_sfp_link_config(adapter);
2806 } else {
2807 err = ixgbe_non_sfp_link_config(hw);
2808 if (err)
2809 DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
2810 }
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002811
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00002812 for (i = 0; i < adapter->num_tx_queues; i++)
2813 set_bit(__IXGBE_FDIR_INIT_DONE,
2814 &(adapter->tx_ring[i].reinit_state));
2815
Peter P Waskiewicz Jr1da100b2009-01-19 16:55:03 -08002816 /* enable transmits */
2817 netif_tx_start_all_queues(netdev);
2818
Auke Kok9a799d72007-09-15 14:07:45 -07002819 /* bring the link up in the watchdog; this could race with our first
2820 * link up interrupt but shouldn't be a problem */
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002821 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2822 adapter->link_check_timeout = jiffies;
Auke Kok9a799d72007-09-15 14:07:45 -07002823 mod_timer(&adapter->watchdog_timer, jiffies);
2824 return 0;
2825}
2826
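/**
 * ixgbe_reinit_locked - bring the interface down and back up
 * @adapter: board private structure
 *
 * Serializes against other resets via the __IXGBE_RESETTING bit and must be
 * called from process context (it sleeps while waiting for the bit).
 **/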
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002827void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
2828{
2829 WARN_ON(in_interrupt());
2830 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
2831 msleep(1);
2832 ixgbe_down(adapter);
2833 ixgbe_up(adapter);
2834 clear_bit(__IXGBE_RESETTING, &adapter->state);
2835}
2836
Auke Kok9a799d72007-09-15 14:07:45 -07002837int ixgbe_up(struct ixgbe_adapter *adapter)
2838{
2839 /* hardware has been reset, we need to reload some things */
2840 ixgbe_configure(adapter);
2841
2842 return ixgbe_up_complete(adapter);
2843}
2844
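/**
 * ixgbe_reset - re-initialize the hardware and reprogram RAR[0]
 * @adapter: board private structure
 **/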
2845void ixgbe_reset(struct ixgbe_adapter *adapter)
2846{
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002847 struct ixgbe_hw *hw = &adapter->hw;
Don Skidmore8ca783a2009-05-26 20:40:47 -07002848 int err;
2849
2850 err = hw->mac.ops.init_hw(hw);
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00002851 switch (err) {
2852 case 0:
2853 case IXGBE_ERR_SFP_NOT_PRESENT:
2854 break;
2855 case IXGBE_ERR_MASTER_REQUESTS_PENDING:
2856 dev_err(&adapter->pdev->dev, "master disable timed out\n");
2857 break;
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00002858 case IXGBE_ERR_EEPROM_VERSION:
2859 /* We are running on a pre-production device, log a warning */
2860 dev_warn(&adapter->pdev->dev, "This device is a pre-production "
2861 "adapter/LOM. Please be aware there may be issues "
2862 "associated with your hardware. If you are "
2863 "experiencing problems please contact your Intel or "
2864 "hardware representative who provided you with this "
2865 "hardware.\n");
2866 break;
Peter P Waskiewicz Jrda4dd0f2009-06-04 11:10:35 +00002867 default:
2868 dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
2869 }
Auke Kok9a799d72007-09-15 14:07:45 -07002870
2871 /* reprogram the RAR[0] in case user changed it. */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002872 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
Auke Kok9a799d72007-09-15 14:07:45 -07002873}
2874
Auke Kok9a799d72007-09-15 14:07:45 -07002875/**
2876 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
2877 * @adapter: board private structure
2878 * @rx_ring: ring to free buffers from
2879 **/
2880static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002881 struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002882{
2883 struct pci_dev *pdev = adapter->pdev;
2884 unsigned long size;
2885 unsigned int i;
2886
2887 /* Free all the Rx ring sk_buffs */
2888
2889 for (i = 0; i < rx_ring->count; i++) {
2890 struct ixgbe_rx_buffer *rx_buffer_info;
2891
2892 rx_buffer_info = &rx_ring->rx_buffer_info[i];
2893 if (rx_buffer_info->dma) {
2894 pci_unmap_single(pdev, rx_buffer_info->dma,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002895 rx_ring->rx_buf_len,
2896 PCI_DMA_FROMDEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07002897 rx_buffer_info->dma = 0;
2898 }
2899 if (rx_buffer_info->skb) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00002900 struct sk_buff *skb = rx_buffer_info->skb;
Auke Kok9a799d72007-09-15 14:07:45 -07002901 rx_buffer_info->skb = NULL;
Alexander Duyckf8212f92009-04-27 22:42:37 +00002902 do {
2903 struct sk_buff *this = skb;
2904 skb = skb->prev;
2905 dev_kfree_skb(this);
2906 } while (skb);
Auke Kok9a799d72007-09-15 14:07:45 -07002907 }
2908 if (!rx_buffer_info->page)
2909 continue;
Jesse Brandeburg4f57ca62009-06-30 11:44:56 +00002910 if (rx_buffer_info->page_dma) {
2911 pci_unmap_page(pdev, rx_buffer_info->page_dma,
2912 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
2913 rx_buffer_info->page_dma = 0;
2914 }
Auke Kok9a799d72007-09-15 14:07:45 -07002915 put_page(rx_buffer_info->page);
2916 rx_buffer_info->page = NULL;
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07002917 rx_buffer_info->page_offset = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002918 }
2919
2920 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2921 memset(rx_ring->rx_buffer_info, 0, size);
2922
2923 /* Zero out the descriptor ring */
2924 memset(rx_ring->desc, 0, rx_ring->size);
2925
2926 rx_ring->next_to_clean = 0;
2927 rx_ring->next_to_use = 0;
2928
Jesse Brandeburg9891ca72009-03-13 22:14:50 +00002929 if (rx_ring->head)
2930 writel(0, adapter->hw.hw_addr + rx_ring->head);
2931 if (rx_ring->tail)
2932 writel(0, adapter->hw.hw_addr + rx_ring->tail);
Auke Kok9a799d72007-09-15 14:07:45 -07002933}
2934
2935/**
2936 * ixgbe_clean_tx_ring - Free Tx Buffers
2937 * @adapter: board private structure
2938 * @tx_ring: ring to be cleaned
2939 **/
2940static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002941 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002942{
2943 struct ixgbe_tx_buffer *tx_buffer_info;
2944 unsigned long size;
2945 unsigned int i;
2946
2947 /* Free all the Tx ring sk_buffs */
2948
2949 for (i = 0; i < tx_ring->count; i++) {
2950 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2951 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2952 }
2953
2954 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2955 memset(tx_ring->tx_buffer_info, 0, size);
2956
2957 /* Zero out the descriptor ring */
2958 memset(tx_ring->desc, 0, tx_ring->size);
2959
2960 tx_ring->next_to_use = 0;
2961 tx_ring->next_to_clean = 0;
2962
Jesse Brandeburg9891ca72009-03-13 22:14:50 +00002963 if (tx_ring->head)
2964 writel(0, adapter->hw.hw_addr + tx_ring->head);
2965 if (tx_ring->tail)
2966 writel(0, adapter->hw.hw_addr + tx_ring->tail);
Auke Kok9a799d72007-09-15 14:07:45 -07002967}
2968
2969/**
Auke Kok9a799d72007-09-15 14:07:45 -07002970 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
2971 * @adapter: board private structure
2972 **/
2973static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
2974{
2975 int i;
2976
2977 for (i = 0; i < adapter->num_rx_queues; i++)
2978 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2979}
2980
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002981/**
2982 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
2983 * @adapter: board private structure
2984 **/
2985static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
2986{
2987 int i;
2988
2989 for (i = 0; i < adapter->num_tx_queues; i++)
2990 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2991}
2992
Auke Kok9a799d72007-09-15 14:07:45 -07002993void ixgbe_down(struct ixgbe_adapter *adapter)
2994{
2995 struct net_device *netdev = adapter->netdev;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002996 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07002997 u32 rxctrl;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002998 u32 txdctl;
2999 int i, j;
Auke Kok9a799d72007-09-15 14:07:45 -07003000
3001 /* signal that we are down to the interrupt handler */
3002 set_bit(__IXGBE_DOWN, &adapter->state);
3003
3004 /* disable receives */
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003005 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3006 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
Auke Kok9a799d72007-09-15 14:07:45 -07003007
3008 netif_tx_disable(netdev);
3009
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003010 IXGBE_WRITE_FLUSH(hw);
Auke Kok9a799d72007-09-15 14:07:45 -07003011 msleep(10);
3012
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003013 netif_tx_stop_all_queues(netdev);
3014
Auke Kok9a799d72007-09-15 14:07:45 -07003015 ixgbe_irq_disable(adapter);
3016
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003017 ixgbe_napi_disable_all(adapter);
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003018
Don Skidmore0a1f87c2009-09-18 09:45:43 +00003019 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3020 del_timer_sync(&adapter->sfp_timer);
Auke Kok9a799d72007-09-15 14:07:45 -07003021 del_timer_sync(&adapter->watchdog_timer);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003022 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07003023
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003024 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3025 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3026 cancel_work_sync(&adapter->fdir_reinit_task);
3027
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003028 /* disable transmits in the hardware now that interrupts are off */
3029 for (i = 0; i < adapter->num_tx_queues; i++) {
3030 j = adapter->tx_ring[i].reg_idx;
3031 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3032 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
3033 (txdctl & ~IXGBE_TXDCTL_ENABLE));
3034 }
PJ Waskiewicz88512532009-03-13 22:15:10 +00003035 /* Disable the Tx DMA engine on 82599 */
3036 if (hw->mac.type == ixgbe_mac_82599EB)
3037 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
3038 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3039 ~IXGBE_DMATXCTL_TE));
Jesse Brandeburg7f821872008-09-11 20:00:16 -07003040
Auke Kok9a799d72007-09-15 14:07:45 -07003041 netif_carrier_off(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07003042
Paul Larson6f4a0e42008-06-24 17:00:56 -07003043 if (!pci_channel_offline(adapter->pdev))
3044 ixgbe_reset(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003045 ixgbe_clean_all_tx_rings(adapter);
3046 ixgbe_clean_all_rx_rings(adapter);
3047
Jeff Garzik5dd2d332008-10-16 05:09:31 -04003048#ifdef CONFIG_IXGBE_DCA
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07003049 /* since we reset the hardware DCA settings were cleared */
Alexander Duycke35ec122009-05-21 13:07:12 +00003050 ixgbe_setup_dca(adapter);
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07003051#endif
Auke Kok9a799d72007-09-15 14:07:45 -07003052}
3053
Auke Kok9a799d72007-09-15 14:07:45 -07003054/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003055 * ixgbe_poll - NAPI Rx polling callback
3056 * @napi: structure for representing this polling device
3057 * @budget: how many packets driver is allowed to clean
3058 *
3059 * This function is used for legacy and MSI, NAPI mode
Auke Kok9a799d72007-09-15 14:07:45 -07003060 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003061static int ixgbe_poll(struct napi_struct *napi, int budget)
Auke Kok9a799d72007-09-15 14:07:45 -07003062{
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00003063 struct ixgbe_q_vector *q_vector =
3064 container_of(napi, struct ixgbe_q_vector, napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003065 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00003066 int tx_clean_complete, work_done = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003067
Jeff Garzik5dd2d332008-10-16 05:09:31 -04003068#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08003069 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3070 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
3071 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
3072 }
3073#endif
3074
Alexander Duyckfe49f042009-06-04 16:00:09 +00003075 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
Herbert Xu78b6f4c2009-01-18 21:49:45 -08003076 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
Auke Kok9a799d72007-09-15 14:07:45 -07003077
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00003078 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08003079 work_done = budget;
3080
David S. Miller53e52c72008-01-07 21:06:12 -08003081 /* If budget not fully consumed, exit the polling mode */
3082 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08003083 napi_complete(napi);
Nelson, Shannonf7554a22009-09-18 09:46:06 +00003084 if (adapter->rx_itr_setting & 1)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08003085 ixgbe_set_itr(adapter);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003086 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Nelson, Shannon835462f2009-04-27 22:42:54 +00003087 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07003088 }
Auke Kok9a799d72007-09-15 14:07:45 -07003089 return work_done;
3090}
3091
3092/**
3093 * ixgbe_tx_timeout - Respond to a Tx Hang
3094 * @netdev: network interface device structure
3095 **/
3096static void ixgbe_tx_timeout(struct net_device *netdev)
3097{
3098 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3099
3100 /* Do the reset outside of interrupt context */
3101 schedule_work(&adapter->reset_task);
3102}
3103
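/**
 * ixgbe_reset_task - work handler that resets the adapter outside of
 * interrupt context (scheduled e.g. from ixgbe_tx_timeout())
 * @work: work_struct embedded in the adapter structure
 **/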
3104static void ixgbe_reset_task(struct work_struct *work)
3105{
3106 struct ixgbe_adapter *adapter;
3107 adapter = container_of(work, struct ixgbe_adapter, reset_task);
3108
Alexander Duyck2f90b862008-11-20 20:52:10 -08003109 /* If we're already down or resetting, just bail */
3110 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
3111 test_bit(__IXGBE_RESETTING, &adapter->state))
3112 return;
3113
Auke Kok9a799d72007-09-15 14:07:45 -07003114 adapter->tx_timeout_count++;
3115
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003116 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003117}
3118
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003119#ifdef CONFIG_IXGBE_DCB
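/**
 * ixgbe_set_dcb_queues - allocate Rx/Tx queues when DCB is enabled
 * @adapter: board private structure to initialize
 *
 * Returns true and takes the queue counts from the DCB ring feature's
 * indices when DCB is enabled; otherwise returns false so the caller can
 * fall through to the next queue-allocation scheme.
 **/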
3120static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003121{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003122 bool ret = false;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003123 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003124
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003125 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3126 return ret;
3127
3128 f->mask = 0x7 << 3;
3129 adapter->num_rx_queues = f->indices;
3130 adapter->num_tx_queues = f->indices;
3131 ret = true;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003132
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003133 return ret;
3134}
3135#endif
3136
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003137/**
3138 * ixgbe_set_rss_queues: Allocate queues for RSS
3139 * @adapter: board private structure to initialize
3140 *
3141 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
3142 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
3143 *
3144 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003145static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
3146{
3147 bool ret = false;
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003148 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003149
3150 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Jesse Brandeburg0cefafa2009-05-19 09:19:11 +00003151 f->mask = 0xF;
3152 adapter->num_rx_queues = f->indices;
3153 adapter->num_tx_queues = f->indices;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003154 ret = true;
3155 } else {
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003156 ret = false;
3157 }
3158
3159 return ret;
3160}
3161
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003162/**
3163 * ixgbe_set_fdir_queues: Allocate queues for Flow Director
3164 * @adapter: board private structure to initialize
3165 *
3166 * Flow Director is an advanced Rx filter, attempting to get Rx flows back
3167 * to the original CPU that initiated the Tx session. This runs in addition
3168 * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
3169 * Rx load across CPUs using RSS.
3170 *
3171 **/
3172static bool inline ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3173{
3174 bool ret = false;
3175 struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
3176
3177 f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
3178 f_fdir->mask = 0;
3179
3180 /* Flow Director must have RSS enabled */
3181 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
3182 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3183 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
3184 adapter->num_tx_queues = f_fdir->indices;
3185 adapter->num_rx_queues = f_fdir->indices;
3186 ret = true;
3187 } else {
3188 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3189 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3190 }
3191 return ret;
3192}
3193
Yi Zou0331a832009-05-17 12:33:52 +00003194#ifdef IXGBE_FCOE
3195/**
3196 * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
3197 * @adapter: board private structure to initialize
3198 *
3199 * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
3200 * The ring feature mask is not used as a mask for FCoE, as it can take any 8
3201 * rx queues out of the max number of rx queues; instead, it is used as the
3202 * index of the first rx queue used by FCoE.
3203 *
3204 **/
3205static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3206{
3207 bool ret = false;
3208 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3209
3210 f->indices = min((int)num_online_cpus(), f->indices);
3211 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00003212 adapter->num_rx_queues = 1;
3213 adapter->num_tx_queues = 1;
Yi Zou0331a832009-05-17 12:33:52 +00003214#ifdef CONFIG_IXGBE_DCB
3215 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00003216 DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
Yi Zou0331a832009-05-17 12:33:52 +00003217 ixgbe_set_dcb_queues(adapter);
3218 }
3219#endif
3220 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00003221 DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n");
Yi Zou8faa2a72009-07-09 02:29:50 +00003222 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3223 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3224 ixgbe_set_fdir_queues(adapter);
3225 else
3226 ixgbe_set_rss_queues(adapter);
Yi Zou0331a832009-05-17 12:33:52 +00003227 }
3228 /* adding FCoE rx rings to the end */
3229 f->mask = adapter->num_rx_queues;
3230 adapter->num_rx_queues += f->indices;
Yi Zou8de8b2e2009-09-03 14:55:50 +00003231 adapter->num_tx_queues += f->indices;
Yi Zou0331a832009-05-17 12:33:52 +00003232
3233 ret = true;
3234 }
3235
3236 return ret;
3237}
3238
3239#endif /* IXGBE_FCOE */
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003240/**
3241 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
3242 * @adapter: board private structure to initialize
3243 *
3244 * This is the top level queue allocation routine. The order here is very
3245 * important, starting with the "most" number of features turned on at once,
3246 * and ending with the smallest set of features. This way large combinations
3247 * can be allocated if they're turned on, and smaller combinations are the
3248 * fallthrough conditions.
3249 *
3250 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003251static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
3252{
Yi Zou0331a832009-05-17 12:33:52 +00003253#ifdef IXGBE_FCOE
3254 if (ixgbe_set_fcoe_queues(adapter))
3255 goto done;
3256
3257#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003258#ifdef CONFIG_IXGBE_DCB
3259 if (ixgbe_set_dcb_queues(adapter))
Wu Fengguangaf22ab12009-04-14 21:54:07 -07003260 goto done;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003261
3262#endif
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003263 if (ixgbe_set_fdir_queues(adapter))
3264 goto done;
3265
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003266 if (ixgbe_set_rss_queues(adapter))
Wu Fengguangaf22ab12009-04-14 21:54:07 -07003267 goto done;
3268
3269 /* fallback to base case */
3270 adapter->num_rx_queues = 1;
3271 adapter->num_tx_queues = 1;
3272
3273done:
3274 /* Notify the stack of the (possibly) reduced Tx Queue count. */
3275 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003276}
3277
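/**
 * ixgbe_acquire_msix_vectors - reserve MSI-X vectors from the OS
 * @adapter: board private structure
 * @vectors: number of vectors initially requested
 *
 * When pci_enable_msix() cannot grant the full request it returns the
 * number of vectors it could support, so retry with that smaller count
 * until we succeed or fall below MIN_MSIX_COUNT, at which point MSI-X is
 * abandoned entirely.
 **/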
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003278static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003279 int vectors)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003280{
3281 int err, vector_threshold;
3282
3283 /* We'll want at least 3 (vector_threshold):
3284 * 1) TxQ[0] Cleanup
3285 * 2) RxQ[0] Cleanup
3286 * 3) Other (Link Status Change, etc.)
3287 * 4) TCP Timer (optional)
3288 */
3289 vector_threshold = MIN_MSIX_COUNT;
3290
3291 /* The more we get, the more we will assign to Tx/Rx Cleanup
3292 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
3293 * Right now, we simply care about how many we'll get; we'll
3294 * set them up later while requesting irq's.
3295 */
3296 while (vectors >= vector_threshold) {
3297 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003298 vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003299 if (!err) /* Success in acquiring all requested vectors. */
3300 break;
3301 else if (err < 0)
3302 vectors = 0; /* Nasty failure, quit now */
3303 else /* err == number of vectors we should try again with */
3304 vectors = err;
3305 }
3306
3307 if (vectors < vector_threshold) {
3308 /* Can't allocate enough MSI-X interrupts? Oh well.
3309 * This just means we'll go with either a single MSI
3310 * vector or fall back to legacy interrupts.
3311 */
3312 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
3313 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3314 kfree(adapter->msix_entries);
3315 adapter->msix_entries = NULL;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003316 } else {
3317 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
Peter P Waskiewicz Jreb7f1392009-02-01 01:18:58 -08003318 /*
3319 * Adjust for only the vectors we'll use, which is minimum
3320 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
3321 * vectors we were allocated.
3322 */
3323 adapter->num_msix_vectors = min(vectors,
3324 adapter->max_msix_q_vectors + NON_Q_VECTORS);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003325 }
3326}
3327
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003328/**
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003329 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003330 * @adapter: board private structure to initialize
3331 *
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003332 * Cache the descriptor ring offsets for RSS to the assigned rings.
3333 *
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003334 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003335static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003336{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003337 int i;
3338 bool ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003339
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003340 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3341 for (i = 0; i < adapter->num_rx_queues; i++)
3342 adapter->rx_ring[i].reg_idx = i;
3343 for (i = 0; i < adapter->num_tx_queues; i++)
3344 adapter->tx_ring[i].reg_idx = i;
3345 ret = true;
3346 } else {
3347 ret = false;
3348 }
3349
3350 return ret;
3351}
3352
3353#ifdef CONFIG_IXGBE_DCB
3354/**
3355 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
3356 * @adapter: board private structure to initialize
3357 *
3358 * Cache the descriptor ring offsets for DCB to the assigned rings.
3359 *
3360 **/
3361static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
3362{
3363 int i;
3364 bool ret = false;
3365 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
3366
3367 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3368 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
Alexander Duyck2f90b862008-11-20 20:52:10 -08003369 /* the number of queues is assumed to be symmetric */
3370 for (i = 0; i < dcb_i; i++) {
3371 adapter->rx_ring[i].reg_idx = i << 3;
3372 adapter->tx_ring[i].reg_idx = i << 2;
3373 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003374 ret = true;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003375 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
PJ Waskiewiczf92ef202009-04-16 15:00:20 +00003376 if (dcb_i == 8) {
3377 /*
3378 * Tx TC0 starts at: descriptor queue 0
3379 * Tx TC1 starts at: descriptor queue 32
3380 * Tx TC2 starts at: descriptor queue 64
3381 * Tx TC3 starts at: descriptor queue 80
3382 * Tx TC4 starts at: descriptor queue 96
3383 * Tx TC5 starts at: descriptor queue 104
3384 * Tx TC6 starts at: descriptor queue 112
3385 * Tx TC7 starts at: descriptor queue 120
3386 *
3387 * Rx TC0-TC7 are offset by 16 queues each
3388 */
3389 for (i = 0; i < 3; i++) {
3390 adapter->tx_ring[i].reg_idx = i << 5;
3391 adapter->rx_ring[i].reg_idx = i << 4;
3392 }
3393 for ( ; i < 5; i++) {
3394 adapter->tx_ring[i].reg_idx =
3395 ((i + 2) << 4);
3396 adapter->rx_ring[i].reg_idx = i << 4;
3397 }
3398 for ( ; i < dcb_i; i++) {
3399 adapter->tx_ring[i].reg_idx =
3400 ((i + 8) << 3);
3401 adapter->rx_ring[i].reg_idx = i << 4;
3402 }
3403
3404 ret = true;
3405 } else if (dcb_i == 4) {
3406 /*
3407 * Tx TC0 starts at: descriptor queue 0
3408 * Tx TC1 starts at: descriptor queue 64
3409 * Tx TC2 starts at: descriptor queue 96
3410 * Tx TC3 starts at: descriptor queue 112
3411 *
3412 * Rx TC0-TC3 are offset by 32 queues each
3413 */
3414 adapter->tx_ring[0].reg_idx = 0;
3415 adapter->tx_ring[1].reg_idx = 64;
3416 adapter->tx_ring[2].reg_idx = 96;
3417 adapter->tx_ring[3].reg_idx = 112;
3418 for (i = 0 ; i < dcb_i; i++)
3419 adapter->rx_ring[i].reg_idx = i << 5;
3420
3421 ret = true;
3422 } else {
3423 ret = false;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003424 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003425 } else {
3426 ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003427 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003428 } else {
3429 ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003430 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003431
3432 return ret;
3433}
3434#endif
3435
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003436/**
3437 * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
3438 * @adapter: board private structure to initialize
3439 *
3440 * Cache the descriptor ring offsets for Flow Director to the assigned rings.
3441 *
3442 **/
3443static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
3444{
3445 int i;
3446 bool ret = false;
3447
3448 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
3449 ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3450 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
3451 for (i = 0; i < adapter->num_rx_queues; i++)
3452 adapter->rx_ring[i].reg_idx = i;
3453 for (i = 0; i < adapter->num_tx_queues; i++)
3454 adapter->tx_ring[i].reg_idx = i;
3455 ret = true;
3456 }
3457
3458 return ret;
3459}
3460
Yi Zou0331a832009-05-17 12:33:52 +00003461#ifdef IXGBE_FCOE
3462/**
3463 * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for FCoE
3464 * @adapter: board private structure to initialize
3465 *
3466 * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
3467 *
3468 */
3469static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
3470{
Yi Zou8de8b2e2009-09-03 14:55:50 +00003471 int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
Yi Zou0331a832009-05-17 12:33:52 +00003472 bool ret = false;
3473 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3474
3475 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
3476#ifdef CONFIG_IXGBE_DCB
3477 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
Yi Zou8de8b2e2009-09-03 14:55:50 +00003478 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
3479
Yi Zou0331a832009-05-17 12:33:52 +00003480 ixgbe_cache_ring_dcb(adapter);
Yi Zou8de8b2e2009-09-03 14:55:50 +00003481 /* find out queues in TC for FCoE */
3482 fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
3483 fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
3484 /*
3485 * In 82599, the number of Tx queues for each traffic
3486 * class for both 8-TC and 4-TC modes are:
3487 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
3488 * 8 TCs: 32 32 16 16 8 8 8 8
3489 * 4 TCs: 64 64 32 32
3490 * We have max 8 queues for FCoE, where 8 is the
3491 * FCoE redirection table size. If TC for FCoE is
3492 * less than or equal to TC3, we have enough queues
3493 * to add max of 8 queues for FCoE, so we start FCoE
3494 * tx descriptor from the next one, i.e., reg_idx + 1.
3495 * If TC for FCoE is above TC3, implying 8 TC mode,
3496 * and we need 8 for FCoE, we have to take all queues
3497 * in that traffic class for FCoE.
3498 */
3499 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
3500 fcoe_tx_i--;
Yi Zou0331a832009-05-17 12:33:52 +00003501 }
3502#endif /* CONFIG_IXGBE_DCB */
3503 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Yi Zou8faa2a72009-07-09 02:29:50 +00003504 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3505 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3506 ixgbe_cache_ring_fdir(adapter);
3507 else
3508 ixgbe_cache_ring_rss(adapter);
3509
Yi Zou8de8b2e2009-09-03 14:55:50 +00003510 fcoe_rx_i = f->mask;
3511 fcoe_tx_i = f->mask;
Yi Zou0331a832009-05-17 12:33:52 +00003512 }
Yi Zou8de8b2e2009-09-03 14:55:50 +00003513 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
3514 adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
3515 adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
3516 }
Yi Zou0331a832009-05-17 12:33:52 +00003517 ret = true;
3518 }
3519 return ret;
3520}
3521
3522#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003523/**
3524 * ixgbe_cache_ring_register - Descriptor ring to register mapping
3525 * @adapter: board private structure to initialize
3526 *
3527 * Once we know the feature-set enabled for the device, we'll cache
3528 * the register offset the descriptor ring is assigned to.
3529 *
3530 * Note, the order of the various feature calls is important. It must start with
3531 * the "most" features enabled at the same time, then trickle down to the
3532 * least amount of features turned on at once.
3533 **/
3534static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
3535{
3536 /* start with default case */
3537 adapter->rx_ring[0].reg_idx = 0;
3538 adapter->tx_ring[0].reg_idx = 0;
3539
Yi Zou0331a832009-05-17 12:33:52 +00003540#ifdef IXGBE_FCOE
3541 if (ixgbe_cache_ring_fcoe(adapter))
3542 return;
3543
3544#endif /* IXGBE_FCOE */
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003545#ifdef CONFIG_IXGBE_DCB
3546 if (ixgbe_cache_ring_dcb(adapter))
3547 return;
3548
3549#endif
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003550 if (ixgbe_cache_ring_fdir(adapter))
3551 return;
3552
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08003553 if (ixgbe_cache_ring_rss(adapter))
3554 return;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003555}
3556
Auke Kok9a799d72007-09-15 14:07:45 -07003557/**
3558 * ixgbe_alloc_queues - Allocate memory for all rings
3559 * @adapter: board private structure to initialize
3560 *
3561 * We allocate one ring per queue at run-time since we don't know the
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003562 * number of queues at compile-time. The ring arrays are
3563 * sized for multiqueue operation, but work fine with a single queue.
Auke Kok9a799d72007-09-15 14:07:45 -07003564 **/
Alexander Duyck2f90b862008-11-20 20:52:10 -08003565static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07003566{
3567 int i;
3568
3569 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003570 sizeof(struct ixgbe_ring), GFP_KERNEL);
Auke Kok9a799d72007-09-15 14:07:45 -07003571 if (!adapter->tx_ring)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003572 goto err_tx_ring_allocation;
Auke Kok9a799d72007-09-15 14:07:45 -07003573
3574 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003575 sizeof(struct ixgbe_ring), GFP_KERNEL);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003576 if (!adapter->rx_ring)
3577 goto err_rx_ring_allocation;
3578
3579 for (i = 0; i < adapter->num_tx_queues; i++) {
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003580 adapter->tx_ring[i].count = adapter->tx_ring_count;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003581 adapter->tx_ring[i].queue_index = i;
3582 }
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003583
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003584 for (i = 0; i < adapter->num_rx_queues; i++) {
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003585 adapter->rx_ring[i].count = adapter->rx_ring_count;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003586 adapter->rx_ring[i].queue_index = i;
Auke Kok9a799d72007-09-15 14:07:45 -07003587 }
3588
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003589 ixgbe_cache_ring_register(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003590
3591 return 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003592
3593err_rx_ring_allocation:
3594 kfree(adapter->tx_ring);
3595err_tx_ring_allocation:
3596 return -ENOMEM;
3597}
3598
3599/**
3600 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
3601 * @adapter: board private structure to initialize
3602 *
3603 * Attempt to configure the interrupts using the best available
3604 * capabilities of the hardware and the kernel.
3605 **/
Al Virofeea6a52008-11-27 15:34:07 -08003606static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003607{
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00003608 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003609 int err = 0;
3610 int vector, v_budget;
3611
3612 /*
3613 * It's easy to be greedy for MSI-X vectors, but it really
3614 * doesn't do us much good if we have a lot more vectors
3615 * than CPUs. So let's be conservative and only ask for
3616 * (roughly) twice the number of vectors as there are CPUs.
3617 */
3618 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003619 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003620
3621 /*
3622 * At the same time, hardware can only support a maximum of
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00003623 * hw.mac->max_msix_vectors vectors. With features
3624 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
3625 * descriptor queues supported by our device. Thus, we cap it off in
3626 * those rare cases where the cpu count also exceeds our vector limit.
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003627 */
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00003628 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003629
3630 /* A failure in MSI-X entry allocation isn't fatal, but it does
3631 * mean we disable MSI-X capabilities of the adapter. */
3632 adapter->msix_entries = kcalloc(v_budget,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003633 sizeof(struct msix_entry), GFP_KERNEL);
Alexander Duyck7a921c92009-05-06 10:43:28 +00003634 if (adapter->msix_entries) {
3635 for (vector = 0; vector < v_budget; vector++)
3636 adapter->msix_entries[vector].entry = vector;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003637
Alexander Duyck7a921c92009-05-06 10:43:28 +00003638 ixgbe_acquire_msix_vectors(adapter, v_budget);
3639
3640 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3641 goto out;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003642 }
3643
Alexander Duyck7a921c92009-05-06 10:43:28 +00003644 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
3645 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003646 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3647 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3648 adapter->atr_sample_rate = 0;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003649 ixgbe_set_num_queues(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003650
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003651 err = pci_enable_msi(adapter->pdev);
3652 if (!err) {
3653 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
3654 } else {
3655 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003656 "falling back to legacy. Error: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003657 /* reset err */
3658 err = 0;
3659 }
3660
3661out:
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003662 return err;
3663}
3664
Alexander Duyck7a921c92009-05-06 10:43:28 +00003665/**
3666 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
3667 * @adapter: board private structure to initialize
3668 *
3669 * We allocate one q_vector per queue interrupt. If allocation fails we
3670 * return -ENOMEM.
3671 **/
3672static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
3673{
3674 int q_idx, num_q_vectors;
3675 struct ixgbe_q_vector *q_vector;
3676 int napi_vectors;
3677 int (*poll)(struct napi_struct *, int);
3678
3679 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3680 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3681 napi_vectors = adapter->num_rx_queues;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003682 poll = &ixgbe_clean_rxtx_many;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003683 } else {
3684 num_q_vectors = 1;
3685 napi_vectors = 1;
3686 poll = &ixgbe_poll;
3687 }
3688
3689 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3690 q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
3691 if (!q_vector)
3692 goto err_out;
3693 q_vector->adapter = adapter;
Nelson, Shannonf7554a22009-09-18 09:46:06 +00003694 if (q_vector->txr_count && !q_vector->rxr_count)
3695 q_vector->eitr = adapter->tx_eitr_param;
3696 else
3697 q_vector->eitr = adapter->rx_eitr_param;
Alexander Duyckfe49f042009-06-04 16:00:09 +00003698 q_vector->v_idx = q_idx;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003699 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
Alexander Duyck7a921c92009-05-06 10:43:28 +00003700 adapter->q_vector[q_idx] = q_vector;
3701 }
3702
3703 return 0;
3704
3705err_out:
3706 while (q_idx) {
3707 q_idx--;
3708 q_vector = adapter->q_vector[q_idx];
3709 netif_napi_del(&q_vector->napi);
3710 kfree(q_vector);
3711 adapter->q_vector[q_idx] = NULL;
3712 }
3713 return -ENOMEM;
3714}
3715
3716/**
3717 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
3718 * @adapter: board private structure to initialize
3719 *
3720 * This function frees the memory allocated to the q_vectors. In addition, if
3721 * NAPI is enabled, it will delete any references to the NAPI struct prior
3722 * to freeing the q_vector.
3723 **/
3724static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
3725{
3726 int q_idx, num_q_vectors;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003727
Alexander Duyck91281fd2009-06-04 16:00:27 +00003728 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
Alexander Duyck7a921c92009-05-06 10:43:28 +00003729 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003730 else
Alexander Duyck7a921c92009-05-06 10:43:28 +00003731 num_q_vectors = 1;
Alexander Duyck7a921c92009-05-06 10:43:28 +00003732
3733 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3734 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
Alexander Duyck7a921c92009-05-06 10:43:28 +00003735 adapter->q_vector[q_idx] = NULL;
Alexander Duyck91281fd2009-06-04 16:00:27 +00003736 netif_napi_del(&q_vector->napi);
Alexander Duyck7a921c92009-05-06 10:43:28 +00003737 kfree(q_vector);
3738 }
3739}
3740
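/**
 * ixgbe_reset_interrupt_capability - release MSI-X/MSI resources
 * @adapter: board private structure
 *
 * Undoes ixgbe_set_interrupt_capability(): disables MSI-X or MSI on the
 * PCI device and frees the MSI-X entry array.
 **/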
Don Skidmore7b25cdb2009-08-25 04:47:32 +00003741static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003742{
3743 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3744 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3745 pci_disable_msix(adapter->pdev);
3746 kfree(adapter->msix_entries);
3747 adapter->msix_entries = NULL;
3748 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
3749 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
3750 pci_disable_msi(adapter->pdev);
3751 }
3752 return;
3753}
3754
3755/**
3756 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
3757 * @adapter: board private structure to initialize
3758 *
3759 * We determine which interrupt scheme to use based on...
3760 * - Kernel support (MSI, MSI-X), which the user can override
3761 *   via MODULE_PARAM
3762 * - Hardware queue count (num_*_queues), which is defined by
3763 *   miscellaneous hardware support/features (RSS, etc.)
3764 **/
Alexander Duyck2f90b862008-11-20 20:52:10 -08003765int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003766{
3767 int err;
3768
3769 /* Number of supported queues */
3770 ixgbe_set_num_queues(adapter);
3771
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003772 err = ixgbe_set_interrupt_capability(adapter);
3773 if (err) {
3774 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
3775 goto err_set_interrupt;
3776 }
3777
Alexander Duyck7a921c92009-05-06 10:43:28 +00003778 err = ixgbe_alloc_q_vectors(adapter);
3779 if (err) {
3780 DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
3781 "vectors\n");
3782 goto err_alloc_q_vectors;
3783 }
3784
3785 err = ixgbe_alloc_queues(adapter);
3786 if (err) {
3787 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
3788 goto err_alloc_queues;
3789 }
3790
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003791 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003792 "Tx Queue count = %u\n",
3793 (adapter->num_rx_queues > 1) ? "Enabled" :
3794 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003795
3796 set_bit(__IXGBE_DOWN, &adapter->state);
3797
3798 return 0;
3799
Alexander Duyck7a921c92009-05-06 10:43:28 +00003800err_alloc_queues:
3801 ixgbe_free_q_vectors(adapter);
3802err_alloc_q_vectors:
3803 ixgbe_reset_interrupt_capability(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003804err_set_interrupt:
Alexander Duyck7a921c92009-05-06 10:43:28 +00003805 return err;
3806}
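/*
 * Note: ixgbe_init_interrupt_scheme() and ixgbe_clear_interrupt_scheme()
 * (further below) are used as a matched pair, e.g.:
 *
 *	ixgbe_clear_interrupt_scheme(adapter);
 *	... change queue/feature configuration ...
 *	err = ixgbe_init_interrupt_scheme(adapter);
 *
 * The suspend and resume handlers below follow this pattern across the
 * sleep transition.
 */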
3807
3808/**
3809 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
3810 * @adapter: board private structure to clear interrupt scheme on
3811 *
3812 * We go through and clear interrupt specific resources and reset the structure
3813 * to pre-load conditions
3814 **/
3815void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
3816{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003817 kfree(adapter->tx_ring);
3818 kfree(adapter->rx_ring);
Alexander Duyck7a921c92009-05-06 10:43:28 +00003819 adapter->tx_ring = NULL;
3820 adapter->rx_ring = NULL;
3821
3822 ixgbe_free_q_vectors(adapter);
3823 ixgbe_reset_interrupt_capability(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003824}
3825
3826/**
Donald Skidmorec4900be2008-11-20 21:11:42 -08003827 * ixgbe_sfp_timer - timer call-back that schedules the search for a missing module
3828 * @data: pointer to our adapter struct
3829 **/
3830static void ixgbe_sfp_timer(unsigned long data)
3831{
3832 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
3833
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003834 /*
3835 * Do the sfp_timer outside of interrupt context due to the
Donald Skidmorec4900be2008-11-20 21:11:42 -08003836 * delays that sfp+ detection requires
3837 */
3838 schedule_work(&adapter->sfp_task);
3839}
3840
3841/**
3842 * ixgbe_sfp_task - worker thread to find a missing module
3843 * @work: pointer to work_struct containing our data
3844 **/
3845static void ixgbe_sfp_task(struct work_struct *work)
3846{
3847 struct ixgbe_adapter *adapter = container_of(work,
3848 struct ixgbe_adapter,
3849 sfp_task);
3850 struct ixgbe_hw *hw = &adapter->hw;
3851
3852 if ((hw->phy.type == ixgbe_phy_nl) &&
3853 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3854 s32 ret = hw->phy.ops.identify_sfp(hw);
Don Skidmore63d6e1d2009-07-02 12:50:12 +00003855 if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
Donald Skidmorec4900be2008-11-20 21:11:42 -08003856 goto reschedule;
3857 ret = hw->phy.ops.reset(hw);
3858 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
Don Skidmore88d2b812009-06-30 11:43:55 +00003859 dev_err(&adapter->pdev->dev, "failed to initialize "
3860 "because an unsupported SFP+ module type "
3861 "was detected.\n"
3862 "Reload the driver after installing a "
3863 "supported module.\n");
Donald Skidmorec4900be2008-11-20 21:11:42 -08003864 unregister_netdev(adapter->netdev);
3865 } else {
3866 DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
3867 hw->phy.sfp_type);
3868 }
3869 /* don't need this routine any more */
3870 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3871 }
3872 return;
3873reschedule:
3874 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
3875 mod_timer(&adapter->sfp_timer,
3876 round_jiffies(jiffies + (2 * HZ)));
3877}
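/*
 * Together the timer and the task above form a simple retry loop: while
 * __IXGBE_SFP_MODULE_NOT_FOUND is set, the task keeps being rescheduled
 * roughly every two seconds until a module is identified (or the netdev
 * is unregistered because the module type is unsupported).
 */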
3878
3879/**
Auke Kok9a799d72007-09-15 14:07:45 -07003880 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
3881 * @adapter: board private structure to initialize
3882 *
3883 * ixgbe_sw_init initializes the Adapter private data structure.
3884 * Fields are initialized based on PCI device information and
3885 * OS network device settings (MTU size).
3886 **/
3887static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
3888{
3889 struct ixgbe_hw *hw = &adapter->hw;
3890 struct pci_dev *pdev = adapter->pdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003891 unsigned int rss;
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08003892#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08003893 int j;
3894 struct tc_configuration *tc;
3895#endif
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003896
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07003897 /* PCI config space info */
3898
3899 hw->vendor_id = pdev->vendor;
3900 hw->device_id = pdev->device;
3901 hw->revision_id = pdev->revision;
3902 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3903 hw->subsystem_device_id = pdev->subsystem_device;
3904
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003905 /* Set capability flags */
3906 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
3907 adapter->ring_feature[RING_F_RSS].indices = rss;
3908 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
Alexander Duyck2f90b862008-11-20 20:52:10 -08003909 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
Don Skidmorebf069c92009-05-07 10:39:54 +00003910 if (hw->mac.type == ixgbe_mac_82598EB) {
3911 if (hw->device_id == IXGBE_DEV_ID_82598AT)
3912 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003913 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
Don Skidmorebf069c92009-05-07 10:39:54 +00003914 } else if (hw->mac.type == ixgbe_mac_82599EB) {
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003915 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
Peter P Waskiewicz Jr0c19d6a2009-07-30 12:25:28 +00003916 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
3917 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00003918 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
3919 adapter->ring_feature[RING_F_FDIR].indices =
3920 IXGBE_MAX_FDIR_INDICES;
3921 adapter->atr_sample_rate = 20;
3922 adapter->fdir_pballoc = 0;
Yi Zoueacd73f2009-05-13 13:11:06 +00003923#ifdef IXGBE_FCOE
Yi Zou0d551582009-07-22 14:07:12 +00003924 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
3925 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
3926 adapter->ring_feature[RING_F_FCOE].indices = 0;
Yi Zou6ee16522009-08-31 12:34:28 +00003927 /* Default traffic class to use for FCoE */
3928 adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
Yi Zoueacd73f2009-05-13 13:11:06 +00003929#endif /* IXGBE_FCOE */
Alexander Duyckf8212f92009-04-27 22:42:37 +00003930 }
Alexander Duyck2f90b862008-11-20 20:52:10 -08003931
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08003932#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08003933 /* Configure DCB traffic classes */
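 /*
  * Assuming eight traffic classes (MAX_TRAFFIC_CLASS), the alternating
  * 12%/13% split below sums to 100% across the bandwidth group.
  */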
3934 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
3935 tc = &adapter->dcb_cfg.tc_config[j];
3936 tc->path[DCB_TX_CONFIG].bwg_id = 0;
3937 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
3938 tc->path[DCB_RX_CONFIG].bwg_id = 0;
3939 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
3940 tc->dcb_pfc = pfc_disabled;
3941 }
3942 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
3943 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
3944 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00003945 adapter->dcb_cfg.pfc_mode_enable = false;
Alexander Duyck2f90b862008-11-20 20:52:10 -08003946 adapter->dcb_cfg.round_robin_enable = false;
3947 adapter->dcb_set_bitmap = 0x00;
3948 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
3949 adapter->ring_feature[RING_F_DCB].indices);
3950
3951#endif
Auke Kok9a799d72007-09-15 14:07:45 -07003952
3953 /* default flow control settings */
Don Skidmorecd7664f2009-03-31 21:33:44 +00003954 hw->fc.requested_mode = ixgbe_fc_full;
Don Skidmore71fd5702009-03-31 21:35:05 +00003955 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00003956#ifdef CONFIG_DCB
3957 adapter->last_lfc_mode = hw->fc.current_mode;
3958#endif
Jesse Brandeburg2b9ade92008-08-26 04:27:10 -07003959 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
3960 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
3961 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
3962 hw->fc.send_xon = true;
Don Skidmore71fd5702009-03-31 21:35:05 +00003963 hw->fc.disable_fc_autoneg = false;
Auke Kok9a799d72007-09-15 14:07:45 -07003964
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07003965 /* enable itr by default in dynamic mode */
Nelson, Shannonf7554a22009-09-18 09:46:06 +00003966 adapter->rx_itr_setting = 1;
3967 adapter->rx_eitr_param = 20000;
3968 adapter->tx_itr_setting = 1;
3969 adapter->tx_eitr_param = 10000;
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07003970
3971 /* default low/high thresholds used by dynamic eitr tuning (approx. MB/s) */
3972 adapter->eitr_low = 10;
3973 adapter->eitr_high = 20;
3974
3975 /* set default ring sizes */
3976 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
3977 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
3978
Auke Kok9a799d72007-09-15 14:07:45 -07003979 /* initialize eeprom parameters */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07003980 if (ixgbe_init_eeprom_params_generic(hw)) {
Auke Kok9a799d72007-09-15 14:07:45 -07003981 dev_err(&pdev->dev, "EEPROM initialization failed\n");
3982 return -EIO;
3983 }
3984
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003985 /* enable rx csum by default */
Auke Kok9a799d72007-09-15 14:07:45 -07003986 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
3987
Auke Kok9a799d72007-09-15 14:07:45 -07003988 set_bit(__IXGBE_DOWN, &adapter->state);
3989
3990 return 0;
3991}
3992
3993/**
3994 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
3995 * @adapter: board private structure
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003996 * @tx_ring: tx descriptor ring (for a specific queue) to setup
Auke Kok9a799d72007-09-15 14:07:45 -07003997 *
3998 * Return 0 on success, negative on failure
3999 **/
4000int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07004001 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07004002{
4003 struct pci_dev *pdev = adapter->pdev;
4004 int size;
4005
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004006 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4007 tx_ring->tx_buffer_info = vmalloc(size);
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07004008 if (!tx_ring->tx_buffer_info)
4009 goto err;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004010 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9a799d72007-09-15 14:07:45 -07004011
4012 /* round up to nearest 4K */
Peter P Waskiewicz Jr12207e42009-02-06 21:47:24 -08004013 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004014 tx_ring->size = ALIGN(tx_ring->size, 4096);
Auke Kok9a799d72007-09-15 14:07:45 -07004015
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004016 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
4017 &tx_ring->dma);
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07004018 if (!tx_ring->desc)
4019 goto err;
Auke Kok9a799d72007-09-15 14:07:45 -07004020
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004021 tx_ring->next_to_use = 0;
4022 tx_ring->next_to_clean = 0;
4023 tx_ring->work_limit = tx_ring->count;
Auke Kok9a799d72007-09-15 14:07:45 -07004024 return 0;
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07004025
4026err:
4027 vfree(tx_ring->tx_buffer_info);
4028 tx_ring->tx_buffer_info = NULL;
4029 DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
4030 "descriptor ring\n");
4031 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07004032}
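/*
 * Note: ixgbe_setup_tx_resources()/ixgbe_free_tx_resources() are non-static
 * and are meant to be called in matched pairs; ixgbe_open()/ixgbe_close()
 * drive them through the all-queue helpers below, and a ring-resize caller
 * (presumably the ethtool set_ringparam path) would do something like:
 *
 *	ixgbe_free_tx_resources(adapter, tx_ring);
 *	tx_ring->count = new_count;
 *	err = ixgbe_setup_tx_resources(adapter, tx_ring);
 */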
4033
4034/**
Alexander Duyck69888672008-09-11 20:05:39 -07004035 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
4036 * @adapter: board private structure
4037 *
4038 * If this function returns with an error, then it's possible one or
4039 * more of the rings is populated (while the rest are not). It is the
4040 * callers duty to clean those orphaned rings.
4041 *
4042 * Return 0 on success, negative on failure
4043 **/
4044static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
4045{
4046 int i, err = 0;
4047
4048 for (i = 0; i < adapter->num_tx_queues; i++) {
4049 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
4050 if (!err)
4051 continue;
4052 DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
4053 break;
4054 }
4055
4056 return err;
4057}
4058
4059/**
Auke Kok9a799d72007-09-15 14:07:45 -07004060 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
4061 * @adapter: board private structure
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004062 * @rx_ring: rx descriptor ring (for a specific queue) to setup
Auke Kok9a799d72007-09-15 14:07:45 -07004063 *
4064 * Returns 0 on success, negative on failure
4065 **/
4066int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004067 struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07004068{
4069 struct pci_dev *pdev = adapter->pdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004070 int size;
Auke Kok9a799d72007-09-15 14:07:45 -07004071
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004072 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
4073 rx_ring->rx_buffer_info = vmalloc(size);
4074 if (!rx_ring->rx_buffer_info) {
Auke Kok9a799d72007-09-15 14:07:45 -07004075 DPRINTK(PROBE, ERR,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004076 "vmalloc allocation failed for the rx desc ring\n");
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07004077 goto alloc_failed;
Auke Kok9a799d72007-09-15 14:07:45 -07004078 }
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004079 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9a799d72007-09-15 14:07:45 -07004080
Auke Kok9a799d72007-09-15 14:07:45 -07004081 /* Round up to nearest 4K */
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004082 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
4083 rx_ring->size = ALIGN(rx_ring->size, 4096);
Auke Kok9a799d72007-09-15 14:07:45 -07004084
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004085 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
Auke Kok9a799d72007-09-15 14:07:45 -07004086
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004087 if (!rx_ring->desc) {
Auke Kok9a799d72007-09-15 14:07:45 -07004088 DPRINTK(PROBE, ERR,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004089 "Memory allocation failed for the rx desc ring\n");
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004090 vfree(rx_ring->rx_buffer_info);
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07004091 goto alloc_failed;
Auke Kok9a799d72007-09-15 14:07:45 -07004092 }
4093
Jesse Brandeburg3a581072008-08-26 04:27:08 -07004094 rx_ring->next_to_clean = 0;
4095 rx_ring->next_to_use = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07004096
4097 return 0;
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07004098
4099alloc_failed:
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07004100 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07004101}
4102
4103/**
Alexander Duyck69888672008-09-11 20:05:39 -07004104 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
4105 * @adapter: board private structure
4106 *
4107 * If this function returns with an error, then it's possible one or
4108 * more of the rings is populated (while the rest are not). It is the
4109 * callers duty to clean those orphaned rings.
4110 *
4111 * Return 0 on success, negative on failure
4112 **/
4113
4114static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
4115{
4116 int i, err = 0;
4117
4118 for (i = 0; i < adapter->num_rx_queues; i++) {
4119 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
4120 if (!err)
4121 continue;
4122 DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
4123 break;
4124 }
4125
4126 return err;
4127}
4128
4129/**
Auke Kok9a799d72007-09-15 14:07:45 -07004130 * ixgbe_free_tx_resources - Free Tx Resources per Queue
4131 * @adapter: board private structure
4132 * @tx_ring: Tx descriptor ring for a specific queue
4133 *
4134 * Free all transmit software resources
4135 **/
Jesse Brandeburgc431f972008-09-11 19:59:16 -07004136void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
4137 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07004138{
4139 struct pci_dev *pdev = adapter->pdev;
4140
4141 ixgbe_clean_tx_ring(adapter, tx_ring);
4142
4143 vfree(tx_ring->tx_buffer_info);
4144 tx_ring->tx_buffer_info = NULL;
4145
4146 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
4147
4148 tx_ring->desc = NULL;
4149}
4150
4151/**
4152 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
4153 * @adapter: board private structure
4154 *
4155 * Free all transmit software resources
4156 **/
4157static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
4158{
4159 int i;
4160
4161 for (i = 0; i < adapter->num_tx_queues; i++)
Jesse Brandeburg9891ca72009-03-13 22:14:50 +00004162 if (adapter->tx_ring[i].desc)
4163 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07004164}
4165
4166/**
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004167 * ixgbe_free_rx_resources - Free Rx Resources
Auke Kok9a799d72007-09-15 14:07:45 -07004168 * @adapter: board private structure
4169 * @rx_ring: ring to clean the resources from
4170 *
4171 * Free all receive software resources
4172 **/
Jesse Brandeburgc431f972008-09-11 19:59:16 -07004173void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
4174 struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07004175{
4176 struct pci_dev *pdev = adapter->pdev;
4177
4178 ixgbe_clean_rx_ring(adapter, rx_ring);
4179
4180 vfree(rx_ring->rx_buffer_info);
4181 rx_ring->rx_buffer_info = NULL;
4182
4183 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
4184
4185 rx_ring->desc = NULL;
4186}
4187
4188/**
4189 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
4190 * @adapter: board private structure
4191 *
4192 * Free all receive software resources
4193 **/
4194static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
4195{
4196 int i;
4197
4198 for (i = 0; i < adapter->num_rx_queues; i++)
Jesse Brandeburg9891ca72009-03-13 22:14:50 +00004199 if (adapter->rx_ring[i].desc)
4200 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07004201}
4202
4203/**
Auke Kok9a799d72007-09-15 14:07:45 -07004204 * ixgbe_change_mtu - Change the Maximum Transfer Unit
4205 * @netdev: network interface device structure
4206 * @new_mtu: new value for maximum frame size
4207 *
4208 * Returns 0 on success, negative on failure
4209 **/
4210static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
4211{
4212 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4213 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4214
Jesse Brandeburg42c783c2008-09-11 19:56:28 -07004215 /* MTU < 68 is an error and causes problems on some kernels */
4216 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
Auke Kok9a799d72007-09-15 14:07:45 -07004217 return -EINVAL;
4218
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004219 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004220 netdev->mtu, new_mtu);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004221 /* must set new MTU before calling down or up */
Auke Kok9a799d72007-09-15 14:07:45 -07004222 netdev->mtu = new_mtu;
4223
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08004224 if (netif_running(netdev))
4225 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004226
4227 return 0;
4228}
4229
4230/**
4231 * ixgbe_open - Called when a network interface is made active
4232 * @netdev: network interface device structure
4233 *
4234 * Returns 0 on success, negative value on failure
4235 *
4236 * The open entry point is called when a network interface is made
4237 * active by the system (IFF_UP). At this point all resources needed
4238 * for transmit and receive operations are allocated, the interrupt
4239 * handler is registered with the OS, the watchdog timer is started,
4240 * and the stack is notified that the interface is ready.
4241 **/
4242static int ixgbe_open(struct net_device *netdev)
4243{
4244 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4245 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07004246
Auke Kok4bebfaa2008-02-11 09:26:01 -08004247 /* disallow open during test */
4248 if (test_bit(__IXGBE_TESTING, &adapter->state))
4249 return -EBUSY;
4250
Jesse Brandeburg54386462009-04-17 20:44:27 +00004251 netif_carrier_off(netdev);
4252
Auke Kok9a799d72007-09-15 14:07:45 -07004253 /* allocate transmit descriptors */
4254 err = ixgbe_setup_all_tx_resources(adapter);
4255 if (err)
4256 goto err_setup_tx;
4257
Auke Kok9a799d72007-09-15 14:07:45 -07004258 /* allocate receive descriptors */
4259 err = ixgbe_setup_all_rx_resources(adapter);
4260 if (err)
4261 goto err_setup_rx;
4262
4263 ixgbe_configure(adapter);
4264
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004265 err = ixgbe_request_irq(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004266 if (err)
4267 goto err_req_irq;
4268
Auke Kok9a799d72007-09-15 14:07:45 -07004269 err = ixgbe_up_complete(adapter);
4270 if (err)
4271 goto err_up;
4272
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07004273 netif_tx_start_all_queues(netdev);
4274
Auke Kok9a799d72007-09-15 14:07:45 -07004275 return 0;
4276
4277err_up:
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08004278 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004279 ixgbe_free_irq(adapter);
4280err_req_irq:
Auke Kok9a799d72007-09-15 14:07:45 -07004281err_setup_rx:
Mallikarjuna R Chilakalaa20a1192009-03-31 21:34:44 +00004282 ixgbe_free_all_rx_resources(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004283err_setup_tx:
Mallikarjuna R Chilakalaa20a1192009-03-31 21:34:44 +00004284 ixgbe_free_all_tx_resources(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004285 ixgbe_reset(adapter);
4286
4287 return err;
4288}
4289
4290/**
4291 * ixgbe_close - Disables a network interface
4292 * @netdev: network interface device structure
4293 *
4294 * Returns 0, this is not allowed to fail
4295 *
4296 * The close entry point is called when an interface is de-activated
4297 * by the OS. The hardware is still under the driver's control, but
4298 * needs to be disabled. A global MAC reset is issued to stop the
4299 * hardware, and all transmit and receive resources are freed.
4300 **/
4301static int ixgbe_close(struct net_device *netdev)
4302{
4303 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07004304
4305 ixgbe_down(adapter);
4306 ixgbe_free_irq(adapter);
4307
4308 ixgbe_free_all_tx_resources(adapter);
4309 ixgbe_free_all_rx_resources(adapter);
4310
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08004311 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07004312
4313 return 0;
4314}
4315
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004316#ifdef CONFIG_PM
4317static int ixgbe_resume(struct pci_dev *pdev)
4318{
4319 struct net_device *netdev = pci_get_drvdata(pdev);
4320 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4321 u32 err;
4322
4323 pci_set_power_state(pdev, PCI_D0);
4324 pci_restore_state(pdev);
gouji-new9ce77662009-05-06 10:44:45 +00004325
4326 err = pci_enable_device_mem(pdev);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004327 if (err) {
Alexander Duyck69888672008-09-11 20:05:39 -07004328 printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004329 "suspend\n");
4330 return err;
4331 }
4332 pci_set_master(pdev);
4333
Don Skidmoredd4d8ca2009-04-29 00:22:31 -07004334 pci_wake_from_d3(pdev, false);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004335
4336 err = ixgbe_init_interrupt_scheme(adapter);
4337 if (err) {
4338 printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
4339 "device\n");
4340 return err;
4341 }
4342
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004343 ixgbe_reset(adapter);
4344
Waskiewicz Jr, Peter P495dce12009-04-23 11:15:18 +00004345 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
4346
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004347 if (netif_running(netdev)) {
4348 err = ixgbe_open(adapter->netdev);
4349 if (err)
4350 return err;
4351 }
4352
4353 netif_device_attach(netdev);
4354
4355 return 0;
4356}
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004357#endif /* CONFIG_PM */
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00004358
4359static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004360{
4361 struct net_device *netdev = pci_get_drvdata(pdev);
4362 struct ixgbe_adapter *adapter = netdev_priv(netdev);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004363 struct ixgbe_hw *hw = &adapter->hw;
4364 u32 ctrl, fctrl;
4365 u32 wufc = adapter->wol;
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004366#ifdef CONFIG_PM
4367 int retval = 0;
4368#endif
4369
4370 netif_device_detach(netdev);
4371
4372 if (netif_running(netdev)) {
4373 ixgbe_down(adapter);
4374 ixgbe_free_irq(adapter);
4375 ixgbe_free_all_tx_resources(adapter);
4376 ixgbe_free_all_rx_resources(adapter);
4377 }
Alexander Duyck7a921c92009-05-06 10:43:28 +00004378 ixgbe_clear_interrupt_scheme(adapter);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004379
4380#ifdef CONFIG_PM
4381 retval = pci_save_state(pdev);
4382 if (retval)
4383 return retval;
Jesse Brandeburg4df10462009-03-13 22:15:31 +00004384
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004385#endif
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004386 if (wufc) {
4387 ixgbe_set_rx_mode(netdev);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004388
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004389 /* turn on all-multi mode if wake on multicast is enabled */
4390 if (wufc & IXGBE_WUFC_MC) {
4391 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4392 fctrl |= IXGBE_FCTRL_MPE;
4393 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4394 }
4395
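 		/*
 		 * GIO_DIS appears to gate further PCIe master accesses by
 		 * the device while the wake-up filter controls are armed.
 		 */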
4396 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
4397 ctrl |= IXGBE_CTRL_GIO_DIS;
4398 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
4399
4400 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
4401 } else {
4402 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
4403 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
4404 }
4405
Don Skidmoredd4d8ca2009-04-29 00:22:31 -07004406 if (wufc && hw->mac.type == ixgbe_mac_82599EB)
4407 pci_wake_from_d3(pdev, true);
4408 else
4409 pci_wake_from_d3(pdev, false);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004410
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00004411 *enable_wake = !!wufc;
4412
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004413 ixgbe_release_hw_control(adapter);
4414
4415 pci_disable_device(pdev);
4416
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004417 return 0;
4418}
4419
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00004420#ifdef CONFIG_PM
4421static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
4422{
4423 int retval;
4424 bool wake;
4425
4426 retval = __ixgbe_shutdown(pdev, &wake);
4427 if (retval)
4428 return retval;
4429
4430 if (wake) {
4431 pci_prepare_to_sleep(pdev);
4432 } else {
4433 pci_wake_from_d3(pdev, false);
4434 pci_set_power_state(pdev, PCI_D3hot);
4435 }
4436
4437 return 0;
4438}
4439#endif /* CONFIG_PM */
4440
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004441static void ixgbe_shutdown(struct pci_dev *pdev)
4442{
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00004443 bool wake;
4444
4445 __ixgbe_shutdown(pdev, &wake);
4446
4447 if (system_state == SYSTEM_POWER_OFF) {
4448 pci_wake_from_d3(pdev, wake);
4449 pci_set_power_state(pdev, PCI_D3hot);
4450 }
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07004451}
4452
4453/**
Auke Kok9a799d72007-09-15 14:07:45 -07004454 * ixgbe_update_stats - Update the board statistics counters.
4455 * @adapter: board private structure
4456 **/
4457void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4458{
4459 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08004460 u64 total_mpc = 0;
4461 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
Auke Kok9a799d72007-09-15 14:07:45 -07004462
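 	/*
 	 * Most of the statistics registers read below are clear-on-read, so
 	 * each read is accumulated into the adapter's 64-bit software
 	 * counters rather than assigned.
 	 */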
PJ Waskiewiczd51019a2009-03-13 22:12:48 +00004463 if (hw->mac.type == ixgbe_mac_82599EB) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00004464 u64 rsc_count = 0;
PJ Waskiewiczd51019a2009-03-13 22:12:48 +00004465 for (i = 0; i < 16; i++)
4466 adapter->hw_rx_no_dma_resources +=
4467 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
Alexander Duyckf8212f92009-04-27 22:42:37 +00004468 for (i = 0; i < adapter->num_rx_queues; i++)
4469 rsc_count += adapter->rx_ring[i].rsc_count;
4470 adapter->rsc_count = rsc_count;
PJ Waskiewiczd51019a2009-03-13 22:12:48 +00004471 }
4472
Auke Kok9a799d72007-09-15 14:07:45 -07004473 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08004474 for (i = 0; i < 8; i++) {
4475 /* for packet buffers not used, the register should read 0 */
4476 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
4477 missed_rx += mpc;
4478 adapter->stats.mpc[i] += mpc;
4479 total_mpc += adapter->stats.mpc[i];
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004480 if (hw->mac.type == ixgbe_mac_82598EB)
4481 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
Alexander Duyck2f90b862008-11-20 20:52:10 -08004482 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4483 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
4484 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4485 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004486 if (hw->mac.type == ixgbe_mac_82599EB) {
4487 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
4488 IXGBE_PXONRXCNT(i));
4489 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
4490 IXGBE_PXOFFRXCNT(i));
4491 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004492 } else {
4493 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
4494 IXGBE_PXONRXC(i));
4495 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
4496 IXGBE_PXOFFRXC(i));
4497 }
Alexander Duyck2f90b862008-11-20 20:52:10 -08004498 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
4499 IXGBE_PXONTXC(i));
Alexander Duyck2f90b862008-11-20 20:52:10 -08004500 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004501 IXGBE_PXOFFTXC(i));
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08004502 }
4503 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4504 /* work around hardware counting issue */
4505 adapter->stats.gprc -= missed_rx;
Auke Kok9a799d72007-09-15 14:07:45 -07004506
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08004507 /* 82598 hardware only has a 32 bit counter in the high register */
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004508 if (hw->mac.type == ixgbe_mac_82599EB) {
Ben Greearaad71912009-09-30 12:08:16 +00004509 u64 tmp;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004510 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
Ben Greearaad71912009-09-30 12:08:16 +00004511 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; /* 4 high bits of GORC */
4512 adapter->stats.gorc += (tmp << 32);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004513 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
Ben Greearaad71912009-09-30 12:08:16 +00004514 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; /* 4 high bits of GOTC */
4515 adapter->stats.gotc += (tmp << 32);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004516 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
4517 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
4518 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4519 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004520 adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
4521 adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
Yi Zou6d455222009-05-13 13:12:16 +00004522#ifdef IXGBE_FCOE
4523 adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4524 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4525 adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4526 adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4527 adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4528 adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4529#endif /* IXGBE_FCOE */
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004530 } else {
4531 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4532 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4533 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4534 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4535 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4536 }
Auke Kok9a799d72007-09-15 14:07:45 -07004537 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4538 adapter->stats.bprc += bprc;
4539 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004540 if (hw->mac.type == ixgbe_mac_82598EB)
4541 adapter->stats.mprc -= bprc;
Auke Kok9a799d72007-09-15 14:07:45 -07004542 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4543 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4544 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4545 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4546 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4547 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4548 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
Auke Kok9a799d72007-09-15 14:07:45 -07004549 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08004550 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4551 adapter->stats.lxontxc += lxon;
4552 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4553 adapter->stats.lxofftxc += lxoff;
Auke Kok9a799d72007-09-15 14:07:45 -07004554 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4555 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08004556 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4557 /*
4558 * 82598 errata - tx of flow control packets is included in tx counters
4559 */
4560 xon_off_tot = lxon + lxoff;
4561 adapter->stats.gptc -= xon_off_tot;
4562 adapter->stats.mptc -= xon_off_tot;
4563 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
Auke Kok9a799d72007-09-15 14:07:45 -07004564 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4565 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4566 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
Auke Kok9a799d72007-09-15 14:07:45 -07004567 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4568 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08004569 adapter->stats.ptc64 -= xon_off_tot;
Auke Kok9a799d72007-09-15 14:07:45 -07004570 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4571 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4572 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4573 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4574 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
Auke Kok9a799d72007-09-15 14:07:45 -07004575 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4576
4577 /* Fill out the OS statistics structure */
Auke Kok9a799d72007-09-15 14:07:45 -07004578 adapter->net_stats.multicast = adapter->stats.mprc;
4579
4580 /* Rx Errors */
4581 adapter->net_stats.rx_errors = adapter->stats.crcerrs +
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004582 adapter->stats.rlec;
Auke Kok9a799d72007-09-15 14:07:45 -07004583 adapter->net_stats.rx_dropped = 0;
4584 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
4585 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08004586 adapter->net_stats.rx_missed_errors = total_mpc;
Auke Kok9a799d72007-09-15 14:07:45 -07004587}
4588
4589/**
4590 * ixgbe_watchdog - Timer Call-back
4591 * @data: pointer to adapter cast into an unsigned long
4592 **/
4593static void ixgbe_watchdog(unsigned long data)
4594{
4595 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004596 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyckfe49f042009-06-04 16:00:09 +00004597 u64 eics = 0;
4598 int i;
Auke Kok9a799d72007-09-15 14:07:45 -07004599
Alexander Duyckfe49f042009-06-04 16:00:09 +00004600 /*
4601 * Do the watchdog outside of interrupt context due to the lovely
4602 * delays that some of the newer hardware requires
4603 */
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00004604
Alexander Duyckfe49f042009-06-04 16:00:09 +00004605 if (test_bit(__IXGBE_DOWN, &adapter->state))
4606 goto watchdog_short_circuit;
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00004607
Alexander Duyckfe49f042009-06-04 16:00:09 +00004608 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
4609 /*
4610 * for legacy and MSI interrupts don't set any bits
4611 * that are enabled for EIAM, because this operation
4612 * would set *both* EIMS and EICS for any bit in EIAM
4613 */
4614 IXGBE_WRITE_REG(hw, IXGBE_EICS,
4615 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
4616 goto watchdog_reschedule;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004617 }
4618
Alexander Duyckfe49f042009-06-04 16:00:09 +00004619 /* get one bit for every active tx/rx interrupt vector */
4620 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
4621 struct ixgbe_q_vector *qv = adapter->q_vector[i];
4622 if (qv->rxr_count || qv->txr_count)
4623 eics |= ((u64)1 << i);
4624 }
4625
4626 /* Cause software interrupt to ensure rx rings are cleaned */
4627 ixgbe_irq_rearm_queues(adapter, eics);
4628
4629watchdog_reschedule:
4630 /* Reset the timer */
4631 mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
4632
4633watchdog_short_circuit:
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004634 schedule_work(&adapter->watchdog_task);
4635}
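/*
 * The timer itself only nudges the hardware (EICS) and schedules
 * ixgbe_watchdog_task(); the actual link-state handling happens in the
 * task further below, in process context.
 */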
4636
4637/**
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004638 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
4639 * @work: pointer to work_struct containing our data
4640 **/
4641static void ixgbe_multispeed_fiber_task(struct work_struct *work)
4642{
4643 struct ixgbe_adapter *adapter = container_of(work,
4644 struct ixgbe_adapter,
4645 multispeed_fiber_task);
4646 struct ixgbe_hw *hw = &adapter->hw;
4647 u32 autoneg;
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00004648 bool negotiation;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004649
4650 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
Mallikarjuna R Chilakalaa1f25322009-06-30 11:44:36 +00004651 autoneg = hw->phy.autoneg_advertised;
4652 if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
Mallikarjuna R Chilakala8620a102009-09-01 13:49:35 +00004653 hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
4654 if (hw->mac.ops.setup_link)
4655 hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004656 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4657 adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
4658}
4659
4660/**
4661 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
4662 * @work: pointer to work_struct containing our data
4663 **/
4664static void ixgbe_sfp_config_module_task(struct work_struct *work)
4665{
4666 struct ixgbe_adapter *adapter = container_of(work,
4667 struct ixgbe_adapter,
4668 sfp_config_module_task);
4669 struct ixgbe_hw *hw = &adapter->hw;
4670 s32 err;
4671
4672 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
Don Skidmore63d6e1d2009-07-02 12:50:12 +00004673
4674 /* Time for electrical oscillations to settle down */
4675 msleep(100);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004676 err = hw->phy.ops.identify_sfp(hw);
Don Skidmore63d6e1d2009-07-02 12:50:12 +00004677
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004678 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
Don Skidmore88d2b812009-06-30 11:43:55 +00004679 dev_err(&adapter->pdev->dev, "failed to initialize because "
4680 "an unsupported SFP+ module type was detected.\n"
4681 "Reload the driver after installing a supported "
4682 "module.\n");
Don Skidmore63d6e1d2009-07-02 12:50:12 +00004683 unregister_netdev(adapter->netdev);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004684 return;
4685 }
4686 hw->mac.ops.setup_sfp(hw);
4687
Tony Breeds8d1c3c02009-04-09 22:29:10 +00004688 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004689 /* This will also work for DA Twinax connections */
4690 schedule_work(&adapter->multispeed_fiber_task);
4691 adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
4692}
4693
4694/**
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00004695 * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
4696 * @work: pointer to work_struct containing our data
4697 **/
4698static void ixgbe_fdir_reinit_task(struct work_struct *work)
4699{
4700 struct ixgbe_adapter *adapter = container_of(work,
4701 struct ixgbe_adapter,
4702 fdir_reinit_task);
4703 struct ixgbe_hw *hw = &adapter->hw;
4704 int i;
4705
4706 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
4707 for (i = 0; i < adapter->num_tx_queues; i++)
4708 set_bit(__IXGBE_FDIR_INIT_DONE,
4709 &(adapter->tx_ring[i].reinit_state));
4710 } else {
4711 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
4712 "ignored adding FDIR ATR filters \n");
4713 }
4714 /* Done FDIR Re-initialization, enable transmits */
4715 netif_tx_start_all_queues(adapter->netdev);
4716}
4717
4718/**
Alexander Duyck69888672008-09-11 20:05:39 -07004719 * ixgbe_watchdog_task - worker thread to bring link up
4720 * @work: pointer to work_struct containing our data
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004721 **/
4722static void ixgbe_watchdog_task(struct work_struct *work)
4723{
4724 struct ixgbe_adapter *adapter = container_of(work,
4725 struct ixgbe_adapter,
4726 watchdog_task);
4727 struct net_device *netdev = adapter->netdev;
4728 struct ixgbe_hw *hw = &adapter->hw;
4729 u32 link_speed = adapter->link_speed;
4730 bool link_up = adapter->link_up;
Nelson, Shannonbc59fcd2009-04-27 22:43:12 +00004731 int i;
4732 struct ixgbe_ring *tx_ring;
4733 int some_tx_pending = 0;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004734
4735 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
4736
4737 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4738 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00004739 if (link_up) {
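 			/*
 			 * With DCB enabled, flow control is (re)enabled for
 			 * every traffic class once link comes up; otherwise a
 			 * single call for TC 0 suffices.
 			 */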
4740#ifdef CONFIG_DCB
4741 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4742 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
Mallikarjuna R Chilakala620fa032009-06-04 11:11:13 +00004743 hw->mac.ops.fc_enable(hw, i);
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00004744 } else {
Mallikarjuna R Chilakala620fa032009-06-04 11:11:13 +00004745 hw->mac.ops.fc_enable(hw, 0);
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00004746 }
4747#else
Mallikarjuna R Chilakala620fa032009-06-04 11:11:13 +00004748 hw->mac.ops.fc_enable(hw, 0);
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00004749#endif
4750 }
4751
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004752 if (link_up ||
4753 time_after(jiffies, (adapter->link_check_timeout +
4754 IXGBE_TRY_LINK_TIMEOUT))) {
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004755 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
Peter P Waskiewicz Jr264857b2009-05-17 12:35:16 +00004756 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004757 }
4758 adapter->link_up = link_up;
4759 adapter->link_speed = link_speed;
4760 }
Auke Kok9a799d72007-09-15 14:07:45 -07004761
4762 if (link_up) {
4763 if (!netif_carrier_ok(netdev)) {
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004764 bool flow_rx, flow_tx;
4765
4766 if (hw->mac.type == ixgbe_mac_82599EB) {
4767 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4768 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
Peter P Waskiewicz Jr078788b2009-07-16 15:50:32 +00004769 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
4770 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004771 } else {
4772 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4773 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
Peter P Waskiewicz Jr078788b2009-07-16 15:50:32 +00004774 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
4775 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004776 }
4777
Jeff Kirshera46e5342008-11-27 00:22:21 -08004778 printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
4779 "Flow Control: %s\n",
4780 netdev->name,
4781 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
4782 "10 Gbps" :
4783 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
4784 "1 Gbps" : "unknown speed")),
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004785 ((flow_rx && flow_tx) ? "RX/TX" :
4786 (flow_rx ? "RX" :
4787 (flow_tx ? "TX" : "None"))));
Auke Kok9a799d72007-09-15 14:07:45 -07004788
4789 netif_carrier_on(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07004790 } else {
4791 /* Force detection of hung controller */
4792 adapter->detect_tx_hung = true;
4793 }
4794 } else {
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004795 adapter->link_up = false;
4796 adapter->link_speed = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07004797 if (netif_carrier_ok(netdev)) {
Jeff Kirshera46e5342008-11-27 00:22:21 -08004798 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
4799 netdev->name);
Auke Kok9a799d72007-09-15 14:07:45 -07004800 netif_carrier_off(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07004801 }
4802 }
4803
Nelson, Shannonbc59fcd2009-04-27 22:43:12 +00004804 if (!netif_carrier_ok(netdev)) {
4805 for (i = 0; i < adapter->num_tx_queues; i++) {
4806 tx_ring = &adapter->tx_ring[i];
4807 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
4808 some_tx_pending = 1;
4809 break;
4810 }
4811 }
4812
4813 if (some_tx_pending) {
4814 /* We've lost link, so the controller stops DMA,
4815 * but we've got queued Tx work that's never going
4816 * to get done, so reset controller to flush Tx.
4817 * (Do the reset outside of interrupt context).
4818 */
4819 schedule_work(&adapter->reset_task);
4820 }
4821 }
4822
Auke Kok9a799d72007-09-15 14:07:45 -07004823 ixgbe_update_stats(adapter);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004824 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
Auke Kok9a799d72007-09-15 14:07:45 -07004825}
4826
Auke Kok9a799d72007-09-15 14:07:45 -07004827static int ixgbe_tso(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004828 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
4829 u32 tx_flags, u8 *hdr_len)
Auke Kok9a799d72007-09-15 14:07:45 -07004830{
4831 struct ixgbe_adv_tx_context_desc *context_desc;
4832 unsigned int i;
4833 int err;
4834 struct ixgbe_tx_buffer *tx_buffer_info;
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004835 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
4836 u32 mss_l4len_idx, l4len;
Auke Kok9a799d72007-09-15 14:07:45 -07004837
4838 if (skb_is_gso(skb)) {
4839 if (skb_header_cloned(skb)) {
4840 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4841 if (err)
4842 return err;
4843 }
4844 l4len = tcp_hdrlen(skb);
4845 *hdr_len += l4len;
4846
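 		/*
 		 * Clear the IP length/checksum fields and seed the TCP
 		 * pseudo-header checksum (without the length) so the hardware
 		 * can fill in per-segment values as it splits the payload.
 		 */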
Al Viro8327d002007-12-10 18:54:12 +00004847 if (skb->protocol == htons(ETH_P_IP)) {
Auke Kok9a799d72007-09-15 14:07:45 -07004848 struct iphdr *iph = ip_hdr(skb);
4849 iph->tot_len = 0;
4850 iph->check = 0;
4851 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004852 iph->daddr, 0,
4853 IPPROTO_TCP,
4854 0);
Auke Kok9a799d72007-09-15 14:07:45 -07004855 adapter->hw_tso_ctxt++;
4856 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
4857 ipv6_hdr(skb)->payload_len = 0;
4858 tcp_hdr(skb)->check =
4859 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004860 &ipv6_hdr(skb)->daddr,
4861 0, IPPROTO_TCP, 0);
Auke Kok9a799d72007-09-15 14:07:45 -07004862 adapter->hw_tso6_ctxt++;
4863 }
4864
4865 i = tx_ring->next_to_use;
4866
4867 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4868 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
4869
4870 /* VLAN MACLEN IPLEN */
4871 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
4872 vlan_macip_lens |=
4873 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
4874 vlan_macip_lens |= ((skb_network_offset(skb)) <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004875 IXGBE_ADVTXD_MACLEN_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07004876 *hdr_len += skb_network_offset(skb);
4877 vlan_macip_lens |=
4878 (skb_transport_header(skb) - skb_network_header(skb));
4879 *hdr_len +=
4880 (skb_transport_header(skb) - skb_network_header(skb));
4881 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4882 context_desc->seqnum_seed = 0;
4883
4884 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004885 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004886 IXGBE_ADVTXD_DTYP_CTXT);
Auke Kok9a799d72007-09-15 14:07:45 -07004887
Al Viro8327d002007-12-10 18:54:12 +00004888 if (skb->protocol == htons(ETH_P_IP))
Auke Kok9a799d72007-09-15 14:07:45 -07004889 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
4890 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
4891 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
4892
4893 /* MSS L4LEN IDX */
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004894 mss_l4len_idx =
Auke Kok9a799d72007-09-15 14:07:45 -07004895 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
4896 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07004897 /* use index 1 for TSO */
4898 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07004899 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4900
4901 tx_buffer_info->time_stamp = jiffies;
4902 tx_buffer_info->next_to_watch = i;
4903
4904 i++;
4905 if (i == tx_ring->count)
4906 i = 0;
4907 tx_ring->next_to_use = i;
4908
4909 return true;
4910 }
4911 return false;
4912}
4913
4914static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004915 struct ixgbe_ring *tx_ring,
4916 struct sk_buff *skb, u32 tx_flags)
Auke Kok9a799d72007-09-15 14:07:45 -07004917{
4918 struct ixgbe_adv_tx_context_desc *context_desc;
4919 unsigned int i;
4920 struct ixgbe_tx_buffer *tx_buffer_info;
4921 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
4922
4923 if (skb->ip_summed == CHECKSUM_PARTIAL ||
4924 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
4925 i = tx_ring->next_to_use;
4926 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4927 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
4928
4929 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
4930 vlan_macip_lens |=
4931 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
4932 vlan_macip_lens |= (skb_network_offset(skb) <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004933 IXGBE_ADVTXD_MACLEN_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07004934 if (skb->ip_summed == CHECKSUM_PARTIAL)
4935 vlan_macip_lens |= (skb_transport_header(skb) -
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004936 skb_network_header(skb));
Auke Kok9a799d72007-09-15 14:07:45 -07004937
4938 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4939 context_desc->seqnum_seed = 0;
4940
4941 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004942 IXGBE_ADVTXD_DTYP_CTXT);
Auke Kok9a799d72007-09-15 14:07:45 -07004943
4944 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Auke Kok41825d72008-02-12 15:20:33 -08004945 switch (skb->protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08004946 case cpu_to_be16(ETH_P_IP):
Auke Kok9a799d72007-09-15 14:07:45 -07004947 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
Auke Kok41825d72008-02-12 15:20:33 -08004948 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4949 type_tucmd_mlhl |=
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004950 IXGBE_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburg45a5ead2009-04-27 22:36:35 +00004951 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
4952 type_tucmd_mlhl |=
4953 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
Auke Kok41825d72008-02-12 15:20:33 -08004954 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08004955 case cpu_to_be16(ETH_P_IPV6):
Auke Kok41825d72008-02-12 15:20:33 -08004956 /* XXX what about other V6 headers?? */
4957 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4958 type_tucmd_mlhl |=
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004959 IXGBE_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburg45a5ead2009-04-27 22:36:35 +00004960 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
4961 type_tucmd_mlhl |=
4962 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
Auke Kok41825d72008-02-12 15:20:33 -08004963 break;
Auke Kok41825d72008-02-12 15:20:33 -08004964 default:
4965 if (unlikely(net_ratelimit())) {
4966 DPRINTK(PROBE, WARNING,
4967 "partial checksum but proto=%x!\n",
4968 skb->protocol);
4969 }
4970 break;
4971 }
Auke Kok9a799d72007-09-15 14:07:45 -07004972 }
4973
4974 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07004975 /* use index zero for tx checksum offload */
Auke Kok9a799d72007-09-15 14:07:45 -07004976 context_desc->mss_l4len_idx = 0;
4977
4978 tx_buffer_info->time_stamp = jiffies;
4979 tx_buffer_info->next_to_watch = i;
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004980
Auke Kok9a799d72007-09-15 14:07:45 -07004981 adapter->hw_csum_tx_good++;
4982 i++;
4983 if (i == tx_ring->count)
4984 i = 0;
4985 tx_ring->next_to_use = i;
4986
4987 return true;
4988 }
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004989
Auke Kok9a799d72007-09-15 14:07:45 -07004990 return false;
4991}
4992
4993static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004994 struct ixgbe_ring *tx_ring,
Yi Zoueacd73f2009-05-13 13:11:06 +00004995 struct sk_buff *skb, u32 tx_flags,
4996 unsigned int first)
Auke Kok9a799d72007-09-15 14:07:45 -07004997{
4998 struct ixgbe_tx_buffer *tx_buffer_info;
Yi Zoueacd73f2009-05-13 13:11:06 +00004999 unsigned int len;
5000 unsigned int total = skb->len;
Auke Kok9a799d72007-09-15 14:07:45 -07005001 unsigned int offset = 0, size, count = 0, i;
5002 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
5003 unsigned int f;
Alexander Duyck44df32c2009-03-31 21:34:23 +00005004 dma_addr_t *map;
Auke Kok9a799d72007-09-15 14:07:45 -07005005
5006 i = tx_ring->next_to_use;
5007
Alexander Duyck44df32c2009-03-31 21:34:23 +00005008 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
5009 dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
5010 return 0;
5011 }
5012
5013 map = skb_shinfo(skb)->dma_maps;
5014
Yi Zoueacd73f2009-05-13 13:11:06 +00005015 if (tx_flags & IXGBE_TX_FLAGS_FCOE)
5016 /* excluding fcoe_crc_eof for FCoE */
5017 total -= sizeof(struct fcoe_crc_eof);
5018
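 	/*
 	 * Walk the linear part of the skb first, splitting it into
 	 * IXGBE_MAX_DATA_PER_TXD-sized tx_buffer_info entries, then handle
 	 * the paged fragments the same way below.
 	 */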
5019 len = min(skb_headlen(skb), total);
Auke Kok9a799d72007-09-15 14:07:45 -07005020 while (len) {
5021 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5022 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
5023
5024 tx_buffer_info->length = size;
Eric Dumazet042a53a2009-06-05 04:04:16 +00005025 tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
Auke Kok9a799d72007-09-15 14:07:45 -07005026 tx_buffer_info->time_stamp = jiffies;
5027 tx_buffer_info->next_to_watch = i;
5028
5029 len -= size;
Yi Zoueacd73f2009-05-13 13:11:06 +00005030 total -= size;
Auke Kok9a799d72007-09-15 14:07:45 -07005031 offset += size;
5032 count++;
Alexander Duyck44df32c2009-03-31 21:34:23 +00005033
5034 if (len) {
5035 i++;
5036 if (i == tx_ring->count)
5037 i = 0;
5038 }
Auke Kok9a799d72007-09-15 14:07:45 -07005039 }
5040
5041 for (f = 0; f < nr_frags; f++) {
5042 struct skb_frag_struct *frag;
5043
5044 frag = &skb_shinfo(skb)->frags[f];
Yi Zoueacd73f2009-05-13 13:11:06 +00005045 len = min((unsigned int)frag->size, total);
Alexander Duyck44df32c2009-03-31 21:34:23 +00005046 offset = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07005047
5048 while (len) {
Alexander Duyck44df32c2009-03-31 21:34:23 +00005049 i++;
5050 if (i == tx_ring->count)
5051 i = 0;
5052
Auke Kok9a799d72007-09-15 14:07:45 -07005053 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5054 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
5055
5056 tx_buffer_info->length = size;
Eric Dumazet042a53a2009-06-05 04:04:16 +00005057 tx_buffer_info->dma = map[f] + offset;
Auke Kok9a799d72007-09-15 14:07:45 -07005058 tx_buffer_info->time_stamp = jiffies;
5059 tx_buffer_info->next_to_watch = i;
5060
5061 len -= size;
Yi Zoueacd73f2009-05-13 13:11:06 +00005062 total -= size;
Auke Kok9a799d72007-09-15 14:07:45 -07005063 offset += size;
5064 count++;
Auke Kok9a799d72007-09-15 14:07:45 -07005065 }
Yi Zoueacd73f2009-05-13 13:11:06 +00005066 if (total == 0)
5067 break;
Auke Kok9a799d72007-09-15 14:07:45 -07005068 }
Alexander Duyck44df32c2009-03-31 21:34:23 +00005069
Auke Kok9a799d72007-09-15 14:07:45 -07005070 tx_ring->tx_buffer_info[i].skb = skb;
5071 tx_ring->tx_buffer_info[first].next_to_watch = i;
5072
5073 return count;
5074}
5075
5076static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005077 struct ixgbe_ring *tx_ring,
5078 int tx_flags, int count, u32 paylen, u8 hdr_len)
Auke Kok9a799d72007-09-15 14:07:45 -07005079{
5080 union ixgbe_adv_tx_desc *tx_desc = NULL;
5081 struct ixgbe_tx_buffer *tx_buffer_info;
5082 u32 olinfo_status = 0, cmd_type_len = 0;
5083 unsigned int i;
5084 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
5085
5086 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
5087
5088 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
5089
5090 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
5091 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
5092
5093 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
5094 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
5095
5096 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005097 IXGBE_ADVTXD_POPTS_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07005098
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07005099 /* use index 1 context for tso */
5100 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07005101 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
5102 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005103 IXGBE_ADVTXD_POPTS_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07005104
5105 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
5106 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005107 IXGBE_ADVTXD_POPTS_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07005108
Yi Zoueacd73f2009-05-13 13:11:06 +00005109 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
5110 olinfo_status |= IXGBE_ADVTXD_CC;
5111 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
5112 if (tx_flags & IXGBE_TX_FLAGS_FSO)
5113 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
5114 }
5115
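	/* PAYLEN is the frame length minus any header bytes handled by
	 * TSO/FSO; hdr_len is 0 for frames that are not segmented */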
Auke Kok9a799d72007-09-15 14:07:45 -07005116 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
5117
5118 i = tx_ring->next_to_use;
5119 while (count--) {
5120 tx_buffer_info = &tx_ring->tx_buffer_info[i];
5121 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
5122 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
5123 tx_desc->read.cmd_type_len =
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005124 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
Auke Kok9a799d72007-09-15 14:07:45 -07005125 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Auke Kok9a799d72007-09-15 14:07:45 -07005126 i++;
5127 if (i == tx_ring->count)
5128 i = 0;
5129 }
5130
5131 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
5132
5133 /*
5134 * Force memory writes to complete before letting h/w
5135 * know there are new descriptors to fetch. (Only
5136 * applicable for weak-ordered memory model archs,
5137 * such as IA-64).
5138 */
5139 wmb();
5140
5141 tx_ring->next_to_use = i;
5142 writel(i, adapter->hw.hw_addr + tx_ring->tail);
5143}
5144
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005145static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5146 int queue, u32 tx_flags)
5147{
5148 /* Right now, we support IPv4 only */
5149 struct ixgbe_atr_input atr_input;
5150 struct tcphdr *th;
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005151 struct iphdr *iph = ip_hdr(skb);
5152 struct ethhdr *eth = (struct ethhdr *)skb->data;
5153 u16 vlan_id, src_port, dst_port, flex_bytes;
5154 u32 src_ipv4_addr, dst_ipv4_addr;
5155 u8 l4type = 0;
5156
5157 	/* only TCP is supported by ATR for now */
5158 if (iph->protocol == IPPROTO_TCP) {
5159 th = tcp_hdr(skb);
5160 src_port = th->source;
5161 dst_port = th->dest;
5162 l4type |= IXGBE_ATR_L4TYPE_TCP;
5163 /* l4type IPv4 type is 0, no need to assign */
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005164 } else {
5165 /* Unsupported L4 header, just bail here */
5166 return;
5167 }
5168
5169 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
5170
5171 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
5172 IXGBE_TX_FLAGS_VLAN_SHIFT;
5173 src_ipv4_addr = iph->saddr;
5174 dst_ipv4_addr = iph->daddr;
5175 flex_bytes = eth->h_proto;
5176
5177 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
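	/* the ports are swapped on purpose as well, for the same
	 * "receiver's view" reason noted for the IP addresses below */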
5178 ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
5179 ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
5180 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
5181 ixgbe_atr_set_l4type_82599(&atr_input, l4type);
5182 /* src and dst are inverted, think how the receiver sees them */
5183 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
5184 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
5185
5186 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
5187 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
5188}
5189
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08005190static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005191 struct ixgbe_ring *tx_ring, int size)
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08005192{
5193 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5194
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08005195 netif_stop_subqueue(netdev, tx_ring->queue_index);
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08005196 /* Herbert's original patch had:
5197 * smp_mb__after_netif_stop_queue();
5198 * but since that doesn't exist yet, just open code it. */
5199 smp_mb();
5200
5201 /* We need to check again in a case another CPU has just
5202 * made room available. */
5203 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
5204 return -EBUSY;
5205
5206 /* A reprieve! - use start_queue because it doesn't call schedule */
Jesse Brandeburgaf721662008-09-11 19:54:23 -07005207 netif_start_subqueue(netdev, tx_ring->queue_index);
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08005208 ++adapter->restart_queue;
5209 return 0;
5210}
5211
5212static int ixgbe_maybe_stop_tx(struct net_device *netdev,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005213 struct ixgbe_ring *tx_ring, int size)
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08005214{
5215 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
5216 return 0;
5217 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
5218}
5219
Stephen Hemminger09a3b1f2009-03-21 13:40:01 -07005220static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
5221{
5222 struct ixgbe_adapter *adapter = netdev_priv(dev);
5223
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005224 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
5225 return smp_processor_id();
5226
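	/* with DCB, the 802.1p priority (VLAN TCI bits 15:13) selects the queue */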
Stephen Hemminger09a3b1f2009-03-21 13:40:01 -07005227 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
Lucy Liu36e89d72009-08-05 13:06:34 -07005228 return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
Stephen Hemminger09a3b1f2009-03-21 13:40:01 -07005229
5230 return skb_tx_hash(dev, skb);
5231}
5232
Stephen Hemminger3b29a562009-08-31 19:50:55 +00005233static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
5234 struct net_device *netdev)
Auke Kok9a799d72007-09-15 14:07:45 -07005235{
5236 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5237 struct ixgbe_ring *tx_ring;
Auke Kok9a799d72007-09-15 14:07:45 -07005238 unsigned int first;
5239 unsigned int tx_flags = 0;
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08005240 u8 hdr_len = 0;
5241 int r_idx = 0, tso;
Auke Kok9a799d72007-09-15 14:07:45 -07005242 int count = 0;
5243 unsigned int f;
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07005244
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07005245 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
5246 tx_flags |= vlan_tx_tag_get(skb);
Alexander Duyck2f90b862008-11-20 20:52:10 -08005247 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
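		/* overwrite the 802.1p priority bits (VLAN PCP, bits 15:13
		 * of the tag) with the queue/traffic class of this skb */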
5248 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
5249 tx_flags |= (skb->queue_mapping << 13);
5250 }
5251 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5252 tx_flags |= IXGBE_TX_FLAGS_VLAN;
5253 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
Lucy Liu60127862009-07-22 14:07:33 +00005254 if (skb->priority != TC_PRIO_CONTROL) {
5255 tx_flags |= (skb->queue_mapping << 13);
5256 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5257 tx_flags |= IXGBE_TX_FLAGS_VLAN;
5258 } else {
5259 skb->queue_mapping =
5260 adapter->ring_feature[RING_F_DCB].indices-1;
5261 }
Auke Kok9a799d72007-09-15 14:07:45 -07005262 }
Yi Zoueacd73f2009-05-13 13:11:06 +00005263
Lucy Liu60127862009-07-22 14:07:33 +00005264 r_idx = skb->queue_mapping;
5265 tx_ring = &adapter->tx_ring[r_idx];
5266
Yi Zoueacd73f2009-05-13 13:11:06 +00005267 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
Yi Zou09ad1cc2009-09-03 14:56:10 +00005268 (skb->protocol == htons(ETH_P_FCOE))) {
Yi Zoueacd73f2009-05-13 13:11:06 +00005269 tx_flags |= IXGBE_TX_FLAGS_FCOE;
Yi Zou09ad1cc2009-09-03 14:56:10 +00005270#ifdef IXGBE_FCOE
5271 r_idx = smp_processor_id();
5272 r_idx &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
5273 r_idx += adapter->ring_feature[RING_F_FCOE].mask;
5274 tx_ring = &adapter->tx_ring[r_idx];
5275#endif
5276 }
Yi Zoueacd73f2009-05-13 13:11:06 +00005277 /* four things can cause us to need a context descriptor */
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07005278 if (skb_is_gso(skb) ||
5279 (skb->ip_summed == CHECKSUM_PARTIAL) ||
Yi Zoueacd73f2009-05-13 13:11:06 +00005280 (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
5281 (tx_flags & IXGBE_TX_FLAGS_FCOE))
Auke Kok9a799d72007-09-15 14:07:45 -07005282 count++;
5283
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07005284 count += TXD_USE_COUNT(skb_headlen(skb));
5285 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
Auke Kok9a799d72007-09-15 14:07:45 -07005286 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
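	/* TXD_USE_COUNT() rounds each buffer up to whole descriptors; e.g.,
	 * assuming IXGBE_MAX_DATA_PER_TXD is 16KB, a 40KB fragment needs
	 * three data descriptors */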
5287
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08005288 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
Auke Kok9a799d72007-09-15 14:07:45 -07005289 adapter->tx_busy++;
Auke Kok9a799d72007-09-15 14:07:45 -07005290 return NETDEV_TX_BUSY;
5291 }
Auke Kok9a799d72007-09-15 14:07:45 -07005292
Auke Kok9a799d72007-09-15 14:07:45 -07005293 first = tx_ring->next_to_use;
Yi Zoueacd73f2009-05-13 13:11:06 +00005294 if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
5295#ifdef IXGBE_FCOE
5296 /* setup tx offload for FCoE */
5297 tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
5298 if (tso < 0) {
5299 dev_kfree_skb_any(skb);
5300 return NETDEV_TX_OK;
5301 }
5302 if (tso)
5303 tx_flags |= IXGBE_TX_FLAGS_FSO;
5304#endif /* IXGBE_FCOE */
5305 } else {
5306 if (skb->protocol == htons(ETH_P_IP))
5307 tx_flags |= IXGBE_TX_FLAGS_IPV4;
5308 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
5309 if (tso < 0) {
5310 dev_kfree_skb_any(skb);
5311 return NETDEV_TX_OK;
5312 }
5313
5314 if (tso)
5315 tx_flags |= IXGBE_TX_FLAGS_TSO;
5316 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
5317 (skb->ip_summed == CHECKSUM_PARTIAL))
5318 tx_flags |= IXGBE_TX_FLAGS_CSUM;
Auke Kok9a799d72007-09-15 14:07:45 -07005319 }
5320
Yi Zoueacd73f2009-05-13 13:11:06 +00005321 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
Alexander Duyck44df32c2009-03-31 21:34:23 +00005322 if (count) {
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005323 /* add the ATR filter if ATR is on */
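		/* roughly one in every atr_sample_rate packets is sampled,
		 * to bound the cost of flow director table updates */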
5324 if (tx_ring->atr_sample_rate) {
5325 ++tx_ring->atr_count;
5326 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
5327 test_bit(__IXGBE_FDIR_INIT_DONE,
5328 &tx_ring->reinit_state)) {
5329 ixgbe_atr(adapter, skb, tx_ring->queue_index,
5330 tx_flags);
5331 tx_ring->atr_count = 0;
5332 }
5333 }
Alexander Duyck44df32c2009-03-31 21:34:23 +00005334 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
5335 hdr_len);
Alexander Duyck44df32c2009-03-31 21:34:23 +00005336 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
Auke Kok9a799d72007-09-15 14:07:45 -07005337
Alexander Duyck44df32c2009-03-31 21:34:23 +00005338 } else {
5339 dev_kfree_skb_any(skb);
5340 tx_ring->tx_buffer_info[first].time_stamp = 0;
5341 tx_ring->next_to_use = first;
5342 }
Auke Kok9a799d72007-09-15 14:07:45 -07005343
5344 return NETDEV_TX_OK;
5345}
5346
5347/**
5348 * ixgbe_get_stats - Get System Network Statistics
5349 * @netdev: network interface device structure
5350 *
5351 * Returns the address of the device statistics structure.
5352 * The statistics are actually updated from the timer callback.
5353 **/
5354static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
5355{
5356 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5357
5358 /* only return the current stats */
5359 return &adapter->net_stats;
5360}
5361
5362/**
5363 * ixgbe_set_mac - Change the Ethernet Address of the NIC
5364 * @netdev: network interface device structure
5365 * @p: pointer to an address structure
5366 *
5367 * Returns 0 on success, negative on failure
5368 **/
5369static int ixgbe_set_mac(struct net_device *netdev, void *p)
5370{
5371 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005372 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07005373 struct sockaddr *addr = p;
5374
5375 if (!is_valid_ether_addr(addr->sa_data))
5376 return -EADDRNOTAVAIL;
5377
5378 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005379 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9a799d72007-09-15 14:07:45 -07005380
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005381 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
Auke Kok9a799d72007-09-15 14:07:45 -07005382
5383 return 0;
5384}
5385
Ben Hutchings6b73e102009-04-29 08:08:58 +00005386static int
5387ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
5388{
5389 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5390 struct ixgbe_hw *hw = &adapter->hw;
5391 u16 value;
5392 int rc;
5393
5394 if (prtad != hw->phy.mdio.prtad)
5395 return -EINVAL;
5396 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
5397 if (!rc)
5398 rc = value;
5399 return rc;
5400}
5401
5402static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
5403 u16 addr, u16 value)
5404{
5405 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5406 struct ixgbe_hw *hw = &adapter->hw;
5407
5408 if (prtad != hw->phy.mdio.prtad)
5409 return -EINVAL;
5410 return hw->phy.ops.write_reg(hw, addr, devad, value);
5411}
5412
5413static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
5414{
5415 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5416
5417 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
5418}
5419
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00005420/**
5421 * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
Jiri Pirko31278e72009-06-17 01:12:19 +00005422 * netdev->dev_addrs
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00005423 * @netdev: network interface device structure
5424 *
5425 * Returns non-zero on failure
5426 **/
5427static int ixgbe_add_sanmac_netdev(struct net_device *dev)
5428{
5429 int err = 0;
5430 struct ixgbe_adapter *adapter = netdev_priv(dev);
5431 struct ixgbe_mac_info *mac = &adapter->hw.mac;
5432
5433 if (is_valid_ether_addr(mac->san_addr)) {
5434 rtnl_lock();
5435 err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
5436 rtnl_unlock();
5437 }
5438 return err;
5439}
5440
5441/**
5442 * ixgbe_del_sanmac_netdev - Removes the SAN MAC address from the corresponding
Jiri Pirko31278e72009-06-17 01:12:19 +00005443 * netdev->dev_addrs
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00005444 * @netdev: network interface device structure
5445 *
5446 * Returns non-zero on failure
5447 **/
5448static int ixgbe_del_sanmac_netdev(struct net_device *dev)
5449{
5450 int err = 0;
5451 struct ixgbe_adapter *adapter = netdev_priv(dev);
5452 struct ixgbe_mac_info *mac = &adapter->hw.mac;
5453
5454 if (is_valid_ether_addr(mac->san_addr)) {
5455 rtnl_lock();
5456 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
5457 rtnl_unlock();
5458 }
5459 return err;
5460}
5461
Auke Kok9a799d72007-09-15 14:07:45 -07005462#ifdef CONFIG_NET_POLL_CONTROLLER
5463/*
5464 * Polling 'interrupt' - used by things like netconsole to send skbs
5465 * without having to re-enable interrupts. It's not called while
5466 * the interrupt routine is executing.
5467 */
5468static void ixgbe_netpoll(struct net_device *netdev)
5469{
5470 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewicz Jr8f9a7162009-07-30 12:25:09 +00005471 int i;
Auke Kok9a799d72007-09-15 14:07:45 -07005472
Auke Kok9a799d72007-09-15 14:07:45 -07005473 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
Peter P Waskiewicz Jr8f9a7162009-07-30 12:25:09 +00005474 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5475 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
5476 for (i = 0; i < num_q_vectors; i++) {
5477 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
5478 ixgbe_msix_clean_many(0, q_vector);
5479 }
5480 } else {
5481 ixgbe_intr(adapter->pdev->irq, netdev);
5482 }
Auke Kok9a799d72007-09-15 14:07:45 -07005483 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
Auke Kok9a799d72007-09-15 14:07:45 -07005484}
5485#endif
5486
Stephen Hemminger0edc3522008-11-19 22:24:29 -08005487static const struct net_device_ops ixgbe_netdev_ops = {
5488 .ndo_open = ixgbe_open,
5489 .ndo_stop = ixgbe_close,
Stephen Hemminger00829822008-11-20 20:14:53 -08005490 .ndo_start_xmit = ixgbe_xmit_frame,
Stephen Hemminger09a3b1f2009-03-21 13:40:01 -07005491 .ndo_select_queue = ixgbe_select_queue,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08005492 .ndo_get_stats = ixgbe_get_stats,
Chris Leeche90d4002009-03-10 16:00:24 +00005493 .ndo_set_rx_mode = ixgbe_set_rx_mode,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08005494 .ndo_set_multicast_list = ixgbe_set_rx_mode,
5495 .ndo_validate_addr = eth_validate_addr,
5496 .ndo_set_mac_address = ixgbe_set_mac,
5497 .ndo_change_mtu = ixgbe_change_mtu,
5498 .ndo_tx_timeout = ixgbe_tx_timeout,
5499 .ndo_vlan_rx_register = ixgbe_vlan_rx_register,
5500 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
5501 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
Ben Hutchings6b73e102009-04-29 08:08:58 +00005502 .ndo_do_ioctl = ixgbe_ioctl,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08005503#ifdef CONFIG_NET_POLL_CONTROLLER
5504 .ndo_poll_controller = ixgbe_netpoll,
5505#endif
Yi Zou332d4a72009-05-13 13:11:53 +00005506#ifdef IXGBE_FCOE
5507 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
5508 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
Yi Zou8450ff82009-08-31 12:32:14 +00005509 .ndo_fcoe_enable = ixgbe_fcoe_enable,
5510 .ndo_fcoe_disable = ixgbe_fcoe_disable,
Yi Zou332d4a72009-05-13 13:11:53 +00005511#endif /* IXGBE_FCOE */
Stephen Hemminger0edc3522008-11-19 22:24:29 -08005512};
5513
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07005514/**
Auke Kok9a799d72007-09-15 14:07:45 -07005515 * ixgbe_probe - Device Initialization Routine
5516 * @pdev: PCI device information struct
5517 * @ent: entry in ixgbe_pci_tbl
5518 *
5519 * Returns 0 on success, negative on failure
5520 *
5521 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
5522 * The OS initialization, configuring of the adapter private structure,
5523 * and a hardware reset occur.
5524 **/
5525static int __devinit ixgbe_probe(struct pci_dev *pdev,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005526 const struct pci_device_id *ent)
Auke Kok9a799d72007-09-15 14:07:45 -07005527{
5528 struct net_device *netdev;
5529 struct ixgbe_adapter *adapter = NULL;
5530 struct ixgbe_hw *hw;
5531 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
Auke Kok9a799d72007-09-15 14:07:45 -07005532 static int cards_found;
5533 int i, err, pci_using_dac;
Yi Zoueacd73f2009-05-13 13:11:06 +00005534#ifdef IXGBE_FCOE
5535 u16 device_caps;
5536#endif
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07005537 u32 part_num, eec;
Auke Kok9a799d72007-09-15 14:07:45 -07005538
gouji-new9ce77662009-05-06 10:44:45 +00005539 err = pci_enable_device_mem(pdev);
Auke Kok9a799d72007-09-15 14:07:45 -07005540 if (err)
5541 return err;
5542
Yang Hongyang6a355282009-04-06 19:01:13 -07005543 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
5544 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
Auke Kok9a799d72007-09-15 14:07:45 -07005545 pci_using_dac = 1;
5546 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07005547 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9a799d72007-09-15 14:07:45 -07005548 if (err) {
Yang Hongyang284901a2009-04-06 19:01:15 -07005549 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9a799d72007-09-15 14:07:45 -07005550 if (err) {
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005551 dev_err(&pdev->dev, "No usable DMA "
5552 "configuration, aborting\n");
Auke Kok9a799d72007-09-15 14:07:45 -07005553 goto err_dma;
5554 }
5555 }
5556 pci_using_dac = 0;
5557 }
5558
gouji-new9ce77662009-05-06 10:44:45 +00005559 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
5560 IORESOURCE_MEM), ixgbe_driver_name);
Auke Kok9a799d72007-09-15 14:07:45 -07005561 if (err) {
gouji-new9ce77662009-05-06 10:44:45 +00005562 dev_err(&pdev->dev,
5563 "pci_request_selected_regions failed 0x%x\n", err);
Auke Kok9a799d72007-09-15 14:07:45 -07005564 goto err_pci_reg;
5565 }
5566
Frans Pop19d5afd2009-10-02 10:04:12 -07005567 pci_enable_pcie_error_reporting(pdev);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08005568
Auke Kok9a799d72007-09-15 14:07:45 -07005569 pci_set_master(pdev);
Wendy Xiongfb3b27b2008-04-23 11:09:24 -07005570 pci_save_state(pdev);
Auke Kok9a799d72007-09-15 14:07:45 -07005571
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08005572 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
Auke Kok9a799d72007-09-15 14:07:45 -07005573 if (!netdev) {
5574 err = -ENOMEM;
5575 goto err_alloc_etherdev;
5576 }
5577
Auke Kok9a799d72007-09-15 14:07:45 -07005578 SET_NETDEV_DEV(netdev, &pdev->dev);
5579
5580 pci_set_drvdata(pdev, netdev);
5581 adapter = netdev_priv(netdev);
5582
5583 adapter->netdev = netdev;
5584 adapter->pdev = pdev;
5585 hw = &adapter->hw;
5586 hw->back = adapter;
5587 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
5588
Jeff Kirsher05857982008-09-11 19:57:00 -07005589 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
5590 pci_resource_len(pdev, 0));
Auke Kok9a799d72007-09-15 14:07:45 -07005591 if (!hw->hw_addr) {
5592 err = -EIO;
5593 goto err_ioremap;
5594 }
5595
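	/* NOTE: this loop has no effect as written; it only walks BARs 1-5
	 * and skips zero-length ones without doing anything else */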
5596 for (i = 1; i <= 5; i++) {
5597 if (pci_resource_len(pdev, i) == 0)
5598 continue;
5599 }
5600
Stephen Hemminger0edc3522008-11-19 22:24:29 -08005601 netdev->netdev_ops = &ixgbe_netdev_ops;
Auke Kok9a799d72007-09-15 14:07:45 -07005602 ixgbe_set_ethtool_ops(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07005603 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9a799d72007-09-15 14:07:45 -07005604 strcpy(netdev->name, pci_name(pdev));
5605
Auke Kok9a799d72007-09-15 14:07:45 -07005606 adapter->bd_number = cards_found;
5607
Auke Kok9a799d72007-09-15 14:07:45 -07005608 /* Setup hw api */
5609 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08005610 hw->mac.type = ii->mac;
Auke Kok9a799d72007-09-15 14:07:45 -07005611
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07005612 /* EEPROM */
5613 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
5614 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
5615 	/* If EEPROM is valid (bit 8 = 1), use the default read op; otherwise use bit-bang */
5616 if (!(eec & (1 << 8)))
5617 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
5618
5619 /* PHY */
5620 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
Donald Skidmorec4900be2008-11-20 21:11:42 -08005621 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
Ben Hutchings6b73e102009-04-29 08:08:58 +00005622 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
5623 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
5624 hw->phy.mdio.mmds = 0;
5625 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
5626 hw->phy.mdio.dev = netdev;
5627 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
5628 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
Donald Skidmorec4900be2008-11-20 21:11:42 -08005629
5630 /* set up this timer and work struct before calling get_invariants
5631 * which might start the timer
5632 */
5633 init_timer(&adapter->sfp_timer);
5634 adapter->sfp_timer.function = &ixgbe_sfp_timer;
5635 adapter->sfp_timer.data = (unsigned long) adapter;
5636
5637 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07005638
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005639 /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
5640 INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
5641
5642 /* a new SFP+ module arrival, called from GPI SDP2 context */
5643 INIT_WORK(&adapter->sfp_config_module_task,
5644 ixgbe_sfp_config_module_task);
5645
Don Skidmore8ca783a2009-05-26 20:40:47 -07005646 ii->get_invariants(hw);
Auke Kok9a799d72007-09-15 14:07:45 -07005647
5648 /* setup the private structure */
5649 err = ixgbe_sw_init(adapter);
5650 if (err)
5651 goto err_sw_init;
5652
Don Skidmorebf069c92009-05-07 10:39:54 +00005653 /*
5654 	 * If there is a fan on this device and it has failed, log the
5655 * failure.
5656 */
5657 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5658 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5659 if (esdp & IXGBE_ESDP_SDP1)
5660 DPRINTK(PROBE, CRIT,
5661 "Fan has stopped, replace the adapter\n");
5662 }
5663
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07005664 /* reset_hw fills in the perm_addr as well */
5665 err = hw->mac.ops.reset_hw(hw);
Don Skidmore8ca783a2009-05-26 20:40:47 -07005666 if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
5667 hw->mac.type == ixgbe_mac_82598EB) {
5668 /*
5669 		 * Arm the SFP detection timer to watch for a module to arrive.
5670 * Only do this for 82598, since 82599 will generate
5671 * interrupts on module arrival.
5672 */
5673 set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
5674 mod_timer(&adapter->sfp_timer,
5675 round_jiffies(jiffies + (2 * HZ)));
5676 err = 0;
5677 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
Don Skidmore88d2b812009-06-30 11:43:55 +00005678 dev_err(&adapter->pdev->dev, "failed to initialize because "
5679 "an unsupported SFP+ module type was detected.\n"
5680 "Reload the driver after installing a supported "
5681 "module.\n");
PJ Waskiewicz04f165e2009-04-09 22:27:57 +00005682 goto err_sw_init;
5683 } else if (err) {
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07005684 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
5685 goto err_sw_init;
5686 }
5687
Auke Kok9a799d72007-09-15 14:07:45 -07005688 netdev->features = NETIF_F_SG |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005689 NETIF_F_IP_CSUM |
5690 NETIF_F_HW_VLAN_TX |
5691 NETIF_F_HW_VLAN_RX |
5692 NETIF_F_HW_VLAN_FILTER;
Auke Kok9a799d72007-09-15 14:07:45 -07005693
Jesse Brandeburge9990a92008-08-26 04:27:24 -07005694 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9a799d72007-09-15 14:07:45 -07005695 netdev->features |= NETIF_F_TSO;
Auke Kok9a799d72007-09-15 14:07:45 -07005696 netdev->features |= NETIF_F_TSO6;
Herbert Xu78b6f4c2009-01-18 21:49:45 -08005697 netdev->features |= NETIF_F_GRO;
Jeff Kirsherad31c402008-06-05 04:05:30 -07005698
Jesse Brandeburg45a5ead2009-04-27 22:36:35 +00005699 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
5700 netdev->features |= NETIF_F_SCTP_CSUM;
5701
Jeff Kirsherad31c402008-06-05 04:05:30 -07005702 netdev->vlan_features |= NETIF_F_TSO;
5703 netdev->vlan_features |= NETIF_F_TSO6;
Jesse Brandeburg22f32b7a52008-08-26 04:27:18 -07005704 netdev->vlan_features |= NETIF_F_IP_CSUM;
Alexander Duyckcd1da502009-08-25 04:47:50 +00005705 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
Jeff Kirsherad31c402008-06-05 04:05:30 -07005706 netdev->vlan_features |= NETIF_F_SG;
5707
Alexander Duyck2f90b862008-11-20 20:52:10 -08005708 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
5709 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
5710
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08005711#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08005712 netdev->dcbnl_ops = &dcbnl_ops;
5713#endif
5714
Yi Zoueacd73f2009-05-13 13:11:06 +00005715#ifdef IXGBE_FCOE
Yi Zou0d551582009-07-22 14:07:12 +00005716 if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
Yi Zoueacd73f2009-05-13 13:11:06 +00005717 if (hw->mac.ops.get_device_caps) {
5718 hw->mac.ops.get_device_caps(hw, &device_caps);
Yi Zou0d551582009-07-22 14:07:12 +00005719 if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
5720 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
Yi Zoueacd73f2009-05-13 13:11:06 +00005721 }
5722 }
5723#endif /* IXGBE_FCOE */
Auke Kok9a799d72007-09-15 14:07:45 -07005724 if (pci_using_dac)
5725 netdev->features |= NETIF_F_HIGHDMA;
5726
Peter P Waskiewicz Jr0c19d6a2009-07-30 12:25:28 +00005727 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
Alexander Duyckf8212f92009-04-27 22:42:37 +00005728 netdev->features |= NETIF_F_LRO;
5729
Auke Kok9a799d72007-09-15 14:07:45 -07005730 /* make sure the EEPROM is good */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07005731 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
Auke Kok9a799d72007-09-15 14:07:45 -07005732 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
5733 err = -EIO;
5734 goto err_eeprom;
5735 }
5736
5737 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
5738 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
5739
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07005740 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
5741 dev_err(&pdev->dev, "invalid MAC address\n");
Auke Kok9a799d72007-09-15 14:07:45 -07005742 err = -EIO;
5743 goto err_eeprom;
5744 }
5745
5746 init_timer(&adapter->watchdog_timer);
5747 adapter->watchdog_timer.function = &ixgbe_watchdog;
5748 adapter->watchdog_timer.data = (unsigned long)adapter;
5749
5750 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07005751 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07005752
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08005753 err = ixgbe_init_interrupt_scheme(adapter);
5754 if (err)
5755 goto err_sw_init;
Auke Kok9a799d72007-09-15 14:07:45 -07005756
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005757 switch (pdev->device) {
5758 case IXGBE_DEV_ID_82599_KX4:
Waskiewicz Jr, Peter P495dce12009-04-23 11:15:18 +00005759 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
5760 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
Peter P Waskiewicz Jrbdf0a552009-06-04 11:09:58 +00005761 		/* Clear the APME (APM/ACPI wakeup enable) bit in GRC */
5762 IXGBE_WRITE_REG(hw, IXGBE_GRC,
5763 (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005764 break;
5765 default:
5766 adapter->wol = 0;
5767 break;
5768 }
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005769 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
5770
PJ Waskiewicz04f165e2009-04-09 22:27:57 +00005771 /* pick up the PCI bus settings for reporting later */
5772 hw->mac.ops.get_bus_info(hw);
5773
Auke Kok9a799d72007-09-15 14:07:45 -07005774 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07005775 dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005776 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
5777 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
5778 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
5779 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
5780 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005781 "Unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07005782 netdev->dev_addr);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07005783 ixgbe_read_pba_num_generic(hw, &part_num);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005784 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
5785 dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
5786 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
5787 (part_num >> 8), (part_num & 0xff));
5788 else
5789 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
5790 hw->mac.type, hw->phy.type,
5791 (part_num >> 8), (part_num & 0xff));
Auke Kok9a799d72007-09-15 14:07:45 -07005792
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005793 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
Auke Kok0c254d82008-02-11 09:25:56 -08005794 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005795 "this card is not sufficient for optimal "
5796 "performance.\n");
Auke Kok0c254d82008-02-11 09:25:56 -08005797 dev_warn(&pdev->dev, "For optimal performance a x8 "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005798 "PCI-Express slot is required.\n");
Auke Kok0c254d82008-02-11 09:25:56 -08005799 }
5800
Peter P Waskiewicz Jr34b03682009-02-05 23:54:42 -08005801 /* save off EEPROM version number */
5802 hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
5803
Auke Kok9a799d72007-09-15 14:07:45 -07005804 /* reset the hardware with the new settings */
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00005805 err = hw->mac.ops.start_hw(hw);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07005806
Peter P Waskiewicz Jr794caeb2009-06-04 16:02:24 +00005807 if (err == IXGBE_ERR_EEPROM_VERSION) {
5808 /* We are running on a pre-production device, log a warning */
5809 dev_warn(&pdev->dev, "This device is a pre-production "
5810 "adapter/LOM. Please be aware there may be issues "
5811 "associated with your hardware. If you are "
5812 "experiencing problems please contact your Intel or "
5813 "hardware representative who provided you with this "
5814 "hardware.\n");
5815 }
Auke Kok9a799d72007-09-15 14:07:45 -07005816 strcpy(netdev->name, "eth%d");
5817 err = register_netdev(netdev);
5818 if (err)
5819 goto err_register;
5820
Jesse Brandeburg54386462009-04-17 20:44:27 +00005821 /* carrier off reporting is important to ethtool even BEFORE open */
5822 netif_carrier_off(netdev);
5823
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005824 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5825 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5826 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
5827
Jeff Garzik5dd2d332008-10-16 05:09:31 -04005828#ifdef CONFIG_IXGBE_DCA
Denis V. Lunev652f0932008-03-27 14:39:17 +03005829 if (dca_add_requester(&pdev->dev) == 0) {
Jeb Cramerbd0362d2008-03-03 15:04:02 -08005830 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
Jeb Cramerbd0362d2008-03-03 15:04:02 -08005831 ixgbe_setup_dca(adapter);
5832 }
5833#endif
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00005834 /* add san mac addr to netdev */
5835 ixgbe_add_sanmac_netdev(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07005836
5837 dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
5838 cards_found++;
5839 return 0;
5840
5841err_register:
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08005842 ixgbe_release_hw_control(adapter);
Alexander Duyck7a921c92009-05-06 10:43:28 +00005843 ixgbe_clear_interrupt_scheme(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005844err_sw_init:
5845err_eeprom:
Donald Skidmorec4900be2008-11-20 21:11:42 -08005846 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
5847 del_timer_sync(&adapter->sfp_timer);
5848 cancel_work_sync(&adapter->sfp_task);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005849 cancel_work_sync(&adapter->multispeed_fiber_task);
5850 cancel_work_sync(&adapter->sfp_config_module_task);
Auke Kok9a799d72007-09-15 14:07:45 -07005851 iounmap(hw->hw_addr);
5852err_ioremap:
5853 free_netdev(netdev);
5854err_alloc_etherdev:
gouji-new9ce77662009-05-06 10:44:45 +00005855 pci_release_selected_regions(pdev, pci_select_bars(pdev,
5856 IORESOURCE_MEM));
Auke Kok9a799d72007-09-15 14:07:45 -07005857err_pci_reg:
5858err_dma:
5859 pci_disable_device(pdev);
5860 return err;
5861}
5862
5863/**
5864 * ixgbe_remove - Device Removal Routine
5865 * @pdev: PCI device information struct
5866 *
5867 * ixgbe_remove is called by the PCI subsystem to alert the driver
5868 * that it should release a PCI device. This could be caused by a
5869 * Hot-Plug event, or because the driver is going to be removed from
5870 * memory.
5871 **/
5872static void __devexit ixgbe_remove(struct pci_dev *pdev)
5873{
5874 struct net_device *netdev = pci_get_drvdata(pdev);
5875 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5876
5877 set_bit(__IXGBE_DOWN, &adapter->state);
Donald Skidmorec4900be2008-11-20 21:11:42 -08005878 /* clear the module not found bit to make sure the worker won't
5879 * reschedule
5880 */
5881 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
Auke Kok9a799d72007-09-15 14:07:45 -07005882 del_timer_sync(&adapter->watchdog_timer);
5883
Donald Skidmorec4900be2008-11-20 21:11:42 -08005884 del_timer_sync(&adapter->sfp_timer);
5885 cancel_work_sync(&adapter->watchdog_task);
5886 cancel_work_sync(&adapter->sfp_task);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005887 cancel_work_sync(&adapter->multispeed_fiber_task);
5888 cancel_work_sync(&adapter->sfp_config_module_task);
Peter P Waskiewicz Jrc4cf55e2009-06-04 16:01:43 +00005889 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5890 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5891 cancel_work_sync(&adapter->fdir_reinit_task);
Auke Kok9a799d72007-09-15 14:07:45 -07005892 flush_scheduled_work();
5893
Jeff Garzik5dd2d332008-10-16 05:09:31 -04005894#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08005895 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
5896 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
5897 dca_remove_requester(&pdev->dev);
5898 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
5899 }
5900
5901#endif
Yi Zou332d4a72009-05-13 13:11:53 +00005902#ifdef IXGBE_FCOE
5903 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
5904 ixgbe_cleanup_fcoe(adapter);
5905
5906#endif /* IXGBE_FCOE */
PJ Waskiewicz0365e6e2009-05-17 12:32:25 +00005907
5908 /* remove the added san mac */
5909 ixgbe_del_sanmac_netdev(netdev);
5910
Donald Skidmorec4900be2008-11-20 21:11:42 -08005911 if (netdev->reg_state == NETREG_REGISTERED)
5912 unregister_netdev(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07005913
Alexander Duyck7a921c92009-05-06 10:43:28 +00005914 ixgbe_clear_interrupt_scheme(adapter);
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08005915
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08005916 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005917
5918 iounmap(adapter->hw.hw_addr);
gouji-new9ce77662009-05-06 10:44:45 +00005919 pci_release_selected_regions(pdev, pci_select_bars(pdev,
5920 IORESOURCE_MEM));
Auke Kok9a799d72007-09-15 14:07:45 -07005921
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08005922 DPRINTK(PROBE, INFO, "complete\n");
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08005923
Auke Kok9a799d72007-09-15 14:07:45 -07005924 free_netdev(netdev);
5925
Frans Pop19d5afd2009-10-02 10:04:12 -07005926 pci_disable_pcie_error_reporting(pdev);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08005927
Auke Kok9a799d72007-09-15 14:07:45 -07005928 pci_disable_device(pdev);
5929}
5930
5931/**
5932 * ixgbe_io_error_detected - called when PCI error is detected
5933 * @pdev: Pointer to PCI device
5934 * @state: The current pci connection state
5935 *
5936 * This function is called after a PCI bus error affecting
5937 * this device has been detected.
5938 */
5939static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005940 pci_channel_state_t state)
Auke Kok9a799d72007-09-15 14:07:45 -07005941{
5942 struct net_device *netdev = pci_get_drvdata(pdev);
Wang Chen454d7c92008-11-12 23:37:49 -08005943 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07005944
5945 netif_device_detach(netdev);
5946
Breno Leitao3044b8d2009-05-06 10:44:26 +00005947 if (state == pci_channel_io_perm_failure)
5948 return PCI_ERS_RESULT_DISCONNECT;
5949
Auke Kok9a799d72007-09-15 14:07:45 -07005950 if (netif_running(netdev))
5951 ixgbe_down(adapter);
5952 pci_disable_device(pdev);
5953
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005954 /* Request a slot reset. */
Auke Kok9a799d72007-09-15 14:07:45 -07005955 return PCI_ERS_RESULT_NEED_RESET;
5956}
5957
5958/**
5959 * ixgbe_io_slot_reset - called after the pci bus has been reset.
5960 * @pdev: Pointer to PCI device
5961 *
5962 * Restart the card from scratch, as if from a cold-boot.
5963 */
5964static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
5965{
5966 struct net_device *netdev = pci_get_drvdata(pdev);
Wang Chen454d7c92008-11-12 23:37:49 -08005967 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08005968 pci_ers_result_t result;
5969 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07005970
gouji-new9ce77662009-05-06 10:44:45 +00005971 if (pci_enable_device_mem(pdev)) {
Auke Kok9a799d72007-09-15 14:07:45 -07005972 DPRINTK(PROBE, ERR,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005973 "Cannot re-enable PCI device after reset.\n");
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08005974 result = PCI_ERS_RESULT_DISCONNECT;
5975 } else {
5976 pci_set_master(pdev);
5977 pci_restore_state(pdev);
5978
Don Skidmoredd4d8ca2009-04-29 00:22:31 -07005979 pci_wake_from_d3(pdev, false);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08005980
5981 ixgbe_reset(adapter);
PJ Waskiewicz88512532009-03-13 22:15:10 +00005982 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08005983 result = PCI_ERS_RESULT_RECOVERED;
Auke Kok9a799d72007-09-15 14:07:45 -07005984 }
Auke Kok9a799d72007-09-15 14:07:45 -07005985
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08005986 err = pci_cleanup_aer_uncorrect_error_status(pdev);
5987 if (err) {
5988 dev_err(&pdev->dev,
5989 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
5990 /* non-fatal, continue */
5991 }
Auke Kok9a799d72007-09-15 14:07:45 -07005992
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08005993 return result;
Auke Kok9a799d72007-09-15 14:07:45 -07005994}
5995
5996/**
5997 * ixgbe_io_resume - called when traffic can start flowing again.
5998 * @pdev: Pointer to PCI device
5999 *
6000 * This callback is called when the error recovery driver tells us that
6001 * it's OK to resume normal operation.
6002 */
6003static void ixgbe_io_resume(struct pci_dev *pdev)
6004{
6005 struct net_device *netdev = pci_get_drvdata(pdev);
Wang Chen454d7c92008-11-12 23:37:49 -08006006 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07006007
6008 if (netif_running(netdev)) {
6009 if (ixgbe_up(adapter)) {
6010 DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
6011 return;
6012 }
6013 }
6014
6015 netif_device_attach(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07006016}
6017
6018static struct pci_error_handlers ixgbe_err_handler = {
6019 .error_detected = ixgbe_io_error_detected,
6020 .slot_reset = ixgbe_io_slot_reset,
6021 .resume = ixgbe_io_resume,
6022};
6023
6024static struct pci_driver ixgbe_driver = {
6025 .name = ixgbe_driver_name,
6026 .id_table = ixgbe_pci_tbl,
6027 .probe = ixgbe_probe,
6028 .remove = __devexit_p(ixgbe_remove),
6029#ifdef CONFIG_PM
6030 .suspend = ixgbe_suspend,
6031 .resume = ixgbe_resume,
6032#endif
6033 .shutdown = ixgbe_shutdown,
6034 .err_handler = &ixgbe_err_handler
6035};
6036
6037/**
6038 * ixgbe_init_module - Driver Registration Routine
6039 *
6040 * ixgbe_init_module is the first routine called when the driver is
6041 * loaded. All it does is register with the PCI subsystem.
6042 **/
6043static int __init ixgbe_init_module(void)
6044{
6045 int ret;
6046 printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
6047 ixgbe_driver_string, ixgbe_driver_version);
6048
6049 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
6050
Jeff Garzik5dd2d332008-10-16 05:09:31 -04006051#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08006052 dca_register_notify(&dca_notifier);
Jeb Cramerbd0362d2008-03-03 15:04:02 -08006053#endif
Jeff Garzik5dd2d332008-10-16 05:09:31 -04006054
Auke Kok9a799d72007-09-15 14:07:45 -07006055 ret = pci_register_driver(&ixgbe_driver);
6056 return ret;
6057}
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07006058
Auke Kok9a799d72007-09-15 14:07:45 -07006059module_init(ixgbe_init_module);
6060
6061/**
6062 * ixgbe_exit_module - Driver Exit Cleanup Routine
6063 *
6064 * ixgbe_exit_module is called just before the driver is removed
6065 * from memory.
6066 **/
6067static void __exit ixgbe_exit_module(void)
6068{
Jeff Garzik5dd2d332008-10-16 05:09:31 -04006069#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08006070 dca_unregister_notify(&dca_notifier);
6071#endif
Auke Kok9a799d72007-09-15 14:07:45 -07006072 pci_unregister_driver(&ixgbe_driver);
6073}
Jeb Cramerbd0362d2008-03-03 15:04:02 -08006074
Jeff Garzik5dd2d332008-10-16 05:09:31 -04006075#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08006076static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07006077 void *p)
Jeb Cramerbd0362d2008-03-03 15:04:02 -08006078{
6079 int ret_val;
6080
6081 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07006082 __ixgbe_notify_dca);
Jeb Cramerbd0362d2008-03-03 15:04:02 -08006083
6084 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
6085}
Jeb Cramerbd0362d2008-03-03 15:04:02 -08006086
Alexander Duyckb4533682009-03-31 21:32:42 +00006087#endif /* CONFIG_IXGBE_DCA */
6088#ifdef DEBUG
6089/**
6090 * ixgbe_get_hw_dev_name - return device name string
6091 * used by hardware layer to print debugging information
6092 **/
6093char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
6094{
6095 struct ixgbe_adapter *adapter = hw->back;
6096 return adapter->netdev->name;
6097}
6098
6099#endif
Auke Kok9a799d72007-09-15 14:07:45 -07006100module_exit(ixgbe_exit_module);
6101
6102/* ixgbe_main.c */