/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.16-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
	[board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
	 board_82599 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
	 board_82599 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
	.notifier_call = ixgbe_notify_dca,
	.next          = NULL,
	.priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
	                ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

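/*
 * Summary of the IVAR layout used by ixgbe_set_ivar() below: each 32-bit
 * IVAR register packs four 8-bit interrupt allocation entries, so on 82598
 * the register index is the cause index divided by four and the entry within
 * it is selected by (queue & 0x3).  On 82599 the Rx and Tx causes of a queue
 * pair share one IVAR register, and the non-queue ("other") causes are
 * programmed through IVAR_MISC instead.
 */
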
/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (direction == -1)
			direction = 0;
		index = (((direction * 64) + queue) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (queue & 0x3)));
		ivar |= (msix_vector << (8 * (queue & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;
	case ixgbe_mac_82599EB:
		if (direction == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((queue & 1) * 8);
			ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
			break;
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = ((16 * (queue & 1)) + (8 * direction));
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
			break;
		}
	default:
		break;
	}
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
	tx_buffer_info->dma = 0;
	if (tx_buffer_info->skb) {
		skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
		              DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
	struct ixgbe_hw *hw = &adapter->hw;

	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of eop */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].time_stamp &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
		/* detected Tx unit hang */
		union ixgbe_adv_tx_desc *tx_desc;
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
		        "  Tx Queue      <%d>\n"
		        "  TDH, TDT      <%x>, <%x>\n"
		        "  next_to_use   <%x>\n"
		        "  next_to_clean <%x>\n"
		        "tx_buffer_info[next_to_clean]\n"
		        "  time_stamp    <%lx>\n"
		        "  jiffies       <%lx>\n",
		        tx_ring->queue_index,
		        IXGBE_READ_REG(hw, tx_ring->head),
		        IXGBE_READ_REG(hw, tx_ring->tail),
		        tx_ring->next_to_use, eop,
		        tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
		return true;
	}

	return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                          (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
                     MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */

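/*
 * Worked example for the macros above: a single advanced Tx descriptor can
 * carry at most IXGBE_MAX_DATA_PER_TXD (1 << 14 = 16384) bytes, so
 * TXD_USE_COUNT(S) rounds a buffer of S bytes up to the number of
 * descriptors it will consume (a 20000-byte buffer needs 2).  DESC_NEEDED is
 * the worst case for one skb: its linear data, plus MAX_SKB_FRAGS page-sized
 * fragments, plus one context descriptor.
 */
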
static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 *
 * returns true if transmit work is done
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
				            skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbe_unmap_and_free_tx_resource(adapter,
			                                 tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
	             (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
			/* schedule immediate reset if we believe we hung */
			DPRINTK(PROBE, INFO,
			        "tx hang %d detected, resetting adapter\n",
			        adapter->tx_timeout_count + 1);
			ixgbe_tx_timeout(adapter->netdev);
		}
	}

	/* re-arm the interrupt */
	if (count >= tx_ring->work_limit) {
		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
			                tx_ring->v_idx);
		else if (tx_ring->v_idx & 0xFFFFFFFF)
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0),
			                tx_ring->v_idx);
		else
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1),
			                (tx_ring->v_idx >> 32));
	}

	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->stats.packets += total_packets;
	tx_ring->stats.bytes += total_bytes;
	adapter->net_stats.tx_bytes += total_bytes;
	adapter->net_stats.tx_packets += total_packets;
	return (count < tx_ring->work_limit);
}

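/*
 * The helpers below cover Direct Cache Access (DCA): when enabled, the
 * DCA_RXCTRL/DCA_TXCTRL registers are tagged with the CPU currently
 * servicing a ring so descriptor (and, for Rx, header) writes are steered
 * toward that CPU's cache.  Each ring remembers the last CPU it was
 * programmed for in ->cpu, so the tag is only rewritten when the ring
 * migrates to a different CPU.
 */
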
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
	u32 rxctrl;
	int cpu = get_cpu();
	int q = rx_ring - adapter->rx_ring;

	if (rx_ring->cpu != cpu) {
		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
			rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
			rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
		}
		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
		            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
		rx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
	u32 txctrl;
	int cpu = get_cpu();
	int q = tx_ring - adapter->tx_ring;

	if (tx_ring->cpu != cpu) {
		txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
			txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
			txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
			           IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
		}
		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
		tx_ring->cpu = cpu;
	}
	put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
	int i;

	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
		return;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].cpu = -1;
		ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].cpu = -1;
		ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
	}
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if we're already enabled, don't do it again */
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			break;
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
			ixgbe_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
			dca_remove_requester(dev);
			adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
		}
		break;
	}

	return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                              struct sk_buff *skb, u8 status,
                              union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct napi_struct *napi = &q_vector->napi;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	skb_record_rx_queue(skb, q_vector->v_idx);
	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan && (tag != 0))
			vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (adapter->vlgrp && is_vlan && (tag != 0))
			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			netif_rx(skb);
	}
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
                                         struct ixgbe_ring *rx_ring, u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = pci_map_page(pdev, bi->page,
			                            bi->page_offset,
			                            (PAGE_SIZE / 2),
			                            PCI_DMA_FROMDEVICE);
		}

		if (!bi->skb) {
			struct sk_buff *skb;
			skb = netdev_alloc_skb(adapter->netdev, bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
			bi->dma = pci_map_single(pdev, skb->data, bufsz,
			                         PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
	        IXGBE_RXDADV_RSCCNT_MASK) >>
	        IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
	unsigned int frag_list_size = 0;

	while (skb->prev) {
		struct sk_buff *prev = skb->prev;
		frag_list_size += skb->len;
		skb->prev = NULL;
		skb = prev;
	}

	skb_shinfo(skb)->frag_list = skb->next;
	skb->next = NULL;
	skb->len += frag_list_size;
	skb->data_len += frag_list_size;
	skb->truesize += frag_list_size;
	return skb;
}

static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i, rsc_count = 0;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (len && !skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
			                 rx_ring->rx_buf_len,
			                 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
			               PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			                   rx_buffer_info->page,
			                   rx_buffer_info->page_offset,
			                   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		if (adapter->flags & IXGBE_FLAG_RSC_CAPABLE)
			rsc_count = ixgbe_get_rsc_count(rx_desc);

		if (rsc_count) {
			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
			             IXGBE_RXDADV_NEXTP_SHIFT;
			next_buffer = &rx_ring->rx_buffer_info[nextp];
			rx_ring->rsc_count += (rsc_count - 1);
		} else {
			next_buffer = &rx_ring->rx_buffer_info[i];
		}

		if (staterr & IXGBE_RXD_STAT_EOP) {
			if (skb->prev)
				skb = ixgbe_transform_rsc_queue(skb);
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		skb->protocol = eth_type_trans(skb, adapter->netdev);
		ixgbe_receive_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	adapter->net_stats.rx_bytes += total_rx_bytes;
	adapter->net_stats.rx_packets += total_rx_packets;

	return cleaned;
}

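/*
 * MSI-X setup overview: each q_vector owns bitmaps (rxr_idx/txr_idx) of the
 * Rx and Tx rings it services.  ixgbe_configure_msix() below walks those
 * bitmaps and programs the IVAR entries so every owned ring raises that
 * vector's interrupt, then seeds the vector's EITR (interrupt throttle)
 * register.
 */
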
static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	struct ixgbe_q_vector *q_vector;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
		                       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
			                      adapter->num_rx_queues,
			                      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
		                       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbe_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
			                      adapter->num_tx_queues,
			                      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else if (q_vector->rxr_count)
			/* rx only */
			q_vector->eitr = adapter->eitr_param;

		/*
		 * since this is initial set up don't need to call
		 * ixgbe_write_eitr helper
		 */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
		                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
	}

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
		               v_idx);
	else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
		ixgbe_set_ivar(adapter, -1, 1, v_idx);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @adapter: pointer to adapter struct
 * @v_idx: vector index into q_vector array
 * @itr_reg: new value to be written in *register* format, not ints/s
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_adapter *adapter, int v_idx, u32 itr_reg)
{
	struct ixgbe_hw *hw = &adapter->hw;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* must write high and low 16 bits to reset counter */
		itr_reg |= (itr_reg << 16);
	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
		/*
		 * set the WDIS bit to not clear the timer bits and cause an
		 * immediate assertion of the interrupt
		 */
		itr_reg |= IXGBE_EITR_CNT_WDIS;
	}
	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = q_vector->v_idx;
	struct ixgbe_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->tx_itr,
		                           tx_ring->total_packets,
		                           tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
		                    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
		                           q_vector->rx_itr,
		                           rx_ring->total_packets,
		                           rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
		                    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		ixgbe_write_eitr(adapter, v_idx, itr_reg);
	}

	return;
}

static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
		/* write to clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
	}
}

static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (eicr & IXGBE_EICR_GPI_SDP1) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
		schedule_work(&adapter->multispeed_fiber_task);
	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
		/* Clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
		schedule_work(&adapter->sfp_config_module_task);
	} else {
		/* Interrupt isn't for us... */
		return;
	}
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->lsc_int++;
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
		IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
		schedule_work(&adapter->watchdog_task);
	}
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	/*
	 * Workaround for silicon errata.  Use clear-by-write instead
	 * of clear-by-read.  Reading with EICS will return the
	 * interrupt causes without clearing, which is later done
	 * with the write to EICR.
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	if (eicr & IXGBE_EICR_LSC)
		ixgbe_check_lsc(adapter);

	if (hw->mac.type == ixgbe_mac_82598EB)
		ixgbe_check_fan_failure(adapter, eicr);

	if (hw->mac.type == ixgbe_mac_82599EB)
		ixgbe_check_sfp_event(adapter, eicr);
	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
			ixgbe_update_tx_dca(adapter, tx_ring);
#endif
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		ixgbe_clean_tx_irq(adapter, tx_ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
		                      r_idx + 1);
	}

	return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_q_vector *q_vector = data;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
		                      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
	else if (rx_ring->v_idx & 0xFFFFFFFF)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), rx_ring->v_idx);
	else
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1),
		                (rx_ring->v_idx >> 32));
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
	ixgbe_msix_clean_rx(irq, data);
	ixgbe_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}

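/*
 * ixgbe_irq_enable_queues() below takes a 64-bit queue mask: 82598 only has
 * the single 32-bit EIMS register, while 82599 spreads the mask across
 * EIMS_EX(0) (low 32 bits) and EIMS_EX(1) (high 32 bits).  The same split
 * shows up wherever EICS or EIMC is written for a specific vector above.
 */
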
static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
{
	u32 mask;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
	} else {
		mask = (qmask & 0xFFFFFFFF);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
		mask = (qmask >> 32);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
	}
	/* skip the flush */
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_q_vector *q_vector =
	                       container_of(napi, struct ixgbe_q_vector, napi);
	struct ixgbe_adapter *adapter = q_vector->adapter;
	struct ixgbe_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
		ixgbe_update_rx_dca(adapter, rx_ring);
#endif

	ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbe_set_itr_msix(q_vector);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable_queues(adapter, rx_ring->v_idx);
	}

	return work_done;
}

Jesse Brandeburgf0848272008-09-11 19:59:42 -07001253/**
1254 * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
1255 * @napi: napi struct with our devices info in it
1256 * @budget: amount of work driver is allowed to do this pass, in packets
1257 *
1258 * This function will clean more than one rx queue associated with a
1259 * q_vector.
1260 **/
1261static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
1262{
1263 struct ixgbe_q_vector *q_vector =
1264 container_of(napi, struct ixgbe_q_vector, napi);
1265 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001266 struct ixgbe_ring *rx_ring = NULL;
1267 int work_done = 0, i;
1268 long r_idx;
Nelson, Shannon835462f2009-04-27 22:42:54 +00001269 u64 enable_mask = 0;
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001270
1271 /* attempt to distribute budget to each queue fairly, but don't allow
1272 * the budget to go below 1 because we'll exit polling */
1273 budget /= (q_vector->rxr_count ?: 1);
1274 budget = max(budget, 1);
1275 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1276 for (i = 0; i < q_vector->rxr_count; i++) {
1277 rx_ring = &(adapter->rx_ring[r_idx]);
Jeff Garzik5dd2d332008-10-16 05:09:31 -04001278#ifdef CONFIG_IXGBE_DCA
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001279 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1280 ixgbe_update_rx_dca(adapter, rx_ring);
1281#endif
Herbert Xu78b6f4c2009-01-18 21:49:45 -08001282 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001283 enable_mask |= rx_ring->v_idx;
1284 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1285 r_idx + 1);
1286 }
1287
1288 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1289 rx_ring = &(adapter->rx_ring[r_idx]);
1290 /* If all Rx work done, exit the polling mode */
Jesse Brandeburg7f821872008-09-11 20:00:16 -07001291 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08001292 napi_complete(napi);
Jesse Brandeburg509ee932009-03-13 22:13:28 +00001293 if (adapter->itr_setting & 1)
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001294 ixgbe_set_itr_msix(q_vector);
1295 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Nelson, Shannon835462f2009-04-27 22:42:54 +00001296 ixgbe_irq_enable_queues(adapter, enable_mask);
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001297 return 0;
1298 }
1299
1300 return work_done;
1301}
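
/*
 * map_vector_to_rxq/map_vector_to_txq - record that MSI-X vector 'v_idx'
 * services the given Rx/Tx ring: mark the ring in the vector's bitmap,
 * bump the vector's ring count, and remember the vector's interrupt bit
 * in the ring's v_idx mask so EIMS/EIMC writes can target this ring.
 */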
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
        struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

        set_bit(r_idx, q_vector->rxr_idx);
        q_vector->rxr_count++;
        a->rx_ring[r_idx].v_idx = (u64)1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int t_idx)
{
        struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];

        set_bit(t_idx, q_vector->txr_idx);
        q_vector->txr_count++;
        a->tx_ring[t_idx].v_idx = (u64)1 << v_idx;
}

/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
                                      int vectors)
{
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int i, j;
        int rqpv, tqpv;
        int err = 0;

        /* No mapping required if MSI-X is disabled. */
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                goto out;

        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
        if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);

                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);

                goto out;
        }

        /*
         * If we don't have enough vectors for a 1-to-1
         * mapping, we'll have to group them so there are
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
        for (i = v_start; i < vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}

/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        irqreturn_t (*handler)(int, void *);
        int i, vector, q_vectors, err;
        int ri = 0, ti = 0;

        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* Map the Tx/Rx rings to the vectors we were allotted. */
        err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
        if (err)
                goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                         &ixgbe_msix_clean_many)
        for (vector = 0; vector < q_vectors; vector++) {
                handler = SET_HANDLER(adapter->q_vector[vector]);

                if (handler == &ixgbe_msix_clean_rx) {
                        sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "rx", ri++);
                } else if (handler == &ixgbe_msix_clean_tx) {
                        sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "tx", ti++);
                } else {
                        sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "TxRx", vector);
                }

                err = request_irq(adapter->msix_entries[vector].vector,
                                  handler, 0, adapter->name[vector],
                                  adapter->q_vector[vector]);
                if (err) {
                        DPRINTK(PROBE, ERR,
                                "request_irq failed for MSIX interrupt "
                                "Error: %d\n", err);
                        goto free_queue_irqs;
                }
        }

        sprintf(adapter->name[vector], "%s:lsc", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
        if (err) {
                DPRINTK(PROBE, ERR,
                        "request_irq for msix_lsc failed: %d\n", err);
                goto free_queue_irqs;
        }

        return 0;

free_queue_irqs:
        for (i = vector - 1; i >= 0; i--)
                free_irq(adapter->msix_entries[--vector].vector,
                         adapter->q_vector[i]);
        adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
out:
        return err;
}
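
/**
 * ixgbe_set_itr - update the adaptive ITR value for the legacy/MSI vector
 * @adapter: board private structure
 *
 * Single-vector counterpart of ixgbe_set_itr_msix(): looks at the byte and
 * packet counts on ring 0, picks a new interrupt throttle rate, smooths it,
 * and writes the result to EITR(0).
 **/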
static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
        struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
        u8 current_itr;
        u32 new_itr = q_vector->eitr;
        struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
        struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

        q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
                                            q_vector->tx_itr,
                                            tx_ring->total_packets,
                                            tx_ring->total_bytes);
        q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
                                            q_vector->rx_itr,
                                            rx_ring->total_packets,
                                            rx_ring->total_bytes);

        current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
                new_itr = 8000;
                break;
        default:
                break;
        }

        if (new_itr != q_vector->eitr) {
                u32 itr_reg;

                /* save the algorithm value here, not the smoothed one */
                q_vector->eitr = new_itr;
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
                itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
                ixgbe_write_eitr(adapter, 0, itr_reg);
        }

        return;
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
        u32 mask;

        mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
                mask |= IXGBE_EIMS_GPI_SDP1;
        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                mask |= IXGBE_EIMS_ECC;
                mask |= IXGBE_EIMS_GPI_SDP1;
                mask |= IXGBE_EIMS_GPI_SDP2;
        }

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
        ixgbe_irq_enable_queues(adapter, ~0);
        IXGBE_WRITE_FLUSH(&adapter->hw);
}

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
        u32 eicr;

        /*
         * Workaround for silicon errata.  Mask the interrupts
         * before the read of EICR.
         */
        IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

        /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
         * therefore no explicit interrupt disable is necessary */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        if (!eicr) {
                /* shared interrupt alert!
                 * make sure interrupts are enabled because the read will
                 * have disabled interrupts due to EIAM */
                ixgbe_irq_enable(adapter);
                return IRQ_NONE; /* Not our interrupt */
        }

        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);

        if (hw->mac.type == ixgbe_mac_82599EB)
                ixgbe_check_sfp_event(adapter, eicr);

        ixgbe_check_fan_failure(adapter, eicr);

        if (napi_schedule_prep(&(q_vector->napi))) {
                adapter->tx_ring[0].total_packets = 0;
                adapter->tx_ring[0].total_bytes = 0;
                adapter->rx_ring[0].total_packets = 0;
                adapter->rx_ring[0].total_bytes = 0;
                /* would disable interrupts here but EIAM disabled it */
                __napi_schedule(&(q_vector->napi));
        }

        return IRQ_HANDLED;
}
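
/*
 * Clear the ring-to-vector mappings built by ixgbe_map_rings_to_vectors()
 * so the q_vectors can be remapped the next time interrupts are set up.
 */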
static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < q_vectors; i++) {
                struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
                bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
                bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
                q_vector->rxr_count = 0;
                q_vector->txr_count = 0;
        }
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                err = ixgbe_request_msix_irqs(adapter);
        } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
                err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
                                  netdev->name, netdev);
        } else {
                err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
                                  netdev->name, netdev);
        }

        if (err)
                DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

        return err;
}
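
/**
 * ixgbe_free_irq - free the IRQs requested by ixgbe_request_irq
 * @adapter: board private structure
 *
 * Releases the link status change vector and every queue vector when
 * MSI-X is in use, otherwise the single legacy/MSI interrupt, then
 * clears the q_vector ring mappings.
 **/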
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                int i, q_vectors;

                q_vectors = adapter->num_msix_vectors;

                i = q_vectors - 1;
                free_irq(adapter->msix_entries[i].vector, netdev);

                i--;
                for (; i >= 0; i--) {
                        free_irq(adapter->msix_entries[i].vector,
                                 adapter->q_vector[i]);
                }

                ixgbe_reset_q_vectors(adapter);
        } else {
                free_irq(adapter->pdev->irq, netdev);
        }
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
        } else {
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                int i;
                for (i = 0; i < adapter->num_msix_vectors; i++)
                        synchronize_irq(adapter->msix_entries[i].vector);
        } else {
                synchronize_irq(adapter->pdev->irq);
        }
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 *
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
                        EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));

        ixgbe_set_ivar(adapter, 0, 0, 0);
        ixgbe_set_ivar(adapter, 1, 0, 0);

        map_vector_to_rxq(adapter, 0, 0);
        map_vector_to_txq(adapter, 0, 0);

        DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
        u64 tdba;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 i, j, tdlen, txctrl;

        /* Setup the HW Tx Head and Tail descriptor pointers */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct ixgbe_ring *ring = &adapter->tx_ring[i];
                j = ring->reg_idx;
                tdba = ring->dma;
                tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
                IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
                                (tdba & DMA_BIT_MASK(32)));
                IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
                IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
                adapter->tx_ring[i].head = IXGBE_TDH(j);
                adapter->tx_ring[i].tail = IXGBE_TDT(j);
                /* Disable Tx Head Writeback RO bit, since this hoses
                 * bookkeeping if things aren't delivered in order.
                 */
                txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
                txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
        }
        if (hw->mac.type == ixgbe_mac_82599EB) {
                /* We enable 8 traffic classes, DCB only */
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
                        IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
                                        IXGBE_MTQC_8TC_8TQ));
        }
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
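
/*
 * ixgbe_configure_srrctl - program the split receive control register for
 * one ring: select the descriptor type (header split vs. single buffer)
 * and the header/packet buffer sizes based on the packet-split flag and
 * the ring's rx_buf_len.
 */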
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
{
        struct ixgbe_ring *rx_ring;
        u32 srrctl;
        int queue0 = 0;
        unsigned long mask;

        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                queue0 = index;
        } else {
                mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
                queue0 = index & mask;
                index = index & mask;
        }

        rx_ring = &adapter->rx_ring[queue0];

        srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));

        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                u16 bufsz = IXGBE_RXBUFFER_2048;
                /* grow the amount we can receive on large page machines */
                if (bufsz < (PAGE_SIZE / 2))
                        bufsz = (PAGE_SIZE / 2);
                /* cap the bufsz at our largest descriptor size */
                bufsz = min((u16)IXGBE_MAX_RXBUFFER, bufsz);

                srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
                srrctl |= ((IXGBE_RX_HDR_SIZE <<
                            IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                           IXGBE_SRRCTL_BSIZEHDR_MASK);
        } else {
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

                if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
                        srrctl |= IXGBE_RXBUFFER_2048 >>
                                  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                else
                        srrctl |= rx_ring->rx_buf_len >>
                                  IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        }

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
}

/**
 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
        u64 rdba;
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        int i, j;
        u32 rdlen, rxctrl, rxcsum;
        static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
                          0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
                          0x6A3E67EA, 0x14364D17, 0x3BED200D};
        u32 fctrl, hlreg0;
        u32 reta = 0, mrqc = 0;
        u32 rdrxctl;
        u32 rscctrl;
        int rx_buf_len;

        /* Decide whether to use packet split mode or not */
        adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;

        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                rx_buf_len = IXGBE_RX_HDR_SIZE;
                if (hw->mac.type == ixgbe_mac_82599EB) {
                        /* PSRTYPE must be initialized in 82599 */
                        u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
                                      IXGBE_PSRTYPE_UDPHDR |
                                      IXGBE_PSRTYPE_IPV4HDR |
                                      IXGBE_PSRTYPE_IPV6HDR;
                        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
                }
        } else {
                if (!(adapter->flags & IXGBE_FLAG_RSC_ENABLED) &&
                    (netdev->mtu <= ETH_DATA_LEN))
                        rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                else
                        rx_buf_len = ALIGN(max_frame, 1024);
        }

        fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
        fctrl |= IXGBE_FCTRL_BAM;
        fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
        fctrl |= IXGBE_FCTRL_PMCF;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        if (adapter->netdev->mtu <= ETH_DATA_LEN)
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
        else
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
        /* disable receives while setting up the descriptors */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rdba = adapter->rx_ring[i].dma;
                j = adapter->rx_ring[i].reg_idx;
                IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
                IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
                IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
                adapter->rx_ring[i].head = IXGBE_RDH(j);
                adapter->rx_ring[i].tail = IXGBE_RDT(j);
                adapter->rx_ring[i].rx_buf_len = rx_buf_len;

                ixgbe_configure_srrctl(adapter, j);
        }

        if (hw->mac.type == ixgbe_mac_82598EB) {
                /*
                 * For VMDq support of different descriptor types or
                 * buffer sizes through the use of multiple SRRCTL
                 * registers, RDRXCTL.MVMEN must be set to 1
                 *
                 * also, the manual doesn't mention it clearly but DCA hints
                 * will only use queue 0's tags unless this bit is set.  Side
                 * effects of setting this bit are only that SRRCTL must be
                 * fully programmed [0..15]
                 */
                rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
                rdrxctl |= IXGBE_RDRXCTL_MVMEN;
                IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
        }

        /* Program MRQC for the distribution of queues */
        if (hw->mac.type == ixgbe_mac_82599EB) {
                int mask = adapter->flags & (
                                IXGBE_FLAG_RSS_ENABLED
                                | IXGBE_FLAG_DCB_ENABLED
                                );

                switch (mask) {
                case (IXGBE_FLAG_RSS_ENABLED):
                        mrqc = IXGBE_MRQC_RSSEN;
                        break;
                case (IXGBE_FLAG_DCB_ENABLED):
                        mrqc = IXGBE_MRQC_RT8TCEN;
                        break;
                default:
                        break;
                }
        }
        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
                /* Fill out redirection table */
                for (i = 0, j = 0; i < 128; i++, j++) {
                        if (j == adapter->ring_feature[RING_F_RSS].indices)
                                j = 0;
                        /* reta = 4-byte sliding window of
                         * 0x00..(indices-1)(indices-1)00..etc. */
                        reta = (reta << 8) | (j * 0x11);
                        if ((i & 3) == 3)
                                IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
                }

                /* Fill out hash function seeds */
                for (i = 0; i < 10; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);

                if (hw->mac.type == ixgbe_mac_82598EB)
                        mrqc |= IXGBE_MRQC_RSSEN;
                /* Perform hash on these packet types */
                mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
                      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
                      | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
                      | IXGBE_MRQC_RSS_FIELD_IPV6
                      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
                      | IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
        }
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);

        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
            adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
                /* Disable indicating checksum in descriptor, enables
                 * RSS hash */
                rxcsum |= IXGBE_RXCSUM_PCSD;
        }
        if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
                /* Enable IPv4 payload checksum for UDP fragments
                 * if PCSD is not set */
                rxcsum |= IXGBE_RXCSUM_IPPCSE;
        }

        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

        if (hw->mac.type == ixgbe_mac_82599EB) {
                rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
                rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
                rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
                IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
        }

        if (adapter->flags & IXGBE_FLAG_RSC_ENABLED) {
                /* Enable 82599 HW-RSC */
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        j = adapter->rx_ring[i].reg_idx;
                        rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
                        rscctrl |= IXGBE_RSCCTL_RSCEN;
                        /*
                         * if packet split is enabled we can only support up
                         * to max frags + 1 descriptors.
                         */
                        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
#if (MAX_SKB_FRAGS < 3)
                                rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
#elif (MAX_SKB_FRAGS < 7)
                                rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#elif (MAX_SKB_FRAGS < 15)
                                rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
#else
                                rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#endif
                        else
                                rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
                        IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
                }
                /* Disable RSC for ACK packets */
                IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
                   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
        }
}

static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        /* add VID to filter table */
        hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;

        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_disable(adapter);

        vlan_group_set_device(adapter->vlgrp, vid, NULL);

        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_enable(adapter);

        /* remove VID from filter table */
        hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
}
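
/*
 * ixgbe_vlan_rx_register - called when a vlan group is registered; stash
 * the group pointer and enable hardware VLAN tag stripping (global VLNCTRL
 * on 82598, per-ring RXDCTL.VME on 82599).
 */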
static void ixgbe_vlan_rx_register(struct net_device *netdev,
                                   struct vlan_group *grp)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        u32 ctrl;
        int i, j;

        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_disable(adapter);
        adapter->vlgrp = grp;

        /*
         * For a DCB driver, always enable VLAN tag stripping so we can
         * still receive traffic from a DCB-enabled host even if we're
         * not in DCB mode.
         */
        ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
                ctrl &= ~IXGBE_VLNCTRL_CFIEN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
        } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                ctrl |= IXGBE_VLNCTRL_VFE;
                /* enable VLAN tag insert/strip */
                ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
                ctrl &= ~IXGBE_VLNCTRL_CFIEN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        j = adapter->rx_ring[i].reg_idx;
                        ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
                        ctrl |= IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
                }
        }
        ixgbe_vlan_rx_add_vid(netdev, 0);

        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_enable(adapter);
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
        ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

        if (adapter->vlgrp) {
                u16 vid;
                for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
                        if (!vlan_group_get_device(adapter->vlgrp, vid))
                                continue;
                        ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
                }
        }
}
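
/*
 * ixgbe_addr_list_itr - iterator handed to the shared address-list update
 * helpers; returns the current address in the list and advances the
 * caller's pointer to the next entry.
 */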
static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
{
        struct dev_mc_list *mc_ptr;
        u8 *addr = *mc_addr_ptr;
        *vmdq = 0;

        mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
        if (mc_ptr->next)
                *mc_addr_ptr = mc_ptr->next->dmi_addr;
        else
                *mc_addr_ptr = NULL;

        return addr;
}

/**
 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast/multicast
 * address list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper unicast, multicast and
 * promiscuous mode.
 **/
static void ixgbe_set_rx_mode(struct net_device *netdev)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 fctrl, vlnctrl;
        u8 *addr_list = NULL;
        int addr_count = 0;

        /* Check for Promiscuous and All Multicast modes */

        fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
        vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);

        if (netdev->flags & IFF_PROMISC) {
                hw->addr_ctrl.user_set_promisc = 1;
                fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
                vlnctrl &= ~IXGBE_VLNCTRL_VFE;
        } else {
                if (netdev->flags & IFF_ALLMULTI) {
                        fctrl |= IXGBE_FCTRL_MPE;
                        fctrl &= ~IXGBE_FCTRL_UPE;
                } else {
                        fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
                }
                vlnctrl |= IXGBE_VLNCTRL_VFE;
                hw->addr_ctrl.user_set_promisc = 0;
        }

        IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
        IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);

        /* reprogram secondary unicast list */
        addr_count = netdev->uc_count;
        if (addr_count)
                addr_list = netdev->uc_list->dmi_addr;
        hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
                                        ixgbe_addr_list_itr);

        /* reprogram multicast list */
        addr_count = netdev->mc_count;
        if (addr_count)
                addr_list = netdev->mc_list->dmi_addr;
        hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
                                        ixgbe_addr_list_itr);
}
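
/*
 * ixgbe_napi_enable_all - enable NAPI on every q_vector that owns Rx rings,
 * switching the poll routine to the multi-ring cleaner when a vector
 * services more than one Rx queue.
 */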
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
        int q_idx;
        struct ixgbe_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* legacy and MSI only use one vector */
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                q_vectors = 1;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                struct napi_struct *napi;
                q_vector = adapter->q_vector[q_idx];
                if (!q_vector->rxr_count)
                        continue;
                napi = &q_vector->napi;
                if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
                    (q_vector->rxr_count > 1))
                        napi->poll = &ixgbe_clean_rxonly_many;

                napi_enable(napi);
        }
}

static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
        int q_idx;
        struct ixgbe_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* legacy and MSI only use one vector */
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                q_vectors = 1;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
                if (!q_vector->rxr_count)
                        continue;
                napi_disable(&q_vector->napi);
        }
}

#ifdef CONFIG_IXGBE_DCB
/*
 * ixgbe_configure_dcb - Configure DCB hardware
 * @adapter: ixgbe adapter struct
 *
 * This is called by the driver on open to configure the DCB hardware.
 * This is also called by the gennetlink interface when reconfiguring
 * the DCB state.
 */
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 txdctl, vlnctrl;
        int i, j;

        ixgbe_dcb_check_config(&adapter->dcb_cfg);
        ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
        ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);

        /* reconfigure the hardware */
        ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);

        for (i = 0; i < adapter->num_tx_queues; i++) {
                j = adapter->tx_ring[i].reg_idx;
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
                /* PThresh workaround for Tx hang with DFP enabled. */
                txdctl |= 32;
                IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
        }
        /* Enable VLAN tag insert/strip */
        vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
        if (hw->mac.type == ixgbe_mac_82598EB) {
                vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
                vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
        } else if (hw->mac.type == ixgbe_mac_82599EB) {
                vlnctrl |= IXGBE_VLNCTRL_VFE;
                vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        j = adapter->rx_ring[i].reg_idx;
                        vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
                        vlnctrl |= IXGBE_RXDCTL_VME;
                        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
                }
        }
        hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
}

#endif
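
/**
 * ixgbe_configure - configure the hardware for normal operation
 * @adapter: board private structure
 *
 * Applies the Rx filtering and VLAN state, DCB settings when enabled, and
 * the Tx/Rx ring registers, then refills the Rx rings with buffers.
 **/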
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        ixgbe_set_rx_mode(netdev);

        ixgbe_restore_vlan(adapter);
#ifdef CONFIG_IXGBE_DCB
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                netif_set_gso_max_size(netdev, 32768);
                ixgbe_configure_dcb(adapter);
        } else {
                netif_set_gso_max_size(netdev, 65536);
        }
#else
        netif_set_gso_max_size(netdev, 65536);
#endif

        ixgbe_configure_tx(adapter);
        ixgbe_configure_rx(adapter);
        for (i = 0; i < adapter->num_rx_queues; i++)
                ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
                                       (adapter->rx_ring[i].count - 1));
}
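
/* Return true if the PHY type indicates a pluggable SFP+ or twinax module. */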
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
{
        switch (hw->phy.type) {
        case ixgbe_phy_sfp_avago:
        case ixgbe_phy_sfp_ftl:
        case ixgbe_phy_sfp_intel:
        case ixgbe_phy_sfp_unknown:
        case ixgbe_phy_tw_tyco:
        case ixgbe_phy_tw_unknown:
                return true;
        default:
                return false;
        }
}

/**
 * ixgbe_sfp_link_config - set up SFP+ link
 * @adapter: pointer to private adapter struct
 **/
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        if (hw->phy.multispeed_fiber) {
                /*
                 * In multispeed fiber setups, the device may not have
                 * had a physical connection when the driver loaded.
                 * If that's the case, the initial link configuration
                 * couldn't get the MAC into 10G or 1G mode, so we'll
                 * never have a link status change interrupt fire.
                 * We need to try and force an autonegotiation
                 * session, then bring up link.
                 */
                hw->mac.ops.setup_sfp(hw);
                if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
                        schedule_work(&adapter->multispeed_fiber_task);
        } else {
                /*
                 * Direct Attach Cu and non-multispeed fiber modules
                 * still need to be configured properly prior to
                 * attempting link.
                 */
                if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
                        schedule_work(&adapter->sfp_config_module_task);
        }
}

/**
 * ixgbe_non_sfp_link_config - set up non-SFP+ link
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
{
        u32 autoneg;
        bool link_up = false;
        u32 ret = IXGBE_ERR_LINK_SETUP;

        if (hw->mac.ops.check_link)
                ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);

        if (ret)
                goto link_cfg_out;

        if (hw->mac.ops.get_link_capabilities)
                ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
                                                        &hw->mac.autoneg);
        if (ret)
                goto link_cfg_out;

        if (hw->mac.ops.setup_link_speed)
                ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, link_up);

link_cfg_out:
        return ret;
}

#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
                                              int rxr)
{
        int j = adapter->rx_ring[rxr].reg_idx;
        int k;

        for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
                if (IXGBE_READ_REG(&adapter->hw,
                                   IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
                        break;
                else
                        msleep(1);
        }
        if (k >= IXGBE_MAX_RX_DESC_POLL) {
                DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
                        "not set within the polling period\n", rxr);
        }
        ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
                              (adapter->rx_ring[rxr].count - 1));
}
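
/**
 * ixgbe_up_complete - finish bringing the interface up after configuration
 * @adapter: board private structure
 *
 * Programs GPIE and interrupt moderation, enables the Tx and Rx queues,
 * turns on NAPI and interrupts, kicks off SFP+/link setup, and finally
 * starts the transmit queues and the watchdog timer.
 **/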
Auke Kok9a799d72007-09-15 14:07:45 -07002345static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2346{
2347 struct net_device *netdev = adapter->netdev;
Auke Kok9a799d72007-09-15 14:07:45 -07002348 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002349 int i, j = 0;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002350 int num_rx_rings = adapter->num_rx_queues;
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002351 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07002352 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002353 u32 txdctl, rxdctl, mhadd;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002354 u32 dmatxctl;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002355 u32 gpie;
Auke Kok9a799d72007-09-15 14:07:45 -07002356
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08002357 ixgbe_get_hw_control(adapter);
2358
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002359 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
2360 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
Auke Kok9a799d72007-09-15 14:07:45 -07002361 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2362 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002363 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
Auke Kok9a799d72007-09-15 14:07:45 -07002364 } else {
2365 /* MSI only */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002366 gpie = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002367 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002368 /* XXX: to interrupt immediately for EICS writes, enable this */
2369 /* gpie |= IXGBE_GPIE_EIMEN; */
2370 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2371 }
2372
2373 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2374 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
2375 * specifically only auto mask tx and rx interrupts */
2376 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07002377 }
2378
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07002379 /* Enable fan failure interrupt if media type is copper */
2380 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2381 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2382 gpie |= IXGBE_SDP1_GPIEN;
2383 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2384 }
2385
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002386 if (hw->mac.type == ixgbe_mac_82599EB) {
2387 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2388 gpie |= IXGBE_SDP1_GPIEN;
2389 gpie |= IXGBE_SDP2_GPIEN;
2390 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2391 }
2392
Auke Kok9a799d72007-09-15 14:07:45 -07002393 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
Auke Kok9a799d72007-09-15 14:07:45 -07002394 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2395 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2396 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2397
2398 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2399 }
2400
2401 for (i = 0; i < adapter->num_tx_queues; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002402 j = adapter->tx_ring[i].reg_idx;
2403 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002404 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2405 txdctl |= (8 << 16);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002406 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2407 }
2408
2409 if (hw->mac.type == ixgbe_mac_82599EB) {
2410 /* DMATXCTL.EN must be set after all Tx queue config is done */
2411 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2412 dmatxctl |= IXGBE_DMATXCTL_TE;
2413 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2414 }
2415 for (i = 0; i < adapter->num_tx_queues; i++) {
2416 j = adapter->tx_ring[i].reg_idx;
2417 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
Auke Kok9a799d72007-09-15 14:07:45 -07002418 txdctl |= IXGBE_TXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002419 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07002420 }
2421
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002422 for (i = 0; i < num_rx_rings; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002423 j = adapter->rx_ring[i].reg_idx;
2424 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2425 /* enable PTHRESH=32 descriptors (half the internal cache)
2426 * and HTHRESH=0 descriptors (to minimize latency on fetch),
2427 * this also removes a pesky rx_no_buffer_count increment */
2428 rxdctl |= 0x0020;
Auke Kok9a799d72007-09-15 14:07:45 -07002429 rxdctl |= IXGBE_RXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002430 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002431 if (hw->mac.type == ixgbe_mac_82599EB)
2432 ixgbe_rx_desc_queue_enable(adapter, i);
Auke Kok9a799d72007-09-15 14:07:45 -07002433 }
2434 /* enable all receives */
2435 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002436 if (hw->mac.type == ixgbe_mac_82598EB)
2437 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
2438 else
2439 rxdctl |= IXGBE_RXCTRL_RXEN;
2440 hw->mac.ops.enable_rx_dma(hw, rxdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07002441
2442 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2443 ixgbe_configure_msix(adapter);
2444 else
2445 ixgbe_configure_msi_and_legacy(adapter);
2446
2447 clear_bit(__IXGBE_DOWN, &adapter->state);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002448 ixgbe_napi_enable_all(adapter);
2449
2450 /* clear any pending interrupts, may auto mask */
2451 IXGBE_READ_REG(hw, IXGBE_EICR);
2452
Auke Kok9a799d72007-09-15 14:07:45 -07002453 ixgbe_irq_enable(adapter);
2454
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002455 /*
2456 * For hot-pluggable SFP+ devices, a new SFP+ module may have
2457 * arrived before interrupts were enabled. We need to kick off
2458 * the SFP+ module setup first, then try to bring up link.
2459 * If we're not hot-pluggable SFP+, we just need to configure link
2460 * and bring it up.
2461 */
2462 err = hw->phy.ops.identify(hw);
2463 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2464 DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
2465 ixgbe_down(adapter);
2466 return err;
2467 }
2468
2469 if (ixgbe_is_sfp(hw)) {
2470 ixgbe_sfp_link_config(adapter);
2471 } else {
2472 err = ixgbe_non_sfp_link_config(hw);
2473 if (err)
2474 DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
2475 }
Peter P Waskiewicz Jr0ecc0612009-02-06 21:46:54 -08002476
Peter P Waskiewicz Jr1da100b2009-01-19 16:55:03 -08002477 /* enable transmits */
2478 netif_tx_start_all_queues(netdev);
2479
Auke Kok9a799d72007-09-15 14:07:45 -07002480 /* bring the link up in the watchdog, this could race with our first
2481 * link up interrupt but shouldn't be a problem */
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002482 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2483 adapter->link_check_timeout = jiffies;
Auke Kok9a799d72007-09-15 14:07:45 -07002484 mod_timer(&adapter->watchdog_timer, jiffies);
2485 return 0;
2486}
2487
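/**
 * ixgbe_reinit_locked - bounce the interface under the reset lock
 * @adapter: board private structure
 *
 * Waits for any reset already in progress, then takes the interface down
 * and back up while holding __IXGBE_RESETTING in adapter->state. Must not
 * be called from interrupt context.
 **/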
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002488void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
2489{
2490 WARN_ON(in_interrupt());
2491 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
2492 msleep(1);
2493 ixgbe_down(adapter);
2494 ixgbe_up(adapter);
2495 clear_bit(__IXGBE_RESETTING, &adapter->state);
2496}
2497
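/**
 * ixgbe_up - bring the interface up after the hardware has been reset
 * @adapter: board private structure
 *
 * Reprograms the hardware configuration and then finishes bring-up in
 * ixgbe_up_complete().
 **/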
Auke Kok9a799d72007-09-15 14:07:45 -07002498int ixgbe_up(struct ixgbe_adapter *adapter)
2499{
2500 /* hardware has been reset, we need to reload some things */
2501 ixgbe_configure(adapter);
2502
2503 return ixgbe_up_complete(adapter);
2504}
2505
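/**
 * ixgbe_reset - re-initialize the hardware
 * @adapter: board private structure
 *
 * Runs the MAC init_hw routine and restores RAR[0], in case the user
 * changed the MAC address.
 **/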
2506void ixgbe_reset(struct ixgbe_adapter *adapter)
2507{
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002508 struct ixgbe_hw *hw = &adapter->hw;
2509 if (hw->mac.ops.init_hw(hw))
2510 dev_err(&adapter->pdev->dev, "Hardware Error\n");
Auke Kok9a799d72007-09-15 14:07:45 -07002511
2512 /* reprogram the RAR[0] in case user changed it. */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002513 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
Auke Kok9a799d72007-09-15 14:07:45 -07002514
2515}
2516
Auke Kok9a799d72007-09-15 14:07:45 -07002517/**
2518 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
2519 * @adapter: board private structure
2520 * @rx_ring: ring to free buffers from
2521 **/
2522static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002523 struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002524{
2525 struct pci_dev *pdev = adapter->pdev;
2526 unsigned long size;
2527 unsigned int i;
2528
2529 /* Free all the Rx ring sk_buffs */
2530
2531 for (i = 0; i < rx_ring->count; i++) {
2532 struct ixgbe_rx_buffer *rx_buffer_info;
2533
2534 rx_buffer_info = &rx_ring->rx_buffer_info[i];
2535 if (rx_buffer_info->dma) {
2536 pci_unmap_single(pdev, rx_buffer_info->dma,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002537 rx_ring->rx_buf_len,
2538 PCI_DMA_FROMDEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07002539 rx_buffer_info->dma = 0;
2540 }
2541 if (rx_buffer_info->skb) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00002542 struct sk_buff *skb = rx_buffer_info->skb;
Auke Kok9a799d72007-09-15 14:07:45 -07002543 rx_buffer_info->skb = NULL;
Alexander Duyckf8212f92009-04-27 22:42:37 +00002544 do {
2545 struct sk_buff *this = skb;
2546 skb = skb->prev;
2547 dev_kfree_skb(this);
2548 } while (skb);
Auke Kok9a799d72007-09-15 14:07:45 -07002549 }
2550 if (!rx_buffer_info->page)
2551 continue;
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07002552 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
2553 PCI_DMA_FROMDEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07002554 rx_buffer_info->page_dma = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002555 put_page(rx_buffer_info->page);
2556 rx_buffer_info->page = NULL;
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07002557 rx_buffer_info->page_offset = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002558 }
2559
2560 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2561 memset(rx_ring->rx_buffer_info, 0, size);
2562
2563 /* Zero out the descriptor ring */
2564 memset(rx_ring->desc, 0, rx_ring->size);
2565
2566 rx_ring->next_to_clean = 0;
2567 rx_ring->next_to_use = 0;
2568
Jesse Brandeburg9891ca72009-03-13 22:14:50 +00002569 if (rx_ring->head)
2570 writel(0, adapter->hw.hw_addr + rx_ring->head);
2571 if (rx_ring->tail)
2572 writel(0, adapter->hw.hw_addr + rx_ring->tail);
Auke Kok9a799d72007-09-15 14:07:45 -07002573}
2574
2575/**
2576 * ixgbe_clean_tx_ring - Free Tx Buffers
2577 * @adapter: board private structure
2578 * @tx_ring: ring to be cleaned
2579 **/
2580static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002581 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002582{
2583 struct ixgbe_tx_buffer *tx_buffer_info;
2584 unsigned long size;
2585 unsigned int i;
2586
2587 /* Free all the Tx ring sk_buffs */
2588
2589 for (i = 0; i < tx_ring->count; i++) {
2590 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2591 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2592 }
2593
2594 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2595 memset(tx_ring->tx_buffer_info, 0, size);
2596
2597 /* Zero out the descriptor ring */
2598 memset(tx_ring->desc, 0, tx_ring->size);
2599
2600 tx_ring->next_to_use = 0;
2601 tx_ring->next_to_clean = 0;
2602
Jesse Brandeburg9891ca72009-03-13 22:14:50 +00002603 if (tx_ring->head)
2604 writel(0, adapter->hw.hw_addr + tx_ring->head);
2605 if (tx_ring->tail)
2606 writel(0, adapter->hw.hw_addr + tx_ring->tail);
Auke Kok9a799d72007-09-15 14:07:45 -07002607}
2608
2609/**
Auke Kok9a799d72007-09-15 14:07:45 -07002610 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
2611 * @adapter: board private structure
2612 **/
2613static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
2614{
2615 int i;
2616
2617 for (i = 0; i < adapter->num_rx_queues; i++)
2618 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2619}
2620
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002621/**
2622 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
2623 * @adapter: board private structure
2624 **/
2625static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
2626{
2627 int i;
2628
2629 for (i = 0; i < adapter->num_tx_queues; i++)
2630 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2631}
2632
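/**
 * ixgbe_down - quiesce the hardware and software state
 * @adapter: board private structure
 *
 * Disables receives and transmits, masks interrupts, stops NAPI and the
 * watchdog, resets the hardware (unless the PCI channel is offline) and
 * cleans all Tx and Rx rings.
 **/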
Auke Kok9a799d72007-09-15 14:07:45 -07002633void ixgbe_down(struct ixgbe_adapter *adapter)
2634{
2635 struct net_device *netdev = adapter->netdev;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002636 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07002637 u32 rxctrl;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002638 u32 txdctl;
2639 int i, j;
Auke Kok9a799d72007-09-15 14:07:45 -07002640
2641 /* signal that we are down to the interrupt handler */
2642 set_bit(__IXGBE_DOWN, &adapter->state);
2643
2644 /* disable receives */
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002645 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2646 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
Auke Kok9a799d72007-09-15 14:07:45 -07002647
2648 netif_tx_disable(netdev);
2649
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002650 IXGBE_WRITE_FLUSH(hw);
Auke Kok9a799d72007-09-15 14:07:45 -07002651 msleep(10);
2652
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002653 netif_tx_stop_all_queues(netdev);
2654
Auke Kok9a799d72007-09-15 14:07:45 -07002655 ixgbe_irq_disable(adapter);
2656
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002657 ixgbe_napi_disable_all(adapter);
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002658
Auke Kok9a799d72007-09-15 14:07:45 -07002659 del_timer_sync(&adapter->watchdog_timer);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002660 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07002661
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002662 /* disable transmits in the hardware now that interrupts are off */
2663 for (i = 0; i < adapter->num_tx_queues; i++) {
2664 j = adapter->tx_ring[i].reg_idx;
2665 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2666 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
2667 (txdctl & ~IXGBE_TXDCTL_ENABLE));
2668 }
PJ Waskiewicz88512532009-03-13 22:15:10 +00002669 /* Disable the Tx DMA engine on 82599 */
2670 if (hw->mac.type == ixgbe_mac_82599EB)
2671 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
2672 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
2673 ~IXGBE_DMATXCTL_TE));
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002674
Auke Kok9a799d72007-09-15 14:07:45 -07002675 netif_carrier_off(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002676
Jeff Garzik5dd2d332008-10-16 05:09:31 -04002677#ifdef CONFIG_IXGBE_DCA
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07002678 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2679 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
2680 dca_remove_requester(&adapter->pdev->dev);
2681 }
2682
2683#endif
Paul Larson6f4a0e42008-06-24 17:00:56 -07002684 if (!pci_channel_offline(adapter->pdev))
2685 ixgbe_reset(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002686 ixgbe_clean_all_tx_rings(adapter);
2687 ixgbe_clean_all_rx_rings(adapter);
2688
Jeff Garzik5dd2d332008-10-16 05:09:31 -04002689#ifdef CONFIG_IXGBE_DCA
	/* since we reset the hardware, the DCA settings were cleared */
2691 if (dca_add_requester(&adapter->pdev->dev) == 0) {
2692 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
2693 /* always use CB2 mode, difference is masked
2694 * in the CB driver */
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002695 IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07002696 ixgbe_setup_dca(adapter);
2697 }
2698#endif
Auke Kok9a799d72007-09-15 14:07:45 -07002699}
2700
Auke Kok9a799d72007-09-15 14:07:45 -07002701/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002702 * ixgbe_poll - NAPI Rx polling callback
2703 * @napi: structure for representing this polling device
2704 * @budget: how many packets driver is allowed to clean
2705 *
 * This function is used for legacy interrupts and MSI in NAPI mode
Auke Kok9a799d72007-09-15 14:07:45 -07002707 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002708static int ixgbe_poll(struct napi_struct *napi, int budget)
Auke Kok9a799d72007-09-15 14:07:45 -07002709{
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00002710 struct ixgbe_q_vector *q_vector =
2711 container_of(napi, struct ixgbe_q_vector, napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002712 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00002713 int tx_clean_complete, work_done = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002714
Jeff Garzik5dd2d332008-10-16 05:09:31 -04002715#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08002716 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2717 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2718 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
2719 }
2720#endif
2721
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00002722 tx_clean_complete = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
Herbert Xu78b6f4c2009-01-18 21:49:45 -08002723 ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
Auke Kok9a799d72007-09-15 14:07:45 -07002724
Jesse Brandeburg9a1a69ad2009-03-13 22:14:10 +00002725 if (!tx_clean_complete)
David S. Millerd2c7ddd2008-01-15 22:43:24 -08002726 work_done = budget;
2727
David S. Miller53e52c72008-01-07 21:06:12 -08002728 /* If budget not fully consumed, exit the polling mode */
2729 if (work_done < budget) {
Ben Hutchings288379f2009-01-19 16:43:59 -08002730 napi_complete(napi);
Jesse Brandeburg509ee932009-03-13 22:13:28 +00002731 if (adapter->itr_setting & 1)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002732 ixgbe_set_itr(adapter);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002733 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Nelson, Shannon835462f2009-04-27 22:42:54 +00002734 ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07002735 }
Auke Kok9a799d72007-09-15 14:07:45 -07002736 return work_done;
2737}
2738
2739/**
2740 * ixgbe_tx_timeout - Respond to a Tx Hang
2741 * @netdev: network interface device structure
2742 **/
2743static void ixgbe_tx_timeout(struct net_device *netdev)
2744{
2745 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2746
2747 /* Do the reset outside of interrupt context */
2748 schedule_work(&adapter->reset_task);
2749}
2750
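/**
 * ixgbe_reset_task - work handler that resets the adapter after a Tx hang
 * @work: pointer to the work_struct embedded in our adapter structure
 **/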
2751static void ixgbe_reset_task(struct work_struct *work)
2752{
2753 struct ixgbe_adapter *adapter;
2754 adapter = container_of(work, struct ixgbe_adapter, reset_task);
2755
Alexander Duyck2f90b862008-11-20 20:52:10 -08002756 /* If we're already down or resetting, just bail */
2757 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
2758 test_bit(__IXGBE_RESETTING, &adapter->state))
2759 return;
2760
Auke Kok9a799d72007-09-15 14:07:45 -07002761 adapter->tx_timeout_count++;
2762
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002763 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002764}
2765
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002766#ifdef CONFIG_IXGBE_DCB
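/**
 * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device
 * @adapter: board private structure to initialize
 *
 * When DCB (Data Center Bridging) is enabled, allocate one Rx and one Tx
 * queue per traffic class, as described by the RING_F_DCB feature indices.
 *
 **/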
2767static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002768{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002769 bool ret = false;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002770
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002771 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2772 adapter->ring_feature[RING_F_DCB].mask = 0x7 << 3;
2773 adapter->num_rx_queues =
2774 adapter->ring_feature[RING_F_DCB].indices;
2775 adapter->num_tx_queues =
2776 adapter->ring_feature[RING_F_DCB].indices;
2777 ret = true;
2778 } else {
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002779 ret = false;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002780 }
2781
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002782 return ret;
2783}
2784#endif
2785
Jesse Brandeburg4df10462009-03-13 22:15:31 +00002786/**
2787 * ixgbe_set_rss_queues: Allocate queues for RSS
2788 * @adapter: board private structure to initialize
2789 *
2790 * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
2791 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
2792 *
2793 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002794static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
2795{
2796 bool ret = false;
2797
2798 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
2799 adapter->ring_feature[RING_F_RSS].mask = 0xF;
2800 adapter->num_rx_queues =
2801 adapter->ring_feature[RING_F_RSS].indices;
2802 adapter->num_tx_queues =
2803 adapter->ring_feature[RING_F_RSS].indices;
2804 ret = true;
2805 } else {
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002806 ret = false;
2807 }
2808
2809 return ret;
2810}
2811
/**
 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
2814 * @adapter: board private structure to initialize
2815 *
2816 * This is the top level queue allocation routine. The order here is very
 * important, starting with the largest set of features turned on at once,
2818 * and ending with the smallest set of features. This way large combinations
2819 * can be allocated if they're turned on, and smaller combinations are the
2820 * fallthrough conditions.
2821 *
2822 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002823static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2824{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002825#ifdef CONFIG_IXGBE_DCB
2826 if (ixgbe_set_dcb_queues(adapter))
Wu Fengguangaf22ab12009-04-14 21:54:07 -07002827 goto done;
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002828
2829#endif
2830 if (ixgbe_set_rss_queues(adapter))
Wu Fengguangaf22ab12009-04-14 21:54:07 -07002831 goto done;
2832
2833 /* fallback to base case */
2834 adapter->num_rx_queues = 1;
2835 adapter->num_tx_queues = 1;
2836
2837done:
2838 /* Notify the stack of the (possibly) reduced Tx Queue count. */
2839 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002840}
2841
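/**
 * ixgbe_acquire_msix_vectors - try to reserve MSI-X vectors from the OS
 * @adapter: board private structure to initialize
 * @vectors: number of MSI-X vectors to request
 *
 * Requests MSI-X vectors, retrying with whatever count the OS says it can
 * grant. If fewer than the minimum usable number can be acquired, MSI-X is
 * abandoned and the driver falls back to MSI or legacy interrupts.
 **/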
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002842static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002843 int vectors)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002844{
2845 int err, vector_threshold;
2846
2847 /* We'll want at least 3 (vector_threshold):
2848 * 1) TxQ[0] Cleanup
2849 * 2) RxQ[0] Cleanup
2850 * 3) Other (Link Status Change, etc.)
2851 * 4) TCP Timer (optional)
2852 */
2853 vector_threshold = MIN_MSIX_COUNT;
2854
2855 /* The more we get, the more we will assign to Tx/Rx Cleanup
2856 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2857 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting IRQs.
2859 */
2860 while (vectors >= vector_threshold) {
2861 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002862 vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002863 if (!err) /* Success in acquiring all requested vectors. */
2864 break;
2865 else if (err < 0)
2866 vectors = 0; /* Nasty failure, quit now */
2867 else /* err == number of vectors we should try again with */
2868 vectors = err;
2869 }
2870
2871 if (vectors < vector_threshold) {
2872 /* Can't allocate enough MSI-X interrupts? Oh well.
2873 * This just means we'll go with either a single MSI
2874 * vector or fall back to legacy interrupts.
2875 */
2876 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
2877 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2878 kfree(adapter->msix_entries);
2879 adapter->msix_entries = NULL;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002880 } else {
2881 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
Peter P Waskiewicz Jreb7f1392009-02-01 01:18:58 -08002882 /*
2883 * Adjust for only the vectors we'll use, which is minimum
2884 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
2885 * vectors we were allocated.
2886 */
2887 adapter->num_msix_vectors = min(vectors,
2888 adapter->max_msix_q_vectors + NON_Q_VECTORS);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002889 }
2890}
2891
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002892/**
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002893 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002894 * @adapter: board private structure to initialize
2895 *
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002896 * Cache the descriptor ring offsets for RSS to the assigned rings.
2897 *
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002898 **/
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002899static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002900{
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002901 int i;
2902 bool ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002903
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002904 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
2905 for (i = 0; i < adapter->num_rx_queues; i++)
2906 adapter->rx_ring[i].reg_idx = i;
2907 for (i = 0; i < adapter->num_tx_queues; i++)
2908 adapter->tx_ring[i].reg_idx = i;
2909 ret = true;
2910 } else {
2911 ret = false;
2912 }
2913
2914 return ret;
2915}
2916
2917#ifdef CONFIG_IXGBE_DCB
2918/**
2919 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
2920 * @adapter: board private structure to initialize
2921 *
2922 * Cache the descriptor ring offsets for DCB to the assigned rings.
2923 *
2924 **/
2925static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
2926{
2927 int i;
2928 bool ret = false;
2929 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
2930
2931 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2932 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
Alexander Duyck2f90b862008-11-20 20:52:10 -08002933 /* the number of queues is assumed to be symmetric */
2934 for (i = 0; i < dcb_i; i++) {
2935 adapter->rx_ring[i].reg_idx = i << 3;
2936 adapter->tx_ring[i].reg_idx = i << 2;
2937 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002938 ret = true;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002939 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
PJ Waskiewiczf92ef202009-04-16 15:00:20 +00002940 if (dcb_i == 8) {
2941 /*
2942 * Tx TC0 starts at: descriptor queue 0
2943 * Tx TC1 starts at: descriptor queue 32
2944 * Tx TC2 starts at: descriptor queue 64
2945 * Tx TC3 starts at: descriptor queue 80
2946 * Tx TC4 starts at: descriptor queue 96
2947 * Tx TC5 starts at: descriptor queue 104
2948 * Tx TC6 starts at: descriptor queue 112
2949 * Tx TC7 starts at: descriptor queue 120
2950 *
2951 * Rx TC0-TC7 are offset by 16 queues each
2952 */
2953 for (i = 0; i < 3; i++) {
2954 adapter->tx_ring[i].reg_idx = i << 5;
2955 adapter->rx_ring[i].reg_idx = i << 4;
2956 }
2957 for ( ; i < 5; i++) {
2958 adapter->tx_ring[i].reg_idx =
2959 ((i + 2) << 4);
2960 adapter->rx_ring[i].reg_idx = i << 4;
2961 }
2962 for ( ; i < dcb_i; i++) {
2963 adapter->tx_ring[i].reg_idx =
2964 ((i + 8) << 3);
2965 adapter->rx_ring[i].reg_idx = i << 4;
2966 }
2967
2968 ret = true;
2969 } else if (dcb_i == 4) {
2970 /*
2971 * Tx TC0 starts at: descriptor queue 0
2972 * Tx TC1 starts at: descriptor queue 64
2973 * Tx TC2 starts at: descriptor queue 96
2974 * Tx TC3 starts at: descriptor queue 112
2975 *
2976 * Rx TC0-TC3 are offset by 32 queues each
2977 */
2978 adapter->tx_ring[0].reg_idx = 0;
2979 adapter->tx_ring[1].reg_idx = 64;
2980 adapter->tx_ring[2].reg_idx = 96;
2981 adapter->tx_ring[3].reg_idx = 112;
2982 for (i = 0 ; i < dcb_i; i++)
2983 adapter->rx_ring[i].reg_idx = i << 5;
2984
2985 ret = true;
2986 } else {
2987 ret = false;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00002988 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002989 } else {
2990 ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002991 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002992 } else {
2993 ret = false;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002994 }
Peter P Waskiewicz Jrbc971142009-02-05 23:53:59 -08002995
2996 return ret;
2997}
2998#endif
2999
3000/**
3001 * ixgbe_cache_ring_register - Descriptor ring to register mapping
3002 * @adapter: board private structure to initialize
3003 *
3004 * Once we know the feature-set enabled for the device, we'll cache
3005 * the register offset the descriptor ring is assigned to.
3006 *
 * Note, the order of the various feature calls is important. It must start
 * with the largest set of features enabled at the same time, then trickle
 * down to the fewest features turned on at once.
3010 **/
3011static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
3012{
3013 /* start with default case */
3014 adapter->rx_ring[0].reg_idx = 0;
3015 adapter->tx_ring[0].reg_idx = 0;
3016
3017#ifdef CONFIG_IXGBE_DCB
3018 if (ixgbe_cache_ring_dcb(adapter))
3019 return;
3020
3021#endif
3022 if (ixgbe_cache_ring_rss(adapter))
3023 return;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003024}
3025
Auke Kok9a799d72007-09-15 14:07:45 -07003026/**
3027 * ixgbe_alloc_queues - Allocate memory for all rings
3028 * @adapter: board private structure to initialize
3029 *
3030 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The ring count for each queue comes
 * from the adapter's tx_ring_count and rx_ring_count fields.
Auke Kok9a799d72007-09-15 14:07:45 -07003033 **/
Alexander Duyck2f90b862008-11-20 20:52:10 -08003034static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07003035{
3036 int i;
3037
3038 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003039 sizeof(struct ixgbe_ring), GFP_KERNEL);
Auke Kok9a799d72007-09-15 14:07:45 -07003040 if (!adapter->tx_ring)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003041 goto err_tx_ring_allocation;
Auke Kok9a799d72007-09-15 14:07:45 -07003042
3043 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003044 sizeof(struct ixgbe_ring), GFP_KERNEL);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003045 if (!adapter->rx_ring)
3046 goto err_rx_ring_allocation;
3047
3048 for (i = 0; i < adapter->num_tx_queues; i++) {
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003049 adapter->tx_ring[i].count = adapter->tx_ring_count;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003050 adapter->tx_ring[i].queue_index = i;
3051 }
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003052
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003053 for (i = 0; i < adapter->num_rx_queues; i++) {
Jesse Brandeburgb9804972008-09-11 20:00:29 -07003054 adapter->rx_ring[i].count = adapter->rx_ring_count;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003055 adapter->rx_ring[i].queue_index = i;
Auke Kok9a799d72007-09-15 14:07:45 -07003056 }
3057
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003058 ixgbe_cache_ring_register(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003059
3060 return 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003061
3062err_rx_ring_allocation:
3063 kfree(adapter->tx_ring);
3064err_tx_ring_allocation:
3065 return -ENOMEM;
3066}
3067
3068/**
3069 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
3070 * @adapter: board private structure to initialize
3071 *
3072 * Attempt to configure the interrupts using the best available
3073 * capabilities of the hardware and the kernel.
3074 **/
Al Virofeea6a52008-11-27 15:34:07 -08003075static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003076{
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00003077 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003078 int err = 0;
3079 int vector, v_budget;
3080
3081 /*
3082 * It's easy to be greedy for MSI-X vectors, but it really
3083 * doesn't do us much good if we have a lot more vectors
3084 * than CPU's. So let's be conservative and only ask for
3085 * (roughly) twice the number of vectors as there are CPU's.
3086 */
3087 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003088 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003089
3090 /*
3091 * At the same time, hardware can only support a maximum of
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00003092 * hw.mac->max_msix_vectors vectors. With features
3093 * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
3094 * descriptor queues supported by our device. Thus, we cap it off in
3095 * those rare cases where the cpu count also exceeds our vector limit.
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003096 */
PJ Waskiewicz8be0e462009-03-31 21:34:05 +00003097 v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003098
3099 /* A failure in MSI-X entry allocation isn't fatal, but it does
3100 * mean we disable MSI-X capabilities of the adapter. */
3101 adapter->msix_entries = kcalloc(v_budget,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003102 sizeof(struct msix_entry), GFP_KERNEL);
Alexander Duyck7a921c92009-05-06 10:43:28 +00003103 if (adapter->msix_entries) {
3104 for (vector = 0; vector < v_budget; vector++)
3105 adapter->msix_entries[vector].entry = vector;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003106
Alexander Duyck7a921c92009-05-06 10:43:28 +00003107 ixgbe_acquire_msix_vectors(adapter, v_budget);
3108
3109 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3110 goto out;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003111 }
3112
Alexander Duyck7a921c92009-05-06 10:43:28 +00003113 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
3114 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
3115 ixgbe_set_num_queues(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003116
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003117 err = pci_enable_msi(adapter->pdev);
3118 if (!err) {
3119 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
3120 } else {
3121 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003122 "falling back to legacy. Error: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003123 /* reset err */
3124 err = 0;
3125 }
3126
3127out:
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003128 return err;
3129}
3130
Alexander Duyck7a921c92009-05-06 10:43:28 +00003131/**
3132 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
3133 * @adapter: board private structure to initialize
3134 *
3135 * We allocate one q_vector per queue interrupt. If allocation fails we
3136 * return -ENOMEM.
3137 **/
3138static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
3139{
3140 int q_idx, num_q_vectors;
3141 struct ixgbe_q_vector *q_vector;
3142 int napi_vectors;
3143 int (*poll)(struct napi_struct *, int);
3144
3145 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3146 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3147 napi_vectors = adapter->num_rx_queues;
3148 poll = &ixgbe_clean_rxonly;
3149 } else {
3150 num_q_vectors = 1;
3151 napi_vectors = 1;
3152 poll = &ixgbe_poll;
3153 }
3154
3155 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3156 q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
3157 if (!q_vector)
3158 goto err_out;
3159 q_vector->adapter = adapter;
3160 q_vector->v_idx = q_idx;
3161 q_vector->eitr = adapter->eitr_param;
3162 if (q_idx < napi_vectors)
3163 netif_napi_add(adapter->netdev, &q_vector->napi,
3164 (*poll), 64);
3165 adapter->q_vector[q_idx] = q_vector;
3166 }
3167
3168 return 0;
3169
3170err_out:
3171 while (q_idx) {
3172 q_idx--;
3173 q_vector = adapter->q_vector[q_idx];
3174 netif_napi_del(&q_vector->napi);
3175 kfree(q_vector);
3176 adapter->q_vector[q_idx] = NULL;
3177 }
3178 return -ENOMEM;
3179}
3180
3181/**
3182 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
3183 * @adapter: board private structure to initialize
3184 *
3185 * This function frees the memory allocated to the q_vectors. In addition if
3186 * NAPI is enabled it will delete any references to the NAPI struct prior
3187 * to freeing the q_vector.
3188 **/
3189static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
3190{
3191 int q_idx, num_q_vectors;
3192 int napi_vectors;
3193
3194 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3195 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3196 napi_vectors = adapter->num_rx_queues;
3197 } else {
3198 num_q_vectors = 1;
3199 napi_vectors = 1;
3200 }
3201
3202 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3203 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
3204
3205 adapter->q_vector[q_idx] = NULL;
3206 if (q_idx < napi_vectors)
3207 netif_napi_del(&q_vector->napi);
3208 kfree(q_vector);
3209 }
3210}
3211
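/**
 * ixgbe_reset_interrupt_capability - release MSI-X or MSI resources
 * @adapter: board private structure
 *
 * Disables MSI-X or MSI, frees the MSI-X entry table if one was allocated
 * and clears the corresponding feature flags.
 **/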
Alexander Duyck2f90b862008-11-20 20:52:10 -08003212void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003213{
3214 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3215 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3216 pci_disable_msix(adapter->pdev);
3217 kfree(adapter->msix_entries);
3218 adapter->msix_entries = NULL;
3219 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
3220 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
3221 pci_disable_msi(adapter->pdev);
3222 }
3223 return;
3224}
3225
3226/**
3227 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
3228 * @adapter: board private structure to initialize
3229 *
3230 * We determine which interrupt scheme to use based on...
3231 * - Kernel support (MSI, MSI-X)
3232 * - which can be user-defined (via MODULE_PARAM)
3233 * - Hardware queue count (num_*_queues)
3234 * - defined by miscellaneous hardware support/features (RSS, etc.)
3235 **/
Alexander Duyck2f90b862008-11-20 20:52:10 -08003236int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003237{
3238 int err;
3239
3240 /* Number of supported queues */
3241 ixgbe_set_num_queues(adapter);
3242
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003243 err = ixgbe_set_interrupt_capability(adapter);
3244 if (err) {
3245 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
3246 goto err_set_interrupt;
3247 }
3248
Alexander Duyck7a921c92009-05-06 10:43:28 +00003249 err = ixgbe_alloc_q_vectors(adapter);
3250 if (err) {
3251 DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
3252 "vectors\n");
3253 goto err_alloc_q_vectors;
3254 }
3255
3256 err = ixgbe_alloc_queues(adapter);
3257 if (err) {
3258 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
3259 goto err_alloc_queues;
3260 }
3261
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003262 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003263 "Tx Queue count = %u\n",
3264 (adapter->num_rx_queues > 1) ? "Enabled" :
3265 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003266
3267 set_bit(__IXGBE_DOWN, &adapter->state);
3268
3269 return 0;
3270
Alexander Duyck7a921c92009-05-06 10:43:28 +00003271err_alloc_queues:
3272 ixgbe_free_q_vectors(adapter);
3273err_alloc_q_vectors:
3274 ixgbe_reset_interrupt_capability(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003275err_set_interrupt:
Alexander Duyck7a921c92009-05-06 10:43:28 +00003276 return err;
3277}
3278
3279/**
3280 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
3281 * @adapter: board private structure to clear interrupt scheme on
3282 *
3283 * We go through and clear interrupt specific resources and reset the structure
3284 * to pre-load conditions
3285 **/
3286void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
3287{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003288 kfree(adapter->tx_ring);
3289 kfree(adapter->rx_ring);
Alexander Duyck7a921c92009-05-06 10:43:28 +00003290 adapter->tx_ring = NULL;
3291 adapter->rx_ring = NULL;
3292
3293 ixgbe_free_q_vectors(adapter);
3294 ixgbe_reset_interrupt_capability(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003295}
3296
3297/**
Donald Skidmorec4900be2008-11-20 21:11:42 -08003298 * ixgbe_sfp_timer - worker thread to find a missing module
3299 * @data: pointer to our adapter struct
3300 **/
3301static void ixgbe_sfp_timer(unsigned long data)
3302{
3303 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
3304
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003305 /*
3306 * Do the sfp_timer outside of interrupt context due to the
Donald Skidmorec4900be2008-11-20 21:11:42 -08003307 * delays that sfp+ detection requires
3308 */
3309 schedule_work(&adapter->sfp_task);
3310}
3311
3312/**
3313 * ixgbe_sfp_task - worker thread to find a missing module
3314 * @work: pointer to work_struct containing our data
3315 **/
3316static void ixgbe_sfp_task(struct work_struct *work)
3317{
3318 struct ixgbe_adapter *adapter = container_of(work,
3319 struct ixgbe_adapter,
3320 sfp_task);
3321 struct ixgbe_hw *hw = &adapter->hw;
3322
3323 if ((hw->phy.type == ixgbe_phy_nl) &&
3324 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3325 s32 ret = hw->phy.ops.identify_sfp(hw);
3326 if (ret)
3327 goto reschedule;
3328 ret = hw->phy.ops.reset(hw);
3329 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3330 DPRINTK(PROBE, ERR, "failed to initialize because an "
3331 "unsupported SFP+ module type was detected.\n"
3332 "Reload the driver after installing a "
3333 "supported module.\n");
3334 unregister_netdev(adapter->netdev);
3335 } else {
3336 DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
3337 hw->phy.sfp_type);
3338 }
3339 /* don't need this routine any more */
3340 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3341 }
3342 return;
3343reschedule:
3344 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
3345 mod_timer(&adapter->sfp_timer,
3346 round_jiffies(jiffies + (2 * HZ)));
3347}
3348
3349/**
Auke Kok9a799d72007-09-15 14:07:45 -07003350 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
3351 * @adapter: board private structure to initialize
3352 *
3353 * ixgbe_sw_init initializes the Adapter private data structure.
3354 * Fields are initialized based on PCI device information and
3355 * OS network device settings (MTU size).
3356 **/
3357static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
3358{
3359 struct ixgbe_hw *hw = &adapter->hw;
3360 struct pci_dev *pdev = adapter->pdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003361 unsigned int rss;
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08003362#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08003363 int j;
3364 struct tc_configuration *tc;
3365#endif
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003366
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07003367 /* PCI config space info */
3368
3369 hw->vendor_id = pdev->vendor;
3370 hw->device_id = pdev->device;
3371 hw->revision_id = pdev->revision;
3372 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3373 hw->subsystem_device_id = pdev->subsystem_device;
3374
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003375 /* Set capability flags */
3376 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
3377 adapter->ring_feature[RING_F_RSS].indices = rss;
3378 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
Alexander Duyck2f90b862008-11-20 20:52:10 -08003379 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003380 if (hw->mac.type == ixgbe_mac_82598EB)
3381 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
Alexander Duyckf8212f92009-04-27 22:42:37 +00003382 else if (hw->mac.type == ixgbe_mac_82599EB) {
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003383 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
Alexander Duyckf8212f92009-04-27 22:42:37 +00003384 adapter->flags |= IXGBE_FLAG_RSC_CAPABLE;
3385 adapter->flags |= IXGBE_FLAG_RSC_ENABLED;
3386 }
Alexander Duyck2f90b862008-11-20 20:52:10 -08003387
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08003388#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08003389 /* Configure DCB traffic classes */
3390 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
3391 tc = &adapter->dcb_cfg.tc_config[j];
3392 tc->path[DCB_TX_CONFIG].bwg_id = 0;
3393 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
3394 tc->path[DCB_RX_CONFIG].bwg_id = 0;
3395 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
3396 tc->dcb_pfc = pfc_disabled;
3397 }
3398 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
3399 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
3400 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
3401 adapter->dcb_cfg.round_robin_enable = false;
3402 adapter->dcb_set_bitmap = 0x00;
3403 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
3404 adapter->ring_feature[RING_F_DCB].indices);
3405
3406#endif
Auke Kok9a799d72007-09-15 14:07:45 -07003407
3408 /* default flow control settings */
Don Skidmorecd7664f2009-03-31 21:33:44 +00003409 hw->fc.requested_mode = ixgbe_fc_full;
Don Skidmore71fd5702009-03-31 21:35:05 +00003410 hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
Jesse Brandeburg2b9ade92008-08-26 04:27:10 -07003411 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
3412 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
3413 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
3414 hw->fc.send_xon = true;
Don Skidmore71fd5702009-03-31 21:35:05 +00003415 hw->fc.disable_fc_autoneg = false;
Auke Kok9a799d72007-09-15 14:07:45 -07003416
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07003417 /* enable itr by default in dynamic mode */
3418 adapter->itr_setting = 1;
3419 adapter->eitr_param = 20000;
3420
3421 /* set defaults for eitr in MegaBytes */
3422 adapter->eitr_low = 10;
3423 adapter->eitr_high = 20;
3424
3425 /* set default ring sizes */
3426 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
3427 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
3428
Auke Kok9a799d72007-09-15 14:07:45 -07003429 /* initialize eeprom parameters */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07003430 if (ixgbe_init_eeprom_params_generic(hw)) {
Auke Kok9a799d72007-09-15 14:07:45 -07003431 dev_err(&pdev->dev, "EEPROM initialization failed\n");
3432 return -EIO;
3433 }
3434
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003435 /* enable rx csum by default */
Auke Kok9a799d72007-09-15 14:07:45 -07003436 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
3437
Auke Kok9a799d72007-09-15 14:07:45 -07003438 set_bit(__IXGBE_DOWN, &adapter->state);
3439
3440 return 0;
3441}
3442
3443/**
3444 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
3445 * @adapter: board private structure
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003446 * @tx_ring: tx descriptor ring (for a specific queue) to setup
Auke Kok9a799d72007-09-15 14:07:45 -07003447 *
3448 * Return 0 on success, negative on failure
3449 **/
3450int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07003451 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07003452{
3453 struct pci_dev *pdev = adapter->pdev;
3454 int size;
3455
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003456 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
3457 tx_ring->tx_buffer_info = vmalloc(size);
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07003458 if (!tx_ring->tx_buffer_info)
3459 goto err;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003460 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9a799d72007-09-15 14:07:45 -07003461
3462 /* round up to nearest 4K */
Peter P Waskiewicz Jr12207e42009-02-06 21:47:24 -08003463 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003464 tx_ring->size = ALIGN(tx_ring->size, 4096);
Auke Kok9a799d72007-09-15 14:07:45 -07003465
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003466 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
3467 &tx_ring->dma);
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07003468 if (!tx_ring->desc)
3469 goto err;
Auke Kok9a799d72007-09-15 14:07:45 -07003470
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003471 tx_ring->next_to_use = 0;
3472 tx_ring->next_to_clean = 0;
3473 tx_ring->work_limit = tx_ring->count;
Auke Kok9a799d72007-09-15 14:07:45 -07003474 return 0;
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07003475
3476err:
3477 vfree(tx_ring->tx_buffer_info);
3478 tx_ring->tx_buffer_info = NULL;
3479 DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
3480 "descriptor ring\n");
3481 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07003482}
3483
3484/**
Alexander Duyck69888672008-09-11 20:05:39 -07003485 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
3486 * @adapter: board private structure
3487 *
3488 * If this function returns with an error, then it's possible one or
3489 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
3491 *
3492 * Return 0 on success, negative on failure
3493 **/
3494static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
3495{
3496 int i, err = 0;
3497
3498 for (i = 0; i < adapter->num_tx_queues; i++) {
3499 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
3500 if (!err)
3501 continue;
3502 DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
3503 break;
3504 }
3505
3506 return err;
3507}
3508
3509/**
Auke Kok9a799d72007-09-15 14:07:45 -07003510 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
3511 * @adapter: board private structure
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003512 * @rx_ring: rx descriptor ring (for a specific queue) to setup
Auke Kok9a799d72007-09-15 14:07:45 -07003513 *
3514 * Returns 0 on success, negative on failure
3515 **/
3516int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003517 struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07003518{
3519 struct pci_dev *pdev = adapter->pdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003520 int size;
Auke Kok9a799d72007-09-15 14:07:45 -07003521
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003522 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
3523 rx_ring->rx_buffer_info = vmalloc(size);
3524 if (!rx_ring->rx_buffer_info) {
Auke Kok9a799d72007-09-15 14:07:45 -07003525 DPRINTK(PROBE, ERR,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003526 "vmalloc allocation failed for the rx desc ring\n");
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07003527 goto alloc_failed;
Auke Kok9a799d72007-09-15 14:07:45 -07003528 }
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003529 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9a799d72007-09-15 14:07:45 -07003530
Auke Kok9a799d72007-09-15 14:07:45 -07003531 /* Round up to nearest 4K */
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003532 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3533 rx_ring->size = ALIGN(rx_ring->size, 4096);
Auke Kok9a799d72007-09-15 14:07:45 -07003534
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003535 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
Auke Kok9a799d72007-09-15 14:07:45 -07003536
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003537 if (!rx_ring->desc) {
Auke Kok9a799d72007-09-15 14:07:45 -07003538 DPRINTK(PROBE, ERR,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003539 "Memory allocation failed for the rx desc ring\n");
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003540 vfree(rx_ring->rx_buffer_info);
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07003541 goto alloc_failed;
Auke Kok9a799d72007-09-15 14:07:45 -07003542 }
3543
Jesse Brandeburg3a581072008-08-26 04:27:08 -07003544 rx_ring->next_to_clean = 0;
3545 rx_ring->next_to_use = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003546
3547 return 0;
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07003548
3549alloc_failed:
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07003550 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07003551}
3552
3553/**
Alexander Duyck69888672008-09-11 20:05:39 -07003554 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
3555 * @adapter: board private structure
3556 *
3557 * If this function returns with an error, then it's possible one or
3558 * more of the rings is populated (while the rest are not). It is the
3559 * callers duty to clean those orphaned rings.
3560 *
3561 * Return 0 on success, negative on failure
3562 **/
3564static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
3565{
3566 int i, err = 0;
3567
3568 for (i = 0; i < adapter->num_rx_queues; i++) {
3569 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
3570 if (!err)
3571 continue;
3572 DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
3573 break;
3574 }
3575
3576 return err;
3577}
3578
3579/**
Auke Kok9a799d72007-09-15 14:07:45 -07003580 * ixgbe_free_tx_resources - Free Tx Resources per Queue
3581 * @adapter: board private structure
3582 * @tx_ring: Tx descriptor ring for a specific queue
3583 *
3584 * Free all transmit software resources
3585 **/
Jesse Brandeburgc431f972008-09-11 19:59:16 -07003586void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
3587 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07003588{
3589 struct pci_dev *pdev = adapter->pdev;
3590
3591 ixgbe_clean_tx_ring(adapter, tx_ring);
3592
3593 vfree(tx_ring->tx_buffer_info);
3594 tx_ring->tx_buffer_info = NULL;
3595
3596 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
3597
3598 tx_ring->desc = NULL;
3599}
3600
3601/**
3602 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
3603 * @adapter: board private structure
3604 *
3605 * Free all transmit software resources
3606 **/
3607static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
3608{
3609 int i;
3610
3611 for (i = 0; i < adapter->num_tx_queues; i++)
Jesse Brandeburg9891ca72009-03-13 22:14:50 +00003612 if (adapter->tx_ring[i].desc)
3613 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07003614}
3615
3616/**
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003617 * ixgbe_free_rx_resources - Free Rx Resources
Auke Kok9a799d72007-09-15 14:07:45 -07003618 * @adapter: board private structure
3619 * @rx_ring: ring to clean the resources from
3620 *
3621 * Free all receive software resources
3622 **/
Jesse Brandeburgc431f972008-09-11 19:59:16 -07003623void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
3624 struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07003625{
3626 struct pci_dev *pdev = adapter->pdev;
3627
3628 ixgbe_clean_rx_ring(adapter, rx_ring);
3629
3630 vfree(rx_ring->rx_buffer_info);
3631 rx_ring->rx_buffer_info = NULL;
3632
3633 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
3634
3635 rx_ring->desc = NULL;
3636}
3637
3638/**
3639 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
3640 * @adapter: board private structure
3641 *
3642 * Free all receive software resources
3643 **/
3644static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
3645{
3646 int i;
3647
3648 for (i = 0; i < adapter->num_rx_queues; i++)
Jesse Brandeburg9891ca72009-03-13 22:14:50 +00003649 if (adapter->rx_ring[i].desc)
3650 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07003651}
3652
3653/**
Auke Kok9a799d72007-09-15 14:07:45 -07003654 * ixgbe_change_mtu - Change the Maximum Transfer Unit
3655 * @netdev: network interface device structure
3656 * @new_mtu: new value for maximum frame size
3657 *
3658 * Returns 0 on success, negative on failure
3659 **/
3660static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
3661{
3662 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3663 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3664
Jesse Brandeburg42c783c2008-09-11 19:56:28 -07003665 /* MTU < 68 is an error and causes problems on some kernels */
3666 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
Auke Kok9a799d72007-09-15 14:07:45 -07003667 return -EINVAL;
3668
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003669 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003670 netdev->mtu, new_mtu);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003671 /* must set new MTU before calling down or up */
Auke Kok9a799d72007-09-15 14:07:45 -07003672 netdev->mtu = new_mtu;
3673
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003674 if (netif_running(netdev))
3675 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003676
3677 return 0;
3678}
3679
3680/**
3681 * ixgbe_open - Called when a network interface is made active
3682 * @netdev: network interface device structure
3683 *
3684 * Returns 0 on success, negative value on failure
3685 *
3686 * The open entry point is called when a network interface is made
3687 * active by the system (IFF_UP). At this point all resources needed
3688 * for transmit and receive operations are allocated, the interrupt
3689 * handler is registered with the OS, the watchdog timer is started,
3690 * and the stack is notified that the interface is ready.
3691 **/
3692static int ixgbe_open(struct net_device *netdev)
3693{
3694 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3695 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07003696
Auke Kok4bebfaa2008-02-11 09:26:01 -08003697 /* disallow open during test */
3698 if (test_bit(__IXGBE_TESTING, &adapter->state))
3699 return -EBUSY;
3700
Jesse Brandeburg54386462009-04-17 20:44:27 +00003701 netif_carrier_off(netdev);
3702
Auke Kok9a799d72007-09-15 14:07:45 -07003703 /* allocate transmit descriptors */
3704 err = ixgbe_setup_all_tx_resources(adapter);
3705 if (err)
3706 goto err_setup_tx;
3707
Auke Kok9a799d72007-09-15 14:07:45 -07003708 /* allocate receive descriptors */
3709 err = ixgbe_setup_all_rx_resources(adapter);
3710 if (err)
3711 goto err_setup_rx;
3712
3713 ixgbe_configure(adapter);
3714
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003715 err = ixgbe_request_irq(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003716 if (err)
3717 goto err_req_irq;
3718
Auke Kok9a799d72007-09-15 14:07:45 -07003719 err = ixgbe_up_complete(adapter);
3720 if (err)
3721 goto err_up;
3722
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07003723 netif_tx_start_all_queues(netdev);
3724
Auke Kok9a799d72007-09-15 14:07:45 -07003725 return 0;
3726
3727err_up:
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08003728 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003729 ixgbe_free_irq(adapter);
3730err_req_irq:
Auke Kok9a799d72007-09-15 14:07:45 -07003731err_setup_rx:
Mallikarjuna R Chilakalaa20a1192009-03-31 21:34:44 +00003732 ixgbe_free_all_rx_resources(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003733err_setup_tx:
Mallikarjuna R Chilakalaa20a1192009-03-31 21:34:44 +00003734 ixgbe_free_all_tx_resources(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003735 ixgbe_reset(adapter);
3736
3737 return err;
3738}
3739
3740/**
3741 * ixgbe_close - Disables a network interface
3742 * @netdev: network interface device structure
3743 *
3744 * Returns 0, this is not allowed to fail
3745 *
3746 * The close entry point is called when an interface is de-activated
3747 * by the OS. The hardware is still under the drivers control, but
3748 * needs to be disabled. A global MAC reset is issued to stop the
3749 * hardware, and all transmit and receive resources are freed.
3750 **/
3751static int ixgbe_close(struct net_device *netdev)
3752{
3753 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07003754
3755 ixgbe_down(adapter);
3756 ixgbe_free_irq(adapter);
3757
3758 ixgbe_free_all_tx_resources(adapter);
3759 ixgbe_free_all_rx_resources(adapter);
3760
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08003761 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003762
3763 return 0;
3764}
3765
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003766#ifdef CONFIG_PM
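/**
 * ixgbe_resume - restore the device after a system suspend
 * @pdev: PCI device information struct
 *
 * Restores PCI config state, re-initializes the interrupt scheme, resets
 * the hardware, clears the wake-up status and re-opens the interface if it
 * was running when we suspended.
 **/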
3767static int ixgbe_resume(struct pci_dev *pdev)
3768{
3769 struct net_device *netdev = pci_get_drvdata(pdev);
3770 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3771 u32 err;
3772
3773 pci_set_power_state(pdev, PCI_D0);
3774 pci_restore_state(pdev);
3775 err = pci_enable_device(pdev);
3776 if (err) {
Alexander Duyck69888672008-09-11 20:05:39 -07003777 printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003778 "suspend\n");
3779 return err;
3780 }
3781 pci_set_master(pdev);
3782
Don Skidmoredd4d8ca2009-04-29 00:22:31 -07003783 pci_wake_from_d3(pdev, false);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003784
3785 err = ixgbe_init_interrupt_scheme(adapter);
3786 if (err) {
3787 printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
3788 "device\n");
3789 return err;
3790 }
3791
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003792 ixgbe_reset(adapter);
3793
Waskiewicz Jr, Peter P495dce12009-04-23 11:15:18 +00003794 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
3795
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003796 if (netif_running(netdev)) {
3797 err = ixgbe_open(adapter->netdev);
3798 if (err)
3799 return err;
3800 }
3801
3802 netif_device_attach(netdev);
3803
3804 return 0;
3805}
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003806#endif /* CONFIG_PM */
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00003807
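/**
 * __ixgbe_shutdown - common suspend/shutdown path
 * @pdev: PCI device information struct
 * @enable_wake: set to true when Wake-on-LAN filters have been armed
 *
 * Detaches the netdev, tears down rings and the interrupt scheme, programs
 * the wake-up filter registers when WoL is configured and disables the PCI
 * device.
 **/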
3808static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003809{
3810 struct net_device *netdev = pci_get_drvdata(pdev);
3811 struct ixgbe_adapter *adapter = netdev_priv(netdev);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003812 struct ixgbe_hw *hw = &adapter->hw;
3813 u32 ctrl, fctrl;
3814 u32 wufc = adapter->wol;
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003815#ifdef CONFIG_PM
3816 int retval = 0;
3817#endif
3818
3819 netif_device_detach(netdev);
3820
3821 if (netif_running(netdev)) {
3822 ixgbe_down(adapter);
3823 ixgbe_free_irq(adapter);
3824 ixgbe_free_all_tx_resources(adapter);
3825 ixgbe_free_all_rx_resources(adapter);
3826 }
Alexander Duyck7a921c92009-05-06 10:43:28 +00003827 ixgbe_clear_interrupt_scheme(adapter);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003828
3829#ifdef CONFIG_PM
3830 retval = pci_save_state(pdev);
3831 if (retval)
3832 return retval;
Jesse Brandeburg4df10462009-03-13 22:15:31 +00003833
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003834#endif
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003835 if (wufc) {
3836 ixgbe_set_rx_mode(netdev);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003837
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003838 /* turn on all-multi mode if wake on multicast is enabled */
3839 if (wufc & IXGBE_WUFC_MC) {
3840 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3841 fctrl |= IXGBE_FCTRL_MPE;
3842 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
3843 }
3844
3845 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
3846 ctrl |= IXGBE_CTRL_GIO_DIS;
3847 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
3848
3849 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
3850 } else {
3851 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
3852 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
3853 }
3854
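	/* only 82599-based parts are given WoL support by this driver, so
	 * only they are armed to generate PME from D3 */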
Don Skidmoredd4d8ca2009-04-29 00:22:31 -07003855 if (wufc && hw->mac.type == ixgbe_mac_82599EB)
3856 pci_wake_from_d3(pdev, true);
3857 else
3858 pci_wake_from_d3(pdev, false);
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003859
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00003860 *enable_wake = !!wufc;
3861
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003862 ixgbe_release_hw_control(adapter);
3863
3864 pci_disable_device(pdev);
3865
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003866 return 0;
3867}
3868
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00003869#ifdef CONFIG_PM
3870static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
3871{
3872 int retval;
3873 bool wake;
3874
3875 retval = __ixgbe_shutdown(pdev, &wake);
3876 if (retval)
3877 return retval;
3878
3879 if (wake) {
3880 pci_prepare_to_sleep(pdev);
3881 } else {
3882 pci_wake_from_d3(pdev, false);
3883 pci_set_power_state(pdev, PCI_D3hot);
3884 }
3885
3886 return 0;
3887}
3888#endif /* CONFIG_PM */
3889
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003890static void ixgbe_shutdown(struct pci_dev *pdev)
3891{
Rafael J. Wysocki9d8d05a2009-04-15 17:44:01 +00003892 bool wake;
3893
3894 __ixgbe_shutdown(pdev, &wake);
3895
3896 if (system_state == SYSTEM_POWER_OFF) {
3897 pci_wake_from_d3(pdev, wake);
3898 pci_set_power_state(pdev, PCI_D3hot);
3899 }
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003900}
3901
3902/**
Auke Kok9a799d72007-09-15 14:07:45 -07003903 * ixgbe_update_stats - Update the board statistics counters.
3904 * @adapter: board private structure
3905 **/
3906void ixgbe_update_stats(struct ixgbe_adapter *adapter)
3907{
3908 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003909 u64 total_mpc = 0;
3910 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
Auke Kok9a799d72007-09-15 14:07:45 -07003911
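	/* 82599 only: accumulate the per-queue "no DMA resources" drop counts
	 * and sum the hardware RSC (receive coalescing) counters per ring */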
PJ Waskiewiczd51019a2009-03-13 22:12:48 +00003912 if (hw->mac.type == ixgbe_mac_82599EB) {
Alexander Duyckf8212f92009-04-27 22:42:37 +00003913 u64 rsc_count = 0;
PJ Waskiewiczd51019a2009-03-13 22:12:48 +00003914 for (i = 0; i < 16; i++)
3915 adapter->hw_rx_no_dma_resources +=
3916 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
Alexander Duyckf8212f92009-04-27 22:42:37 +00003917 for (i = 0; i < adapter->num_rx_queues; i++)
3918 rsc_count += adapter->rx_ring[i].rsc_count;
3919 adapter->rsc_count = rsc_count;
PJ Waskiewiczd51019a2009-03-13 22:12:48 +00003920 }
3921
Auke Kok9a799d72007-09-15 14:07:45 -07003922 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003923 for (i = 0; i < 8; i++) {
3924 /* for packet buffers not used, the register should read 0 */
3925 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3926 missed_rx += mpc;
3927 adapter->stats.mpc[i] += mpc;
3928 total_mpc += adapter->stats.mpc[i];
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003929 if (hw->mac.type == ixgbe_mac_82598EB)
3930 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
Alexander Duyck2f90b862008-11-20 20:52:10 -08003931 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3932 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
3933 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3934 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003935 if (hw->mac.type == ixgbe_mac_82599EB) {
3936 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
3937 IXGBE_PXONRXCNT(i));
3938 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
3939 IXGBE_PXOFFRXCNT(i));
3940 adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003941 } else {
3942 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
3943 IXGBE_PXONRXC(i));
3944 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
3945 IXGBE_PXOFFRXC(i));
3946 }
Alexander Duyck2f90b862008-11-20 20:52:10 -08003947 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
3948 IXGBE_PXONTXC(i));
Alexander Duyck2f90b862008-11-20 20:52:10 -08003949 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003950 IXGBE_PXOFFTXC(i));
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003951 }
3952 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3953 /* work around hardware counting issue */
3954 adapter->stats.gprc -= missed_rx;
Auke Kok9a799d72007-09-15 14:07:45 -07003955
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003956	/* 82598 only has a 32 bit count in the high register; 82599 splits the count across the low/high register pair */
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003957 if (hw->mac.type == ixgbe_mac_82599EB) {
3958 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
3959 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
3960 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
3961 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
3962 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
3963 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
3964 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3965 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3966 } else {
3967 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3968 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3969 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3970 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3971 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3972 }
Auke Kok9a799d72007-09-15 14:07:45 -07003973 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3974 adapter->stats.bprc += bprc;
3975 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00003976 if (hw->mac.type == ixgbe_mac_82598EB)
3977 adapter->stats.mprc -= bprc;
Auke Kok9a799d72007-09-15 14:07:45 -07003978 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3979 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3980 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3981 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3982 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3983 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3984 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
Auke Kok9a799d72007-09-15 14:07:45 -07003985 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003986 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3987 adapter->stats.lxontxc += lxon;
3988 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3989 adapter->stats.lxofftxc += lxoff;
Auke Kok9a799d72007-09-15 14:07:45 -07003990 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3991 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003992 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3993 /*
3994 * 82598 errata - tx of flow control packets is included in tx counters
3995 */
3996 xon_off_tot = lxon + lxoff;
3997 adapter->stats.gptc -= xon_off_tot;
3998 adapter->stats.mptc -= xon_off_tot;
3999 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
Auke Kok9a799d72007-09-15 14:07:45 -07004000 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4001 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4002 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
Auke Kok9a799d72007-09-15 14:07:45 -07004003 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4004 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08004005 adapter->stats.ptc64 -= xon_off_tot;
Auke Kok9a799d72007-09-15 14:07:45 -07004006 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4007 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4008 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4009 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4010 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
Auke Kok9a799d72007-09-15 14:07:45 -07004011 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4012
4013 /* Fill out the OS statistics structure */
Auke Kok9a799d72007-09-15 14:07:45 -07004014 adapter->net_stats.multicast = adapter->stats.mprc;
4015
4016 /* Rx Errors */
4017 adapter->net_stats.rx_errors = adapter->stats.crcerrs +
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004018 adapter->stats.rlec;
Auke Kok9a799d72007-09-15 14:07:45 -07004019 adapter->net_stats.rx_dropped = 0;
4020 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
4021 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08004022 adapter->net_stats.rx_missed_errors = total_mpc;
Auke Kok9a799d72007-09-15 14:07:45 -07004023}
4024
4025/**
4026 * ixgbe_watchdog - Timer Call-back
4027 * @data: pointer to adapter cast into an unsigned long
4028 **/
4029static void ixgbe_watchdog(unsigned long data)
4030{
4031 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004032 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07004033
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004034 /* Do the watchdog outside of interrupt context due to the lovely
4035 * delays that some of the newer hardware requires */
4036 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00004037 u64 eics = 0;
4038 int i;
4039
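		/* build a bitmask of all queue vectors; it is used below to
		 * fire a software interrupt at each of them (MSI-X only) */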
4040 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++)
4041 eics |= (1 << i);
4042
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004043 /* Cause software interrupt to ensure rx rings are cleaned */
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00004044 switch (hw->mac.type) {
4045 case ixgbe_mac_82598EB:
4046 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
4047 IXGBE_WRITE_REG(hw, IXGBE_EICS, (u32)eics);
4048 } else {
4049 /*
4050 * for legacy and MSI interrupts don't set any
4051 * bits that are enabled for EIAM, because this
4052 * operation would set *both* EIMS and EICS for
4053 * any bit in EIAM
4054 */
4055 IXGBE_WRITE_REG(hw, IXGBE_EICS,
4056 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
4057 }
4058 break;
4059 case ixgbe_mac_82599EB:
4060 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
Nelson, Shannon835462f2009-04-27 22:42:54 +00004061 IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(0),
4062 (u32)(eics & 0xFFFFFFFF));
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00004063 IXGBE_WRITE_REG(hw, IXGBE_EICS_EX(1),
Jesse Brandeburg22d5a712009-03-19 01:24:04 +00004064 (u32)(eics >> 32));
4065 } else {
4066 /*
4067 * for legacy and MSI interrupts don't set any
4068 * bits that are enabled for EIAM, because this
4069 * operation would set *both* EIMS and EICS for
4070 * any bit in EIAM
4071 */
4072 IXGBE_WRITE_REG(hw, IXGBE_EICS,
4073 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
4074 }
4075 break;
4076 default:
4077 break;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004078 }
4079 /* Reset the timer */
4080 mod_timer(&adapter->watchdog_timer,
4081 round_jiffies(jiffies + 2 * HZ));
4082 }
4083
4084 schedule_work(&adapter->watchdog_task);
4085}
4086
4087/**
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004088 * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
4089 * @work: pointer to work_struct containing our data
4090 **/
4091static void ixgbe_multispeed_fiber_task(struct work_struct *work)
4092{
4093 struct ixgbe_adapter *adapter = container_of(work,
4094 struct ixgbe_adapter,
4095 multispeed_fiber_task);
4096 struct ixgbe_hw *hw = &adapter->hw;
4097 u32 autoneg;
4098
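	/* IN_SFP_LINK_TASK keeps the SFP config module task from re-scheduling
	 * this work while the speed negotiation is still in progress */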
4099 adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
4100 if (hw->mac.ops.get_link_capabilities)
4101 hw->mac.ops.get_link_capabilities(hw, &autoneg,
4102 &hw->mac.autoneg);
4103 if (hw->mac.ops.setup_link_speed)
4104 hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
4105 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4106 adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
4107}
4108
4109/**
4110 * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
4111 * @work: pointer to work_struct containing our data
4112 **/
4113static void ixgbe_sfp_config_module_task(struct work_struct *work)
4114{
4115 struct ixgbe_adapter *adapter = container_of(work,
4116 struct ixgbe_adapter,
4117 sfp_config_module_task);
4118 struct ixgbe_hw *hw = &adapter->hw;
 4119	s32 err;
4120
4121 adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
4122 err = hw->phy.ops.identify_sfp(hw);
4123 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4124 DPRINTK(PROBE, ERR, "PHY not supported on this NIC %d\n", err);
4125 ixgbe_down(adapter);
4126 return;
4127 }
4128 hw->mac.ops.setup_sfp(hw);
4129
Tony Breeds8d1c3c02009-04-09 22:29:10 +00004130 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004131 /* This will also work for DA Twinax connections */
4132 schedule_work(&adapter->multispeed_fiber_task);
4133 adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
4134}
4135
4136/**
Alexander Duyck69888672008-09-11 20:05:39 -07004137 * ixgbe_watchdog_task - worker thread to bring link up
4138 * @work: pointer to work_struct containing our data
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004139 **/
4140static void ixgbe_watchdog_task(struct work_struct *work)
4141{
4142 struct ixgbe_adapter *adapter = container_of(work,
4143 struct ixgbe_adapter,
4144 watchdog_task);
4145 struct net_device *netdev = adapter->netdev;
4146 struct ixgbe_hw *hw = &adapter->hw;
4147 u32 link_speed = adapter->link_speed;
4148 bool link_up = adapter->link_up;
Nelson, Shannonbc59fcd2009-04-27 22:43:12 +00004149 int i;
4150 struct ixgbe_ring *tx_ring;
4151 int some_tx_pending = 0;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004152
4153 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
4154
4155 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4156 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
4157 if (link_up ||
4158 time_after(jiffies, (adapter->link_check_timeout +
4159 IXGBE_TRY_LINK_TIMEOUT))) {
4160 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
4161 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4162 }
4163 adapter->link_up = link_up;
4164 adapter->link_speed = link_speed;
4165 }
Auke Kok9a799d72007-09-15 14:07:45 -07004166
4167 if (link_up) {
4168 if (!netif_carrier_ok(netdev)) {
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004169 bool flow_rx, flow_tx;
4170
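			/* report the negotiated flow control state; 82599
			 * exposes it via MFLCN/FCCFG, 82598 via FCTRL/RMCS */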
4171 if (hw->mac.type == ixgbe_mac_82599EB) {
4172 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4173 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4174 flow_rx = (mflcn & IXGBE_MFLCN_RFCE);
4175 flow_tx = (fccfg & IXGBE_FCCFG_TFCE_802_3X);
4176 } else {
4177 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4178 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
4179 flow_rx = (frctl & IXGBE_FCTRL_RFCE);
4180 flow_tx = (rmcs & IXGBE_RMCS_TFCE_802_3X);
4181 }
4182
Jeff Kirshera46e5342008-11-27 00:22:21 -08004183 printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
4184 "Flow Control: %s\n",
4185 netdev->name,
4186 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
4187 "10 Gbps" :
4188 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
4189 "1 Gbps" : "unknown speed")),
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004190 ((flow_rx && flow_tx) ? "RX/TX" :
4191 (flow_rx ? "RX" :
4192 (flow_tx ? "TX" : "None"))));
Auke Kok9a799d72007-09-15 14:07:45 -07004193
4194 netif_carrier_on(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07004195 } else {
4196 /* Force detection of hung controller */
4197 adapter->detect_tx_hung = true;
4198 }
4199 } else {
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004200 adapter->link_up = false;
4201 adapter->link_speed = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07004202 if (netif_carrier_ok(netdev)) {
Jeff Kirshera46e5342008-11-27 00:22:21 -08004203 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
4204 netdev->name);
Auke Kok9a799d72007-09-15 14:07:45 -07004205 netif_carrier_off(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07004206 }
4207 }
4208
Nelson, Shannonbc59fcd2009-04-27 22:43:12 +00004209 if (!netif_carrier_ok(netdev)) {
4210 for (i = 0; i < adapter->num_tx_queues; i++) {
4211 tx_ring = &adapter->tx_ring[i];
4212 if (tx_ring->next_to_use != tx_ring->next_to_clean) {
4213 some_tx_pending = 1;
4214 break;
4215 }
4216 }
4217
4218 if (some_tx_pending) {
4219 /* We've lost link, so the controller stops DMA,
4220 * but we've got queued Tx work that's never going
4221 * to get done, so reset controller to flush Tx.
4222 * (Do the reset outside of interrupt context).
4223 */
4224 schedule_work(&adapter->reset_task);
4225 }
4226 }
4227
Auke Kok9a799d72007-09-15 14:07:45 -07004228 ixgbe_update_stats(adapter);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004229 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
Auke Kok9a799d72007-09-15 14:07:45 -07004230}
4231
Auke Kok9a799d72007-09-15 14:07:45 -07004232static int ixgbe_tso(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004233 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
4234 u32 tx_flags, u8 *hdr_len)
Auke Kok9a799d72007-09-15 14:07:45 -07004235{
4236 struct ixgbe_adv_tx_context_desc *context_desc;
4237 unsigned int i;
4238 int err;
4239 struct ixgbe_tx_buffer *tx_buffer_info;
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004240 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
4241 u32 mss_l4len_idx, l4len;
Auke Kok9a799d72007-09-15 14:07:45 -07004242
4243 if (skb_is_gso(skb)) {
4244 if (skb_header_cloned(skb)) {
4245 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4246 if (err)
4247 return err;
4248 }
4249 l4len = tcp_hdrlen(skb);
4250 *hdr_len += l4len;
4251
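		/* for TSO the IP length/checksum are zeroed and the TCP
		 * checksum is seeded with the pseudo-header so the hardware
		 * can finalize the checksum on every segment */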
Al Viro8327d002007-12-10 18:54:12 +00004252 if (skb->protocol == htons(ETH_P_IP)) {
Auke Kok9a799d72007-09-15 14:07:45 -07004253 struct iphdr *iph = ip_hdr(skb);
4254 iph->tot_len = 0;
4255 iph->check = 0;
4256 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004257 iph->daddr, 0,
4258 IPPROTO_TCP,
4259 0);
Auke Kok9a799d72007-09-15 14:07:45 -07004260 adapter->hw_tso_ctxt++;
4261 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
4262 ipv6_hdr(skb)->payload_len = 0;
4263 tcp_hdr(skb)->check =
4264 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004265 &ipv6_hdr(skb)->daddr,
4266 0, IPPROTO_TCP, 0);
Auke Kok9a799d72007-09-15 14:07:45 -07004267 adapter->hw_tso6_ctxt++;
4268 }
4269
4270 i = tx_ring->next_to_use;
4271
4272 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4273 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
4274
4275 /* VLAN MACLEN IPLEN */
4276 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
4277 vlan_macip_lens |=
4278 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
4279 vlan_macip_lens |= ((skb_network_offset(skb)) <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004280 IXGBE_ADVTXD_MACLEN_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07004281 *hdr_len += skb_network_offset(skb);
4282 vlan_macip_lens |=
4283 (skb_transport_header(skb) - skb_network_header(skb));
4284 *hdr_len +=
4285 (skb_transport_header(skb) - skb_network_header(skb));
4286 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4287 context_desc->seqnum_seed = 0;
4288
4289 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004290 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004291 IXGBE_ADVTXD_DTYP_CTXT);
Auke Kok9a799d72007-09-15 14:07:45 -07004292
Al Viro8327d002007-12-10 18:54:12 +00004293 if (skb->protocol == htons(ETH_P_IP))
Auke Kok9a799d72007-09-15 14:07:45 -07004294 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
4295 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
4296 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
4297
4298 /* MSS L4LEN IDX */
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004299 mss_l4len_idx =
Auke Kok9a799d72007-09-15 14:07:45 -07004300 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
4301 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07004302 /* use index 1 for TSO */
4303 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07004304 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4305
4306 tx_buffer_info->time_stamp = jiffies;
4307 tx_buffer_info->next_to_watch = i;
4308
4309 i++;
4310 if (i == tx_ring->count)
4311 i = 0;
4312 tx_ring->next_to_use = i;
4313
4314 return true;
4315 }
4316 return false;
4317}
4318
4319static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004320 struct ixgbe_ring *tx_ring,
4321 struct sk_buff *skb, u32 tx_flags)
Auke Kok9a799d72007-09-15 14:07:45 -07004322{
4323 struct ixgbe_adv_tx_context_desc *context_desc;
4324 unsigned int i;
4325 struct ixgbe_tx_buffer *tx_buffer_info;
4326 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
4327
4328 if (skb->ip_summed == CHECKSUM_PARTIAL ||
4329 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
4330 i = tx_ring->next_to_use;
4331 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4332 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
4333
4334 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
4335 vlan_macip_lens |=
4336 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
4337 vlan_macip_lens |= (skb_network_offset(skb) <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004338 IXGBE_ADVTXD_MACLEN_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07004339 if (skb->ip_summed == CHECKSUM_PARTIAL)
4340 vlan_macip_lens |= (skb_transport_header(skb) -
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004341 skb_network_header(skb));
Auke Kok9a799d72007-09-15 14:07:45 -07004342
4343 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4344 context_desc->seqnum_seed = 0;
4345
4346 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004347 IXGBE_ADVTXD_DTYP_CTXT);
Auke Kok9a799d72007-09-15 14:07:45 -07004348
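		/* pick the L4 offload type (TCP or SCTP) that matches the
		 * packet's protocol */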
4349 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Auke Kok41825d72008-02-12 15:20:33 -08004350 switch (skb->protocol) {
Harvey Harrison09640e62009-02-01 00:45:17 -08004351 case cpu_to_be16(ETH_P_IP):
Auke Kok9a799d72007-09-15 14:07:45 -07004352 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
Auke Kok41825d72008-02-12 15:20:33 -08004353 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4354 type_tucmd_mlhl |=
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004355 IXGBE_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburg45a5ead2009-04-27 22:36:35 +00004356 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
4357 type_tucmd_mlhl |=
4358 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
Auke Kok41825d72008-02-12 15:20:33 -08004359 break;
Harvey Harrison09640e62009-02-01 00:45:17 -08004360 case cpu_to_be16(ETH_P_IPV6):
Auke Kok41825d72008-02-12 15:20:33 -08004361 /* XXX what about other V6 headers?? */
4362 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4363 type_tucmd_mlhl |=
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004364 IXGBE_ADVTXD_TUCMD_L4T_TCP;
Jesse Brandeburg45a5ead2009-04-27 22:36:35 +00004365 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
4366 type_tucmd_mlhl |=
4367 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
Auke Kok41825d72008-02-12 15:20:33 -08004368 break;
Auke Kok41825d72008-02-12 15:20:33 -08004369 default:
4370 if (unlikely(net_ratelimit())) {
4371 DPRINTK(PROBE, WARNING,
4372 "partial checksum but proto=%x!\n",
4373 skb->protocol);
4374 }
4375 break;
4376 }
Auke Kok9a799d72007-09-15 14:07:45 -07004377 }
4378
4379 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07004380 /* use index zero for tx checksum offload */
Auke Kok9a799d72007-09-15 14:07:45 -07004381 context_desc->mss_l4len_idx = 0;
4382
4383 tx_buffer_info->time_stamp = jiffies;
4384 tx_buffer_info->next_to_watch = i;
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004385
Auke Kok9a799d72007-09-15 14:07:45 -07004386 adapter->hw_csum_tx_good++;
4387 i++;
4388 if (i == tx_ring->count)
4389 i = 0;
4390 tx_ring->next_to_use = i;
4391
4392 return true;
4393 }
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004394
Auke Kok9a799d72007-09-15 14:07:45 -07004395 return false;
4396}
4397
4398static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004399 struct ixgbe_ring *tx_ring,
4400 struct sk_buff *skb, unsigned int first)
Auke Kok9a799d72007-09-15 14:07:45 -07004401{
4402 struct ixgbe_tx_buffer *tx_buffer_info;
Alexander Duyck44df32c2009-03-31 21:34:23 +00004403 unsigned int len = skb_headlen(skb);
Auke Kok9a799d72007-09-15 14:07:45 -07004404 unsigned int offset = 0, size, count = 0, i;
4405 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
4406 unsigned int f;
Alexander Duyck44df32c2009-03-31 21:34:23 +00004407 dma_addr_t *map;
Auke Kok9a799d72007-09-15 14:07:45 -07004408
4409 i = tx_ring->next_to_use;
4410
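	/* map the whole skb (linear head plus page fragments) in one call;
	 * the per-segment DMA addresses come back in skb_shinfo(skb)->dma_maps */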
Alexander Duyck44df32c2009-03-31 21:34:23 +00004411 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
4412 dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
4413 return 0;
4414 }
4415
4416 map = skb_shinfo(skb)->dma_maps;
4417
Auke Kok9a799d72007-09-15 14:07:45 -07004418 while (len) {
4419 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4420 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
4421
4422 tx_buffer_info->length = size;
Alexander Duyck44df32c2009-03-31 21:34:23 +00004423 tx_buffer_info->dma = map[0] + offset;
Auke Kok9a799d72007-09-15 14:07:45 -07004424 tx_buffer_info->time_stamp = jiffies;
4425 tx_buffer_info->next_to_watch = i;
4426
4427 len -= size;
4428 offset += size;
4429 count++;
Alexander Duyck44df32c2009-03-31 21:34:23 +00004430
4431 if (len) {
4432 i++;
4433 if (i == tx_ring->count)
4434 i = 0;
4435 }
Auke Kok9a799d72007-09-15 14:07:45 -07004436 }
4437
4438 for (f = 0; f < nr_frags; f++) {
4439 struct skb_frag_struct *frag;
4440
4441 frag = &skb_shinfo(skb)->frags[f];
4442 len = frag->size;
Alexander Duyck44df32c2009-03-31 21:34:23 +00004443 offset = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07004444
4445 while (len) {
Alexander Duyck44df32c2009-03-31 21:34:23 +00004446 i++;
4447 if (i == tx_ring->count)
4448 i = 0;
4449
Auke Kok9a799d72007-09-15 14:07:45 -07004450 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4451 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
4452
4453 tx_buffer_info->length = size;
Alexander Duyck44df32c2009-03-31 21:34:23 +00004454 tx_buffer_info->dma = map[f + 1] + offset;
Auke Kok9a799d72007-09-15 14:07:45 -07004455 tx_buffer_info->time_stamp = jiffies;
4456 tx_buffer_info->next_to_watch = i;
4457
4458 len -= size;
4459 offset += size;
4460 count++;
Auke Kok9a799d72007-09-15 14:07:45 -07004461 }
4462 }
Alexander Duyck44df32c2009-03-31 21:34:23 +00004463
Auke Kok9a799d72007-09-15 14:07:45 -07004464 tx_ring->tx_buffer_info[i].skb = skb;
4465 tx_ring->tx_buffer_info[first].next_to_watch = i;
4466
4467 return count;
4468}
4469
4470static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004471 struct ixgbe_ring *tx_ring,
4472 int tx_flags, int count, u32 paylen, u8 hdr_len)
Auke Kok9a799d72007-09-15 14:07:45 -07004473{
4474 union ixgbe_adv_tx_desc *tx_desc = NULL;
4475 struct ixgbe_tx_buffer *tx_buffer_info;
4476 u32 olinfo_status = 0, cmd_type_len = 0;
4477 unsigned int i;
4478 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
4479
4480 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
4481
4482 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
4483
4484 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
4485 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
4486
4487 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
4488 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
4489
4490 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004491 IXGBE_ADVTXD_POPTS_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07004492
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07004493 /* use index 1 context for tso */
4494 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07004495 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
4496 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004497 IXGBE_ADVTXD_POPTS_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07004498
4499 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
4500 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004501 IXGBE_ADVTXD_POPTS_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07004502
4503 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
4504
4505 i = tx_ring->next_to_use;
4506 while (count--) {
4507 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4508 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
4509 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
4510 tx_desc->read.cmd_type_len =
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004511 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
Auke Kok9a799d72007-09-15 14:07:45 -07004512 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Auke Kok9a799d72007-09-15 14:07:45 -07004513 i++;
4514 if (i == tx_ring->count)
4515 i = 0;
4516 }
4517
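	/* the last descriptor carries EOP and RS so the hardware writes back
	 * status once the entire frame has been transmitted */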
4518 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
4519
4520 /*
4521 * Force memory writes to complete before letting h/w
4522 * know there are new descriptors to fetch. (Only
4523 * applicable for weak-ordered memory model archs,
4524 * such as IA-64).
4525 */
4526 wmb();
4527
4528 tx_ring->next_to_use = i;
4529 writel(i, adapter->hw.hw_addr + tx_ring->tail);
4530}
4531
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08004532static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004533 struct ixgbe_ring *tx_ring, int size)
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08004534{
4535 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4536
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08004537 netif_stop_subqueue(netdev, tx_ring->queue_index);
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08004538 /* Herbert's original patch had:
4539 * smp_mb__after_netif_stop_queue();
4540 * but since that doesn't exist yet, just open code it. */
4541 smp_mb();
4542
 4543	/* We need to check again in case another CPU has just
4544 * made room available. */
4545 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
4546 return -EBUSY;
4547
4548 /* A reprieve! - use start_queue because it doesn't call schedule */
Jesse Brandeburgaf721662008-09-11 19:54:23 -07004549 netif_start_subqueue(netdev, tx_ring->queue_index);
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08004550 ++adapter->restart_queue;
4551 return 0;
4552}
4553
4554static int ixgbe_maybe_stop_tx(struct net_device *netdev,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004555 struct ixgbe_ring *tx_ring, int size)
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08004556{
4557 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
4558 return 0;
4559 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
4560}
4561
Stephen Hemminger09a3b1f2009-03-21 13:40:01 -07004562static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
4563{
4564 struct ixgbe_adapter *adapter = netdev_priv(dev);
4565
4566 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
4567 return 0; /* All traffic should default to class 0 */
4568
4569 return skb_tx_hash(dev, skb);
4570}
4571
Auke Kok9a799d72007-09-15 14:07:45 -07004572static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4573{
4574 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4575 struct ixgbe_ring *tx_ring;
Auke Kok9a799d72007-09-15 14:07:45 -07004576 unsigned int first;
4577 unsigned int tx_flags = 0;
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08004578 u8 hdr_len = 0;
4579 int r_idx = 0, tso;
Auke Kok9a799d72007-09-15 14:07:45 -07004580 int count = 0;
4581 unsigned int f;
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004582
Wu Fengguang95615d92009-04-14 21:53:48 -07004583 r_idx = skb->queue_mapping;
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08004584 tx_ring = &adapter->tx_ring[r_idx];
Auke Kok9a799d72007-09-15 14:07:45 -07004585
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004586 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
4587 tx_flags |= vlan_tx_tag_get(skb);
Alexander Duyck2f90b862008-11-20 20:52:10 -08004588 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4589 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
4590 tx_flags |= (skb->queue_mapping << 13);
4591 }
4592 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
4593 tx_flags |= IXGBE_TX_FLAGS_VLAN;
4594 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4595 tx_flags |= (skb->queue_mapping << 13);
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004596 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
4597 tx_flags |= IXGBE_TX_FLAGS_VLAN;
Auke Kok9a799d72007-09-15 14:07:45 -07004598 }
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004599 /* three things can cause us to need a context descriptor */
4600 if (skb_is_gso(skb) ||
4601 (skb->ip_summed == CHECKSUM_PARTIAL) ||
4602 (tx_flags & IXGBE_TX_FLAGS_VLAN))
Auke Kok9a799d72007-09-15 14:07:45 -07004603 count++;
4604
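	/* estimate the worst-case descriptor count for this frame so the
	 * queue can be stopped before the ring runs dry */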
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07004605 count += TXD_USE_COUNT(skb_headlen(skb));
4606 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
Auke Kok9a799d72007-09-15 14:07:45 -07004607 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
4608
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08004609 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
Auke Kok9a799d72007-09-15 14:07:45 -07004610 adapter->tx_busy++;
Auke Kok9a799d72007-09-15 14:07:45 -07004611 return NETDEV_TX_BUSY;
4612 }
Auke Kok9a799d72007-09-15 14:07:45 -07004613
Al Viro8327d002007-12-10 18:54:12 +00004614 if (skb->protocol == htons(ETH_P_IP))
Auke Kok9a799d72007-09-15 14:07:45 -07004615 tx_flags |= IXGBE_TX_FLAGS_IPV4;
4616 first = tx_ring->next_to_use;
4617 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
4618 if (tso < 0) {
4619 dev_kfree_skb_any(skb);
4620 return NETDEV_TX_OK;
4621 }
4622
4623 if (tso)
4624 tx_flags |= IXGBE_TX_FLAGS_TSO;
4625 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004626 (skb->ip_summed == CHECKSUM_PARTIAL))
Auke Kok9a799d72007-09-15 14:07:45 -07004627 tx_flags |= IXGBE_TX_FLAGS_CSUM;
4628
Alexander Duyck44df32c2009-03-31 21:34:23 +00004629 count = ixgbe_tx_map(adapter, tx_ring, skb, first);
Auke Kok9a799d72007-09-15 14:07:45 -07004630
Alexander Duyck44df32c2009-03-31 21:34:23 +00004631 if (count) {
4632 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
4633 hdr_len);
4634 netdev->trans_start = jiffies;
4635 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
Auke Kok9a799d72007-09-15 14:07:45 -07004636
Alexander Duyck44df32c2009-03-31 21:34:23 +00004637 } else {
4638 dev_kfree_skb_any(skb);
4639 tx_ring->tx_buffer_info[first].time_stamp = 0;
4640 tx_ring->next_to_use = first;
4641 }
Auke Kok9a799d72007-09-15 14:07:45 -07004642
4643 return NETDEV_TX_OK;
4644}
4645
4646/**
4647 * ixgbe_get_stats - Get System Network Statistics
4648 * @netdev: network interface device structure
4649 *
4650 * Returns the address of the device statistics structure.
4651 * The statistics are actually updated from the timer callback.
4652 **/
4653static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
4654{
4655 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4656
4657 /* only return the current stats */
4658 return &adapter->net_stats;
4659}
4660
4661/**
4662 * ixgbe_set_mac - Change the Ethernet Address of the NIC
4663 * @netdev: network interface device structure
4664 * @p: pointer to an address structure
4665 *
4666 * Returns 0 on success, negative on failure
4667 **/
4668static int ixgbe_set_mac(struct net_device *netdev, void *p)
4669{
4670 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004671 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07004672 struct sockaddr *addr = p;
4673
4674 if (!is_valid_ether_addr(addr->sa_data))
4675 return -EADDRNOTAVAIL;
4676
4677 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004678 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
Auke Kok9a799d72007-09-15 14:07:45 -07004679
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004680 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
Auke Kok9a799d72007-09-15 14:07:45 -07004681
4682 return 0;
4683}
4684
Ben Hutchings6b73e102009-04-29 08:08:58 +00004685static int
4686ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
4687{
4688 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4689 struct ixgbe_hw *hw = &adapter->hw;
4690 u16 value;
4691 int rc;
4692
4693 if (prtad != hw->phy.mdio.prtad)
4694 return -EINVAL;
4695 rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
4696 if (!rc)
4697 rc = value;
4698 return rc;
4699}
4700
4701static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
4702 u16 addr, u16 value)
4703{
4704 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4705 struct ixgbe_hw *hw = &adapter->hw;
4706
4707 if (prtad != hw->phy.mdio.prtad)
4708 return -EINVAL;
4709 return hw->phy.ops.write_reg(hw, addr, devad, value);
4710}
4711
4712static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
4713{
4714 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4715
4716 return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
4717}
4718
Auke Kok9a799d72007-09-15 14:07:45 -07004719#ifdef CONFIG_NET_POLL_CONTROLLER
4720/*
4721 * Polling 'interrupt' - used by things like netconsole to send skbs
4722 * without having to re-enable interrupts. It's not called while
4723 * the interrupt routine is executing.
4724 */
4725static void ixgbe_netpoll(struct net_device *netdev)
4726{
4727 struct ixgbe_adapter *adapter = netdev_priv(netdev);
4728
4729 disable_irq(adapter->pdev->irq);
4730 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
4731 ixgbe_intr(adapter->pdev->irq, netdev);
4732 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
4733 enable_irq(adapter->pdev->irq);
4734}
4735#endif
4736
Stephen Hemminger0edc3522008-11-19 22:24:29 -08004737static const struct net_device_ops ixgbe_netdev_ops = {
4738 .ndo_open = ixgbe_open,
4739 .ndo_stop = ixgbe_close,
Stephen Hemminger00829822008-11-20 20:14:53 -08004740 .ndo_start_xmit = ixgbe_xmit_frame,
Stephen Hemminger09a3b1f2009-03-21 13:40:01 -07004741 .ndo_select_queue = ixgbe_select_queue,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08004742 .ndo_get_stats = ixgbe_get_stats,
Chris Leeche90d4002009-03-10 16:00:24 +00004743 .ndo_set_rx_mode = ixgbe_set_rx_mode,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08004744 .ndo_set_multicast_list = ixgbe_set_rx_mode,
4745 .ndo_validate_addr = eth_validate_addr,
4746 .ndo_set_mac_address = ixgbe_set_mac,
4747 .ndo_change_mtu = ixgbe_change_mtu,
4748 .ndo_tx_timeout = ixgbe_tx_timeout,
4749 .ndo_vlan_rx_register = ixgbe_vlan_rx_register,
4750 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
4751 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
Ben Hutchings6b73e102009-04-29 08:08:58 +00004752 .ndo_do_ioctl = ixgbe_ioctl,
Stephen Hemminger0edc3522008-11-19 22:24:29 -08004753#ifdef CONFIG_NET_POLL_CONTROLLER
4754 .ndo_poll_controller = ixgbe_netpoll,
4755#endif
4756};
4757
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004758/**
Auke Kok9a799d72007-09-15 14:07:45 -07004759 * ixgbe_probe - Device Initialization Routine
4760 * @pdev: PCI device information struct
4761 * @ent: entry in ixgbe_pci_tbl
4762 *
4763 * Returns 0 on success, negative on failure
4764 *
4765 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
4766 * The OS initialization, configuring of the adapter private structure,
4767 * and a hardware reset occur.
4768 **/
4769static int __devinit ixgbe_probe(struct pci_dev *pdev,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004770 const struct pci_device_id *ent)
Auke Kok9a799d72007-09-15 14:07:45 -07004771{
4772 struct net_device *netdev;
4773 struct ixgbe_adapter *adapter = NULL;
4774 struct ixgbe_hw *hw;
4775 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
Auke Kok9a799d72007-09-15 14:07:45 -07004776 static int cards_found;
4777 int i, err, pci_using_dac;
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004778 u32 part_num, eec;
Auke Kok9a799d72007-09-15 14:07:45 -07004779
4780 err = pci_enable_device(pdev);
4781 if (err)
4782 return err;
4783
Yang Hongyang6a355282009-04-06 19:01:13 -07004784 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
4785 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
Auke Kok9a799d72007-09-15 14:07:45 -07004786 pci_using_dac = 1;
4787 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004788 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9a799d72007-09-15 14:07:45 -07004789 if (err) {
Yang Hongyang284901a2009-04-06 19:01:15 -07004790 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Auke Kok9a799d72007-09-15 14:07:45 -07004791 if (err) {
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004792 dev_err(&pdev->dev, "No usable DMA "
4793 "configuration, aborting\n");
Auke Kok9a799d72007-09-15 14:07:45 -07004794 goto err_dma;
4795 }
4796 }
4797 pci_using_dac = 0;
4798 }
4799
4800 err = pci_request_regions(pdev, ixgbe_driver_name);
4801 if (err) {
4802 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
4803 goto err_pci_reg;
4804 }
4805
Peter P Waskiewicz Jr6fabd712008-12-10 01:13:08 -08004806 err = pci_enable_pcie_error_reporting(pdev);
4807 if (err) {
4808 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
4809 "0x%x\n", err);
4810 /* non-fatal, continue */
4811 }
4812
Auke Kok9a799d72007-09-15 14:07:45 -07004813 pci_set_master(pdev);
Wendy Xiongfb3b27b2008-04-23 11:09:24 -07004814 pci_save_state(pdev);
Auke Kok9a799d72007-09-15 14:07:45 -07004815
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08004816 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
Auke Kok9a799d72007-09-15 14:07:45 -07004817 if (!netdev) {
4818 err = -ENOMEM;
4819 goto err_alloc_etherdev;
4820 }
4821
Auke Kok9a799d72007-09-15 14:07:45 -07004822 SET_NETDEV_DEV(netdev, &pdev->dev);
4823
4824 pci_set_drvdata(pdev, netdev);
4825 adapter = netdev_priv(netdev);
4826
4827 adapter->netdev = netdev;
4828 adapter->pdev = pdev;
4829 hw = &adapter->hw;
4830 hw->back = adapter;
4831 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
4832
Jeff Kirsher05857982008-09-11 19:57:00 -07004833 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4834 pci_resource_len(pdev, 0));
Auke Kok9a799d72007-09-15 14:07:45 -07004835 if (!hw->hw_addr) {
4836 err = -EIO;
4837 goto err_ioremap;
4838 }
4839
4840 for (i = 1; i <= 5; i++) {
4841 if (pci_resource_len(pdev, i) == 0)
4842 continue;
4843 }
4844
Stephen Hemminger0edc3522008-11-19 22:24:29 -08004845 netdev->netdev_ops = &ixgbe_netdev_ops;
Auke Kok9a799d72007-09-15 14:07:45 -07004846 ixgbe_set_ethtool_ops(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07004847 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9a799d72007-09-15 14:07:45 -07004848 strcpy(netdev->name, pci_name(pdev));
4849
Auke Kok9a799d72007-09-15 14:07:45 -07004850 adapter->bd_number = cards_found;
4851
Auke Kok9a799d72007-09-15 14:07:45 -07004852 /* Setup hw api */
4853 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004854 hw->mac.type = ii->mac;
Auke Kok9a799d72007-09-15 14:07:45 -07004855
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004856 /* EEPROM */
4857 memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
4858 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
 4859	/* If the EEPROM is valid (bit 8 = 1), use the default read; otherwise fall back to bit-bang access */
4860 if (!(eec & (1 << 8)))
4861 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
4862
4863 /* PHY */
4864 memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
Donald Skidmorec4900be2008-11-20 21:11:42 -08004865 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
Ben Hutchings6b73e102009-04-29 08:08:58 +00004866 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
4867 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
4868 hw->phy.mdio.mmds = 0;
4869 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
4870 hw->phy.mdio.dev = netdev;
4871 hw->phy.mdio.mdio_read = ixgbe_mdio_read;
4872 hw->phy.mdio.mdio_write = ixgbe_mdio_write;
Donald Skidmorec4900be2008-11-20 21:11:42 -08004873
4874 /* set up this timer and work struct before calling get_invariants
4875 * which might start the timer
4876 */
4877 init_timer(&adapter->sfp_timer);
4878 adapter->sfp_timer.function = &ixgbe_sfp_timer;
4879 adapter->sfp_timer.data = (unsigned long) adapter;
4880
4881 INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004882
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004883 /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
4884 INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
4885
4886 /* a new SFP+ module arrival, called from GPI SDP2 context */
4887 INIT_WORK(&adapter->sfp_config_module_task,
4888 ixgbe_sfp_config_module_task);
4889
Auke Kok9a799d72007-09-15 14:07:45 -07004890 err = ii->get_invariants(hw);
Donald Skidmorec4900be2008-11-20 21:11:42 -08004891 if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
 4892		/* no module found yet - arm the SFP timer to poll for one to arrive */
4893 set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
4894 mod_timer(&adapter->sfp_timer,
4895 round_jiffies(jiffies + (2 * HZ)));
4896 err = 0;
4897 } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4898 DPRINTK(PROBE, ERR, "failed to load because an "
4899 "unsupported SFP+ module type was detected.\n");
Auke Kok9a799d72007-09-15 14:07:45 -07004900 goto err_hw_init;
Donald Skidmorec4900be2008-11-20 21:11:42 -08004901 } else if (err) {
4902 goto err_hw_init;
4903 }
Auke Kok9a799d72007-09-15 14:07:45 -07004904
4905 /* setup the private structure */
4906 err = ixgbe_sw_init(adapter);
4907 if (err)
4908 goto err_sw_init;
4909
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004910 /* reset_hw fills in the perm_addr as well */
4911 err = hw->mac.ops.reset_hw(hw);
PJ Waskiewicz04f165e2009-04-09 22:27:57 +00004912 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4913 dev_err(&adapter->pdev->dev, "failed to load because an "
4914 "unsupported SFP+ module type was detected.\n");
4915 goto err_sw_init;
4916 } else if (err) {
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004917 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
4918 goto err_sw_init;
4919 }
4920
Auke Kok9a799d72007-09-15 14:07:45 -07004921 netdev->features = NETIF_F_SG |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07004922 NETIF_F_IP_CSUM |
4923 NETIF_F_HW_VLAN_TX |
4924 NETIF_F_HW_VLAN_RX |
4925 NETIF_F_HW_VLAN_FILTER;
Auke Kok9a799d72007-09-15 14:07:45 -07004926
Jesse Brandeburge9990a92008-08-26 04:27:24 -07004927 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9a799d72007-09-15 14:07:45 -07004928 netdev->features |= NETIF_F_TSO;
Auke Kok9a799d72007-09-15 14:07:45 -07004929 netdev->features |= NETIF_F_TSO6;
Herbert Xu78b6f4c2009-01-18 21:49:45 -08004930 netdev->features |= NETIF_F_GRO;
Jeff Kirsherad31c402008-06-05 04:05:30 -07004931
Jesse Brandeburg45a5ead2009-04-27 22:36:35 +00004932 if (adapter->hw.mac.type == ixgbe_mac_82599EB)
4933 netdev->features |= NETIF_F_SCTP_CSUM;
4934
Jeff Kirsherad31c402008-06-05 04:05:30 -07004935 netdev->vlan_features |= NETIF_F_TSO;
4936 netdev->vlan_features |= NETIF_F_TSO6;
Jesse Brandeburg22f32b7a52008-08-26 04:27:18 -07004937 netdev->vlan_features |= NETIF_F_IP_CSUM;
Jeff Kirsherad31c402008-06-05 04:05:30 -07004938 netdev->vlan_features |= NETIF_F_SG;
4939
Alexander Duyck2f90b862008-11-20 20:52:10 -08004940 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
4941 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
4942
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08004943#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08004944 netdev->dcbnl_ops = &dcbnl_ops;
4945#endif
4946
Auke Kok9a799d72007-09-15 14:07:45 -07004947 if (pci_using_dac)
4948 netdev->features |= NETIF_F_HIGHDMA;
4949
Alexander Duyckf8212f92009-04-27 22:42:37 +00004950 if (adapter->flags & IXGBE_FLAG_RSC_ENABLED)
4951 netdev->features |= NETIF_F_LRO;
4952
Auke Kok9a799d72007-09-15 14:07:45 -07004953 /* make sure the EEPROM is good */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004954 if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
Auke Kok9a799d72007-09-15 14:07:45 -07004955 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
4956 err = -EIO;
4957 goto err_eeprom;
4958 }
4959
4960 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
4961 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
4962
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07004963 if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
4964 dev_err(&pdev->dev, "invalid MAC address\n");
Auke Kok9a799d72007-09-15 14:07:45 -07004965 err = -EIO;
4966 goto err_eeprom;
4967 }
4968
4969 init_timer(&adapter->watchdog_timer);
4970 adapter->watchdog_timer.function = &ixgbe_watchdog;
4971 adapter->watchdog_timer.data = (unsigned long)adapter;
4972
4973 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07004974 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07004975
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08004976 err = ixgbe_init_interrupt_scheme(adapter);
4977 if (err)
4978 goto err_sw_init;
Auke Kok9a799d72007-09-15 14:07:45 -07004979
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004980 switch (pdev->device) {
4981 case IXGBE_DEV_ID_82599_KX4:
Waskiewicz Jr, Peter P495dce12009-04-23 11:15:18 +00004982 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
4983 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004984 break;
4985 default:
4986 adapter->wol = 0;
4987 break;
4988 }
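	/* register the port as wakeup-capable with the PM core and reflect
	 * the detected WoL setting */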
4989 device_init_wakeup(&adapter->pdev->dev, true);
4990 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
4991
PJ Waskiewicz04f165e2009-04-09 22:27:57 +00004992 /* pick up the PCI bus settings for reporting later */
4993 hw->mac.ops.get_bus_info(hw);
4994
Auke Kok9a799d72007-09-15 14:07:45 -07004995 /* print bus type/speed/width info */
Johannes Berg7c510e42008-10-27 17:47:26 -07004996 dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
PJ Waskiewicze8e26352009-02-27 15:45:05 +00004997 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
4998 (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
4999 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
5000 (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
5001 (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005002 "Unknown"),
Johannes Berg7c510e42008-10-27 17:47:26 -07005003 netdev->dev_addr);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07005004 ixgbe_read_pba_num_generic(hw, &part_num);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005005 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
5006 dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
5007 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
5008 (part_num >> 8), (part_num & 0xff));
5009 else
5010 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
5011 hw->mac.type, hw->phy.type,
5012 (part_num >> 8), (part_num & 0xff));
Auke Kok9a799d72007-09-15 14:07:45 -07005013
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005014 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
Auke Kok0c254d82008-02-11 09:25:56 -08005015 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005016 "this card is not sufficient for optimal "
5017 "performance.\n");
Auke Kok0c254d82008-02-11 09:25:56 -08005018 dev_warn(&pdev->dev, "For optimal performance a x8 "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07005019 "PCI-Express slot is required.\n");
Auke Kok0c254d82008-02-11 09:25:56 -08005020 }
5021
Peter P Waskiewicz Jr34b03682009-02-05 23:54:42 -08005022 /* save off EEPROM version number */
5023 hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
5024
Auke Kok9a799d72007-09-15 14:07:45 -07005025 /* reset the hardware with the new settings */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07005026 hw->mac.ops.start_hw(hw);
5027
Auke Kok9a799d72007-09-15 14:07:45 -07005028 strcpy(netdev->name, "eth%d");
5029 err = register_netdev(netdev);
5030 if (err)
5031 goto err_register;
5032
Jesse Brandeburg54386462009-04-17 20:44:27 +00005033 /* carrier off reporting is important to ethtool even BEFORE open */
5034 netif_carrier_off(netdev);
5035
Jeff Garzik5dd2d332008-10-16 05:09:31 -04005036#ifdef CONFIG_IXGBE_DCA
Denis V. Lunev652f0932008-03-27 14:39:17 +03005037 if (dca_add_requester(&pdev->dev) == 0) {
Jeb Cramerbd0362d2008-03-03 15:04:02 -08005038 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
5039 /* always use CB2 mode, difference is masked
5040 * in the CB driver */
5041 IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
5042 ixgbe_setup_dca(adapter);
5043 }
5044#endif
Auke Kok9a799d72007-09-15 14:07:45 -07005045
5046 dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
5047 cards_found++;
5048 return 0;
5049
5050err_register:
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08005051 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005052err_hw_init:
Alexander Duyck7a921c92009-05-06 10:43:28 +00005053 ixgbe_clear_interrupt_scheme(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07005054err_sw_init:
5055err_eeprom:
Donald Skidmorec4900be2008-11-20 21:11:42 -08005056 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
5057 del_timer_sync(&adapter->sfp_timer);
5058 cancel_work_sync(&adapter->sfp_task);
PJ Waskiewicze8e26352009-02-27 15:45:05 +00005059 cancel_work_sync(&adapter->multispeed_fiber_task);
5060 cancel_work_sync(&adapter->sfp_config_module_task);
Auke Kok9a799d72007-09-15 14:07:45 -07005061 iounmap(hw->hw_addr);
5062err_ioremap:
5063 free_netdev(netdev);
5064err_alloc_etherdev:
5065 pci_release_regions(pdev);
5066err_pci_reg:
5067err_dma:
5068 pci_disable_device(pdev);
5069 return err;
5070}
5071
5072/**
5073 * ixgbe_remove - Device Removal Routine
5074 * @pdev: PCI device information struct
5075 *
5076 * ixgbe_remove is called by the PCI subsystem to alert the driver
 5077 * that it should release a PCI device. This could be caused by a
5078 * Hot-Plug event, or because the driver is going to be removed from
5079 * memory.
5080 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int err;

        set_bit(__IXGBE_DOWN, &adapter->state);
        /* clear the module not found bit to make sure the worker won't
         * reschedule
         */
        clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);

        del_timer_sync(&adapter->sfp_timer);
        cancel_work_sync(&adapter->watchdog_task);
        cancel_work_sync(&adapter->sfp_task);
        cancel_work_sync(&adapter->multispeed_fiber_task);
        cancel_work_sync(&adapter->sfp_config_module_task);
        flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                dca_remove_requester(&pdev->dev);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
        }

#endif
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);

        ixgbe_clear_interrupt_scheme(adapter);

        ixgbe_release_hw_control(adapter);

        iounmap(adapter->hw.hw_addr);
        pci_release_regions(pdev);

        DPRINTK(PROBE, INFO, "complete\n");

        free_netdev(netdev);

        err = pci_disable_pcie_error_reporting(pdev);
        if (err)
                dev_err(&pdev->dev,
                        "pci_disable_pcie_error_reporting failed 0x%x\n", err);

        pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

        if (netif_running(netdev))
                ixgbe_down(adapter);
        pci_disable_device(pdev);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        pci_ers_result_t result;
        int err;

        if (pci_enable_device(pdev)) {
                DPRINTK(PROBE, ERR,
                        "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);

                pci_wake_from_d3(pdev, false);

                ixgbe_reset(adapter);
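                /* Writing all ones to WUS (Wake Up Status) clears any wake
                 * events that may have been latched before the reset.
                 */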
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
                result = PCI_ERS_RESULT_RECOVERED;
        }

        err = pci_cleanup_aer_uncorrect_error_status(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
                /* non-fatal, continue */
        }

        return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev)) {
                if (ixgbe_up(adapter)) {
                        DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
                        return;
                }
        }

        netif_device_attach(netdev);
}

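/*
 * PCI error-recovery callbacks.  The PCI error recovery core invokes them
 * in sequence: .error_detected when a bus error is reported, .slot_reset
 * after the slot/link has been reset, and .resume once normal traffic may
 * start flowing again.
 */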
static struct pci_error_handlers ixgbe_err_handler = {
        .error_detected = ixgbe_io_error_detected,
        .slot_reset = ixgbe_io_slot_reset,
        .resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
        .name     = ixgbe_driver_name,
        .id_table = ixgbe_pci_tbl,
        .probe    = ixgbe_probe,
        .remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
        .suspend  = ixgbe_suspend,
        .resume   = ixgbe_resume,
#endif
        .shutdown = ixgbe_shutdown,
        .err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded.  It registers the driver with the PCI subsystem (and, when
 * CONFIG_IXGBE_DCA is enabled, registers the DCA event notifier).
 **/
static int __init ixgbe_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
               ixgbe_driver_string, ixgbe_driver_version);

        printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
        dca_register_notify(&dca_notifier);
#endif

        ret = pci_register_driver(&ixgbe_driver);
        return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
        dca_unregister_notify(&dca_notifier);
#endif
        pci_unregister_driver(&ixgbe_driver);
}

#ifdef CONFIG_IXGBE_DCA
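/*
 * DCA provider add/remove events arrive through this notifier; each event is
 * forwarded to every bound ixgbe device via __ixgbe_notify_dca() so the
 * adapters can enable or disable Direct Cache Access accordingly.
 */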
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
                            void *p)
{
        int ret_val;

        ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
                                         __ixgbe_notify_dca);

        return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}

#endif /* CONFIG_IXGBE_DCA */
#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
        struct ixgbe_adapter *adapter = hw->back;
        return adapter->netdev->name;
}

#endif
module_exit(ixgbe_exit_module);

/* ixgbe_main.c */