/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.3.18-k4"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
        "Copyright (c) 1999-2007 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
         board_82598 },

        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next          = NULL,
        .priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

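/*
 * Note: CTRL_EXT.DRV_LOAD acts as a driver/firmware handshake bit; the two
 * helpers below clear it on teardown (firmware takes over the hardware) and
 * set it on bring-up (the driver owns the hardware).
 */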
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * @hw: pointer to the hardware structure
 *
 * Used by the hardware layer to print debugging information.
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
        struct ixgbe_adapter *adapter = hw->back;
        struct net_device *netdev = adapter->netdev;
        return netdev->name;
}
#endif

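/*
 * Program one entry of the IVAR (interrupt vector allocation) table.  Each
 * 32-bit IVAR register holds four 8-bit entries, so int_alloc_entry >> 2
 * selects the register and (int_alloc_entry & 0x3) selects the byte within
 * it; IXGBE_IVAR_ALLOC_VAL marks the entry as valid.
 */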
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
                           u8 msix_vector)
{
        u32 ivar, index;

        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
        index = (int_alloc_entry >> 2) & 0x1F;
        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
        ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
        ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
        if (tx_buffer_info->dma) {
                pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
                               tx_buffer_info->length, PCI_DMA_TODEVICE);
                tx_buffer_info->dma = 0;
        }
        if (tx_buffer_info->skb) {
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        /* tx_buffer_info must be completely set up in the transmit path */
}

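/*
 * A Tx hang is reported only when all of the following hold: the ring is
 * not empty (head != tail), the oldest outstanding descriptor is more than
 * a second old (time_stamp + HZ), and transmission has not merely been
 * paused by link flow control (TFCS.TXOFF clear).
 */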
static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 head, tail;

        /* Detect a transmit hang in hardware; this serializes the
         * check with the clearing of time_stamp and movement of eop */
        head = IXGBE_READ_REG(hw, tx_ring->head);
        tail = IXGBE_READ_REG(hw, tx_ring->tail);
        adapter->detect_tx_hung = false;
        if ((head != tail) &&
            tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
                /* detected Tx unit hang */
                union ixgbe_adv_tx_desc *tx_desc;
                tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
                DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
                        "  Tx Queue      <%d>\n"
                        "  TDH, TDT      <%x>, <%x>\n"
                        "  next_to_use   <%x>\n"
                        "  next_to_clean <%x>\n"
                        "tx_buffer_info[next_to_clean]\n"
                        "  time_stamp    <%lx>\n"
                        "  jiffies       <%lx>\n",
                        tx_ring->queue_index,
                        head, tail,
                        tx_ring->next_to_use, eop,
                        tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                return true;
        }

        return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                          (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
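/*
 * Worked example (illustrative, assuming 4 KiB pages): one descriptor can
 * carry at most 1 << 14 = 16384 bytes, so TXD_USE_COUNT(16384) = 1 and
 * TXD_USE_COUNT(PAGE_SIZE) = 1.  With MAX_SKB_FRAGS = 18 on such a system,
 * DESC_NEEDED = 1 + 18 + 1 = 20 descriptors for a worst-case frame.
 */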

#define GET_TX_HEAD_FROM_RING(ring) (\
        *(volatile u32 *) \
        ((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
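/*
 * The macro above implements Tx head write-back: the hardware writes the
 * index of the last completed descriptor into the u32 that sits just past
 * the end of the ring, so the cleanup path can read completion state from
 * memory instead of issuing a TDH register read.
 */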
static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *tx_ring)
{
        union ixgbe_adv_tx_desc *tx_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;
        unsigned int i;
        u32 head, oldhead;
        unsigned int count = 0;
        unsigned int total_bytes = 0, total_packets = 0;

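        /* rmb() keeps the volatile head write-back load below from being
         * reordered with earlier loads, so we observe the latest value */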
        rmb();
        head = GET_TX_HEAD_FROM_RING(tx_ring);
        head = le32_to_cpu(head);
        i = tx_ring->next_to_clean;
        while (1) {
                while (i != head) {
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        skb = tx_buffer_info->skb;

                        if (skb) {
                                unsigned int segs, bytecount;

                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
                                            skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }

                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);

                        i++;
                        if (i == tx_ring->count)
                                i = 0;

                        count++;
                        if (count == tx_ring->count)
                                goto done_cleaning;
                }
                oldhead = head;
                rmb();
                head = GET_TX_HEAD_FROM_RING(tx_ring);
                head = le32_to_cpu(head);
                if (head == oldhead)
                        goto done_cleaning;
        } /* while (1) */

done_cleaning:
        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
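        /* i.e. wake only when there is room for at least two worst-case
         * frames, which should keep the queue from rapidly toggling
         * between stopped and woken */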
        if (unlikely(count && netif_carrier_ok(netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
                        ++adapter->restart_queue;
                }
        }

        if (adapter->detect_tx_hung) {
                if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
                        /* schedule immediate reset if we believe we hung */
                        DPRINTK(PROBE, INFO,
                                "tx hang %d detected, resetting adapter\n",
                                adapter->tx_timeout_count + 1);
                        ixgbe_tx_timeout(adapter->netdev);
                }
        }

        /* re-arm the interrupt */
        if ((total_packets >= tx_ring->work_limit) ||
            (count == tx_ring->count))
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);

        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        adapter->net_stats.tx_bytes += total_bytes;
        adapter->net_stats.tx_packets += total_packets;
        return (total_packets ? true : false);
}

#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
        u32 rxctrl;
        int cpu = get_cpu();
        int q = rx_ring - adapter->rx_ring;

        if (rx_ring->cpu != cpu) {
                rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
                rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
                rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
                rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
                rx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
        u32 txctrl;
        int cpu = get_cpu();
        int q = tx_ring - adapter->tx_ring;

        if (tx_ring->cpu != cpu) {
                txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
                txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
                txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
                tx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
        int i;

        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
                return;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].cpu = -1;
                ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i].cpu = -1;
                ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
        }
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        unsigned long event = *(unsigned long *)data;

        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if we're already enabled, don't do it again */
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        break;
                /* Always use CB2 mode; the difference is masked
                 * in the CB driver. */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
                if (dca_add_requester(dev) == 0) {
                        adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                        ixgbe_setup_dca(adapter);
                        break;
                }
                /* Fall Through since DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                        dca_remove_requester(dev);
                        adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
                }
                break;
        }

        return 0;
}

#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

        if (adapter->netdev->features & NETIF_F_LRO &&
            skb->ip_summed == CHECKSUM_UNNECESSARY) {
                if (adapter->vlgrp && is_vlan)
                        lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
                                                     adapter->vlgrp, tag,
                                                     rx_desc);
                else
                        lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
                ring->lro_used = true;
        } else {
                if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                        if (adapter->vlgrp && is_vlan)
                                vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
                        else
                                netif_receive_skb(skb);
                } else {
                        if (adapter->vlgrp && is_vlan)
                                vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                        else
                                netif_rx(skb);
                }
        }
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     u32 status_err, struct sk_buff *skb)
{
        skb->ip_summed = CHECKSUM_NONE;

        /* Rx csum disabled */
        if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;

        /* if the hardware checked the IP checksum and flagged an error */
        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }

        if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;

        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                adapter->hw_csum_rx_error++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
}

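/*
 * In packet-split mode the hardware is given two buffers per descriptor:
 * hdr_addr (bi->dma) receives the packet header in the skb's data area and
 * pkt_addr (bi->page_dma) receives the payload in half of a recycled page.
 * Flipping bi->page_offset by PAGE_SIZE / 2 lets one page serve two
 * descriptors before it must be replaced.
 */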
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        unsigned int i;
        unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

        i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

                if (!bi->page_dma &&
                    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
                        if (!bi->page) {
                                bi->page = alloc_page(GFP_ATOMIC);
                                if (!bi->page) {
                                        adapter->alloc_rx_page_failed++;
                                        goto no_buffers;
                                }
                                bi->page_offset = 0;
                        } else {
                                /* use a half page if we're re-using */
                                bi->page_offset ^= (PAGE_SIZE / 2);
                        }

                        bi->page_dma = pci_map_page(pdev, bi->page,
                                                    bi->page_offset,
                                                    (PAGE_SIZE / 2),
                                                    PCI_DMA_FROMDEVICE);
                }

                if (!bi->skb) {
                        struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz);

                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        /*
                         * Make buffer alignment 2 beyond a 16 byte boundary
                         * this will result in a 16 byte aligned IP header after
                         * the 14 byte MAC header is removed
                         */
                        skb_reserve(skb, NET_IP_ALIGN);

                        bi->skb = skb;
                        bi->dma = pci_map_single(pdev, skb->data, bufsz,
                                                 PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                bi = &rx_ring->rx_buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                if (i-- == 0)
                        i = (rx_ring->count - 1);

                /*
                 * Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                writel(i, adapter->hw.hw_addr + rx_ring->tail);
        }
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i;
        u32 len, staterr;
        u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;

        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;

                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                              IXGBE_RXDADV_HDRBUFLEN_SHIFT;
                        if (hdr_info & IXGBE_RXDADV_SPH)
                                adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                } else {
                        len = le16_to_cpu(rx_desc->wb.upper.length);
                }

                cleaned = true;
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                if (len && !skb_shinfo(skb)->nr_frags) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len + NET_IP_ALIGN,
                                         PCI_DMA_FROMDEVICE);
                        skb_put(skb, len);
                }

                if (upper_len) {
                        pci_unmap_page(pdev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
                                           rx_buffer_info->page_offset,
                                           upper_len);

                        if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
                            (page_count(rx_buffer_info->page) != 1))
                                rx_buffer_info->page = NULL;
                        else
                                get_page(rx_buffer_info->page);

                        skb->len += upper_len;
                        skb->data_len += upper_len;
                        skb->truesize += upper_len;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                next_buffer = &rx_ring->rx_buffer_info[i];

                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);

                cleaned_count++;
                if (staterr & IXGBE_RXD_STAT_EOP) {
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
                        rx_buffer_info->skb = next_buffer->skb;
                        rx_buffer_info->dma = next_buffer->dma;
                        next_buffer->skb = skb;
                        next_buffer->dma = 0;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }

                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbe_rx_checksum(adapter, staterr, skb);

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                skb->protocol = eth_type_trans(skb, netdev);
                ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
                netdev->last_rx = jiffies;

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware; one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = next_buffer;

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        if (rx_ring->lro_used) {
                lro_flush_all(&rx_ring->lro_mgr);
                rx_ring->lro_used = false;
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        adapter->net_stats.rx_bytes += total_rx_bytes;
        adapter->net_stats.rx_packets += total_rx_packets;

        return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
        struct ixgbe_q_vector *q_vector;
        int i, j, q_vectors, v_idx, r_idx;
        u32 mask;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                q_vector = &adapter->q_vector[v_idx];
                /* XXX for_each_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                       adapter->num_rx_queues);

                for (i = 0; i < q_vector->rxr_count; i++) {
                        j = adapter->rx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
                }
                r_idx = find_first_bit(q_vector->txr_idx,
                                       adapter->num_tx_queues);

                for (i = 0; i < q_vector->txr_count; i++) {
                        j = adapter->tx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
                }

                /* if this is a tx-only vector, halve the interrupt rate */
                if (q_vector->txr_count && !q_vector->rxr_count)
                        q_vector->eitr = (adapter->eitr_param >> 1);
                else
                        /* rx only */
                        q_vector->eitr = adapter->eitr_param;

                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
                                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
        }

        ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
        mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
        unsigned int retval = itr_setting;
        u32 timepassed_us;
        u64 bytes_perint;

        if (packets == 0)
                goto update_itr_done;

        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = 1000000/eitr;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */

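        /* bytes/usec is numerically equal to MB/s, so eitr_low and
         * eitr_high act as thresholds in MB/s against the bands in the
         * table above */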
        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > adapter->eitr_low)
                        retval = low_latency;
                break;
        case low_latency:
                if (bytes_perint > adapter->eitr_high)
                        retval = bulk_latency;
                else if (bytes_perint <= adapter->eitr_low)
                        retval = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= adapter->eitr_high)
                        retval = low_latency;
                break;
        }

update_itr_done:
        return retval;
}

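/*
 * v_idx below recovers this vector's index from the q_vector's position in
 * the adapter's q_vector array (pointer arithmetic); it is only used to
 * address the matching EITR register.
 */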
static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 new_itr;
        u8 current_itr, ret_itr;
        int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
                              sizeof(struct ixgbe_q_vector);
        struct ixgbe_ring *rx_ring, *tx_ring;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->tx_itr,
                                           tx_ring->total_packets,
                                           tx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector, then use that result */
                q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
                                    q_vector->tx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->rx_itr,
                                           rx_ring->total_packets,
                                           rx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector, then use that result */
                q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
                                    q_vector->rx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
        default:
                new_itr = 8000;
                break;
        }

        if (new_itr != q_vector->eitr) {
                u32 itr_reg;
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
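                /* i.e. an exponentially weighted moving average with a
                 * 10% weight on the new sample, damping abrupt changes */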
                q_vector->eitr = new_itr;
                itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
                /* must write high and low 16 bits to reset counter */
                DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
                        itr_reg);
                IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
        }

        return;
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        adapter->lsc_int++;
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->link_check_timeout = jiffies;
        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
                schedule_work(&adapter->watchdog_task);
        }
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);

        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

        return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
        struct ixgbe_q_vector *q_vector = data;
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *tx_ring;
        int i, r_idx;

        if (!q_vector->txr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        ixgbe_update_tx_dca(adapter, tx_ring);
#endif
                tx_ring->total_bytes = 0;
                tx_ring->total_packets = 0;
                ixgbe_clean_tx_irq(adapter, tx_ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
        struct ixgbe_q_vector *q_vector = data;
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *rx_ring;
        int r_idx;
        int i;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        if (!q_vector->rxr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);
        /* disable interrupts on this vector only */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
        netif_rx_schedule(adapter->netdev, &q_vector->napi);

        return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
        ixgbe_msix_clean_rx(irq, data);
        ixgbe_msix_clean_tx(irq, data);

        return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
        struct ixgbe_q_vector *q_vector =
                container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *rx_ring;
        int work_done = 0;
        long r_idx;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                ixgbe_update_rx_dca(adapter, rx_ring);
#endif

        ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);

        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                netif_rx_complete(adapter->netdev, napi);
                if (adapter->itr_setting & 3)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
        }

        return work_done;
}

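/*
 * The two helpers below record a ring-to-vector assignment in both
 * directions: the ring's index is set in the vector's rxr_idx/txr_idx
 * bitmap, and the ring stores its vector as a one-hot mask (1 << v_idx)
 * that can be written directly to the EIMS/EIMC/EICS cause registers.
 */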
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
        a->q_vector[v_idx].adapter = a;
        set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
        a->q_vector[v_idx].rxr_count++;
        a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
        a->q_vector[v_idx].adapter = a;
        set_bit(r_idx, a->q_vector[v_idx].txr_idx);
        a->q_vector[v_idx].txr_count++;
        a->tx_ring[r_idx].v_idx = 1 << v_idx;
}

/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
                                      int vectors)
{
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int i, j;
        int rqpv, tqpv;
        int err = 0;

        /* No mapping required if MSI-X is disabled. */
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                goto out;

        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
        if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);

                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);

                goto out;
        }

        /*
         * If we don't have enough vectors for a 1-to-1
         * mapping, we'll have to group them so there are
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
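        /*
         * Worked example (hypothetical counts): 8 Rx rings over 5 vectors
         * gives rqpv = ceil(8/5) = 2, then ceil(6/4) = 2, ceil(4/3) = 2,
         * ceil(2/2) = 1 and ceil(1/1) = 1, i.e. a 2/2/2/1/1 split with no
         * ring left unmapped.
         */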
        for (i = v_start; i < vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}

/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        irqreturn_t (*handler)(int, void *);
        int i, vector, q_vectors, err;

        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* Map the Tx/Rx rings to the vectors we were allotted. */
        err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
        if (err)
                goto out;

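/* Pick each vector's handler by the ring types it services: Tx-only,
 * Rx-only, or both Tx and Rx on the same vector. */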
#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                         &ixgbe_msix_clean_many)
        for (vector = 0; vector < q_vectors; vector++) {
                handler = SET_HANDLER(&adapter->q_vector[vector]);
                sprintf(adapter->name[vector], "%s:v%d-%s",
                        netdev->name, vector,
                        (handler == &ixgbe_msix_clean_rx) ? "Rx" :
                        ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
                err = request_irq(adapter->msix_entries[vector].vector,
                                  handler, 0, adapter->name[vector],
                                  &(adapter->q_vector[vector]));
                if (err) {
                        DPRINTK(PROBE, ERR,
                                "request_irq failed for MSIX interrupt "
                                "Error: %d\n", err);
                        goto free_queue_irqs;
                }
        }

        sprintf(adapter->name[vector], "%s:lsc", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
        if (err) {
                DPRINTK(PROBE, ERR,
                        "request_irq for msix_lsc failed: %d\n", err);
                goto free_queue_irqs;
        }

        return 0;

free_queue_irqs:
        for (i = vector - 1; i >= 0; i--)
                free_irq(adapter->msix_entries[--vector].vector,
                         &(adapter->q_vector[i]));
        adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
out:
        return err;
}

static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_q_vector *q_vector = adapter->q_vector;
        u8 current_itr;
        u32 new_itr = q_vector->eitr;
        struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
        struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

        q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
                                            q_vector->tx_itr,
                                            tx_ring->total_packets,
                                            tx_ring->total_bytes);
        q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
                                            q_vector->rx_itr,
                                            rx_ring->total_packets,
                                            rx_ring->total_bytes);

        current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
                new_itr = 8000;
                break;
        default:
                break;
        }

        if (new_itr != q_vector->eitr) {
                u32 itr_reg;
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
                q_vector->eitr = new_itr;
                itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
                /* must write high and low 16 bits to reset counter */
                IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
        }

        return;
}

static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr;

        /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read,
         * therefore no explicit interrupt disable is necessary */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        if (!eicr) {
                /* shared interrupt alert!
                 * make sure interrupts are enabled because the read will
                 * have disabled interrupts due to EIAM */
                ixgbe_irq_enable(adapter);
                return IRQ_NONE; /* Not our interrupt */
        }

        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);

        if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
                adapter->tx_ring[0].total_packets = 0;
                adapter->tx_ring[0].total_bytes = 0;
                adapter->rx_ring[0].total_packets = 0;
                adapter->rx_ring[0].total_bytes = 0;
                /* would disable interrupts here but EIAM disabled it */
                __netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
        }

        return IRQ_HANDLED;
}

Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001295static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
1296{
1297 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1298
1299 for (i = 0; i < q_vectors; i++) {
1300 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
1301 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1302 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1303 q_vector->rxr_count = 0;
1304 q_vector->txr_count = 0;
1305 }
1306}
1307
Auke Kok9a799d72007-09-15 14:07:45 -07001308/**
1309 * ixgbe_request_irq - initialize interrupts
1310 * @adapter: board private structure
1311 *
1312 * Attempts to configure interrupts using the best available
1313 * capabilities of the hardware and kernel.
1314 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001315static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07001316{
1317 struct net_device *netdev = adapter->netdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001318 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07001319
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001320 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1321 err = ixgbe_request_msix_irqs(adapter);
1322 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1323 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
1324 netdev->name, netdev);
1325 } else {
1326 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
1327 netdev->name, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07001328 }
1329
Auke Kok9a799d72007-09-15 14:07:45 -07001330 if (err)
1331 DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
1332
Auke Kok9a799d72007-09-15 14:07:45 -07001333 return err;
1334}
1335
1336static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
1337{
1338 struct net_device *netdev = adapter->netdev;
1339
1340 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001341 int i, q_vectors;
Auke Kok9a799d72007-09-15 14:07:45 -07001342
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001343 q_vectors = adapter->num_msix_vectors;
1344
1345 i = q_vectors - 1;
Auke Kok9a799d72007-09-15 14:07:45 -07001346 free_irq(adapter->msix_entries[i].vector, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07001347
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001348 i--;
1349 for (; i >= 0; i--) {
1350 free_irq(adapter->msix_entries[i].vector,
1351 &(adapter->q_vector[i]));
1352 }
1353
1354 ixgbe_reset_q_vectors(adapter);
1355 } else {
1356 free_irq(adapter->pdev->irq, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07001357 }
1358}
1359
1360/**
1361 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
1362 * @adapter: board private structure
1363 **/
1364static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1365{
Auke Kok9a799d72007-09-15 14:07:45 -07001366 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1367 IXGBE_WRITE_FLUSH(&adapter->hw);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001368 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1369 int i;
1370 for (i = 0; i < adapter->num_msix_vectors; i++)
1371 synchronize_irq(adapter->msix_entries[i].vector);
1372 } else {
1373 synchronize_irq(adapter->pdev->irq);
1374 }
Auke Kok9a799d72007-09-15 14:07:45 -07001375}
1376
1377/**
1378 * ixgbe_irq_enable - Enable default interrupt generation settings
1379 * @adapter: board private structure
1380 **/
1381static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1382{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001383 u32 mask;
1384 mask = IXGBE_EIMS_ENABLE_MASK;
1385 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001386 IXGBE_WRITE_FLUSH(&adapter->hw);
Auke Kok9a799d72007-09-15 14:07:45 -07001387}
1388
1389/**
1390 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
1391 *
1392 **/
1393static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1394{
Auke Kok9a799d72007-09-15 14:07:45 -07001395 struct ixgbe_hw *hw = &adapter->hw;
1396
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001397 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001398 EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
Auke Kok9a799d72007-09-15 14:07:45 -07001399
1400 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001401 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
1402
1403 map_vector_to_rxq(adapter, 0, 0);
1404 map_vector_to_txq(adapter, 0, 0);
1405
1406 DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
Auke Kok9a799d72007-09-15 14:07:45 -07001407}
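
/* Worked example, assuming EITR_INTS_PER_SEC_TO_REG() converts an
 * interrupts-per-second target into the EITR interval field: the
 * default eitr_param of 20000 ints/sec corresponds to an interval of
 * 1000000 / 20000 = 50 usec between throttled interrupts. */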
1408
1409/**
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001410 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
Auke Kok9a799d72007-09-15 14:07:45 -07001411 * @adapter: board private structure
1412 *
1413 * Configure the Tx unit of the MAC after a reset.
1414 **/
1415static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1416{
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001417 u64 tdba, tdwba;
Auke Kok9a799d72007-09-15 14:07:45 -07001418 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001419 u32 i, j, tdlen, txctrl;
Auke Kok9a799d72007-09-15 14:07:45 -07001420
1421 /* Setup the HW Tx Head and Tail descriptor pointers */
1422 for (i = 0; i < adapter->num_tx_queues; i++) {
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001423 struct ixgbe_ring *ring = &adapter->tx_ring[i];
1424 j = ring->reg_idx;
1425 tdba = ring->dma;
1426 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001427 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001428 (tdba & DMA_32BIT_MASK));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001429 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001430 tdwba = ring->dma +
1431 (ring->count * sizeof(union ixgbe_adv_tx_desc));
1432 tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
1433 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
1434 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001435 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
1436 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
1437 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1438 adapter->tx_ring[i].head = IXGBE_TDH(j);
1439 adapter->tx_ring[i].tail = IXGBE_TDT(j);
1440 /* Disable Tx Head Writeback RO bit, since this hoses
1441 * bookkeeping if things aren't delivered in order.
1442 */
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001443 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001444 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001445 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
Auke Kok9a799d72007-09-15 14:07:45 -07001446 }
Auke Kok9a799d72007-09-15 14:07:45 -07001447}
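
/* Head write-back, by example: for a 512-entry ring of 16-byte
 * descriptors, tdwba = dma + 512 * 16, i.e. the hardware spills its
 * head pointer to the word immediately past the descriptor ring, and
 * the low bit (IXGBE_TDWBAL_HEAD_WB_ENABLE) switches the feature on.
 * The Tx cleanup path can then poll that cacheable word instead of
 * issuing a (slow) TDH register read. */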
1448
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001449#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
Auke Kok9a799d72007-09-15 14:07:45 -07001450
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001451static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
1452{
1453 struct ixgbe_ring *rx_ring;
1454 u32 srrctl;
1455 int queue0;
Alexander Duyck3be1adf2008-08-30 00:29:10 -07001456 unsigned long mask;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001457
Alexander Duyck3be1adf2008-08-30 00:29:10 -07001458 /* program one srrctl register per VMDq index */
1459 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
1460 long shift, len;
1461 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
1462 len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
1463 shift = find_first_bit(&mask, len);
1464 queue0 = index & mask;
1465 index = (index & mask) >> shift;
1466 /* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001467 } else {
Alexander Duyck3be1adf2008-08-30 00:29:10 -07001468 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
1469 queue0 = index & mask;
1470 index = index & mask;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001471 }
Alexander Duyck3be1adf2008-08-30 00:29:10 -07001472
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001473 rx_ring = &adapter->rx_ring[queue0];
1474
1475 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
1476
1477 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1478 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1479
1480 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1481 srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1482 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1483 srrctl |= ((IXGBE_RX_HDR_SIZE <<
1484 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1485 IXGBE_SRRCTL_BSIZEHDR_MASK);
1486 } else {
1487 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1488
1489 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1490 srrctl |= IXGBE_RXBUFFER_2048 >>
1491 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1492 else
1493 srrctl |= rx_ring->rx_buf_len >>
1494 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1495 }
1496 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
1497}
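
/* Encoding example, assuming BSIZEPKT is in 1 KB units
 * (IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10): a 2 KB buffer is programmed as
 * IXGBE_RXBUFFER_2048 >> 10 = 2.  In packet-split mode the header
 * buffer size is shifted into the BSIZEHDR field with the shift of 2
 * defined above and masked to the field width. */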
1498
Auke Kok9a799d72007-09-15 14:07:45 -07001499/**
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07001500 * ixgbe_get_skb_hdr - helper function for LRO header processing
1501 * @skb: pointer to sk_buff to be added to LRO packet
1502 * @iphdr: pointer to ip header structure
1503 * @tcph: pointer to tcp header structure
1504 * @hdr_flags: pointer to header flags
1505 * @priv: private data
1506 **/
1507static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
1508 u64 *hdr_flags, void *priv)
1509{
1510 union ixgbe_adv_rx_desc *rx_desc = priv;
1511
1512 /* Verify that this is a valid IPv4 TCP packet */
Jesse Brandeburge9990a92008-08-26 04:27:24 -07001513 if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
1514 (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07001515 return -1;
1516
1517 /* Set network headers */
1518 skb_reset_network_header(skb);
1519 skb_set_transport_header(skb, ip_hdrlen(skb));
1520 *iphdr = ip_hdr(skb);
1521 *tcph = tcp_hdr(skb);
1522 *hdr_flags = LRO_IPV4 | LRO_TCP;
1523 return 0;
1524}
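
/* Only clean IPv4/TCP frames qualify for aggregation: a descriptor
 * flagged, say, IPV4 | UDP fails the check above, the callback returns
 * -1, and the LRO manager passes that skb up the stack unaggregated. */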
1525
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001526#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1527 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
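
/* e.g. with 4 KB pages: PAGE_USE_COUNT(1500) == 1, while a 9000-byte
 * jumbo MTU gives PAGE_USE_COUNT(9000) == (9000 >> 12) + 1 == 3. */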
1528
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07001529/**
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001530 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
Auke Kok9a799d72007-09-15 14:07:45 -07001531 * @adapter: board private structure
1532 *
1533 * Configure the Rx unit of the MAC after a reset.
1534 **/
1535static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1536{
1537 u64 rdba;
1538 struct ixgbe_hw *hw = &adapter->hw;
1539 struct net_device *netdev = adapter->netdev;
1540 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001541 int i, j;
Auke Kok9a799d72007-09-15 14:07:45 -07001542 u32 rdlen, rxctrl, rxcsum;
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001543 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
1544 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
1545 0x6A3E67EA, 0x14364D17, 0x3BED200D};
Auke Kok9a799d72007-09-15 14:07:45 -07001546 u32 fctrl, hlreg0;
Auke Kok9a799d72007-09-15 14:07:45 -07001547 u32 pages;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001548 u32 reta = 0, mrqc;
1549 u32 rdrxctl;
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001550 int rx_buf_len;
Auke Kok9a799d72007-09-15 14:07:45 -07001551
1552 /* Decide whether to use packet split mode or not */
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07001553 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
Auke Kok9a799d72007-09-15 14:07:45 -07001554
1555 /* Set the RX buffer length according to the mode */
1556 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001557 rx_buf_len = IXGBE_RX_HDR_SIZE;
Auke Kok9a799d72007-09-15 14:07:45 -07001558 } else {
1559 if (netdev->mtu <= ETH_DATA_LEN)
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001560 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Auke Kok9a799d72007-09-15 14:07:45 -07001561 else
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001562 rx_buf_len = ALIGN(max_frame, 1024);
Auke Kok9a799d72007-09-15 14:07:45 -07001563 }
1564
1565 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1566 fctrl |= IXGBE_FCTRL_BAM;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001567 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
Auke Kok9a799d72007-09-15 14:07:45 -07001568 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1569
1570 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1571 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1572 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
1573 else
1574 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
1575 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
1576
1577 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1578
Auke Kok9a799d72007-09-15 14:07:45 -07001579 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1580 /* disable receives while setting up the descriptors */
1581 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1582 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
1583
1584 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1585 * the Base and Length of the Rx Descriptor Ring */
1586 for (i = 0; i < adapter->num_rx_queues; i++) {
1587 rdba = adapter->rx_ring[i].dma;
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001588 j = adapter->rx_ring[i].reg_idx;
1589 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
1590 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
1591 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
1592 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
1593 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
1594 adapter->rx_ring[i].head = IXGBE_RDH(j);
1595 adapter->rx_ring[i].tail = IXGBE_RDT(j);
1596 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
Jesse Brandeburge9990a92008-08-26 04:27:24 -07001597		/* Initial LRO settings */
1598 adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
1599 adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
1600 adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
1601 adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
1602 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1603 adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
1604 adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
1605 adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1606 adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001607
1608 ixgbe_configure_srrctl(adapter, j);
Auke Kok9a799d72007-09-15 14:07:45 -07001609 }
1610
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001611 /*
1612 * For VMDq support of different descriptor types or
1613 * buffer sizes through the use of multiple SRRCTL
1614 * registers, RDRXCTL.MVMEN must be set to 1
1615 *
1616 * also, the manual doesn't mention it clearly but DCA hints
1617 * will only use queue 0's tags unless this bit is set. Side
1618 * effects of setting this bit are only that SRRCTL must be
1619 * fully programmed [0..15]
1620 */
1621 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1622 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
1623 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1624
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07001625
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001626 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Auke Kok9a799d72007-09-15 14:07:45 -07001627 /* Fill out redirection table */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001628 for (i = 0, j = 0; i < 128; i++, j++) {
1629 if (j == adapter->ring_feature[RING_F_RSS].indices)
1630 j = 0;
1631 /* reta = 4-byte sliding window of
1632 * 0x00..(indices-1)(indices-1)00..etc. */
1633 reta = (reta << 8) | (j * 0x11);
1634 if ((i & 3) == 3)
1635 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
Auke Kok9a799d72007-09-15 14:07:45 -07001636 }
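		/* Example with indices == 4: the bytes shifted in are
		 * 0x00, 0x11, 0x22, 0x33, 0x00, ... and every fourth
		 * iteration flushes the accumulated 32 bits into one
		 * RETA register, so the 128-entry table spreads flows
		 * round-robin over the four RSS queues.  The 0x11
		 * multiplier duplicates the queue index into both
		 * nibbles of each byte. */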
1637
1638 /* Fill out hash function seeds */
1639 for (i = 0; i < 10; i++)
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001640 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07001641
1642 mrqc = IXGBE_MRQC_RSSEN
1643 /* Perform hash on these packet types */
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001644 | IXGBE_MRQC_RSS_FIELD_IPV4
1645 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
1646 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
1647 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
1648 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
1649 | IXGBE_MRQC_RSS_FIELD_IPV6
1650 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
1651 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
1652 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
Auke Kok9a799d72007-09-15 14:07:45 -07001653 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
Auke Kok9a799d72007-09-15 14:07:45 -07001654 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001655
1656 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1657
1658 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
1659 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
1660 /* Disable indicating checksum in descriptor, enables
1661 * RSS hash */
1662 rxcsum |= IXGBE_RXCSUM_PCSD;
1663 }
1664 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
1665 /* Enable IPv4 payload checksum for UDP fragments
1666 * if PCSD is not set */
1667 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1668 }
1669
1670 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
Auke Kok9a799d72007-09-15 14:07:45 -07001671}
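
/* Note on the rxcsum logic above: with PCSD set the descriptor's
 * checksum field is repurposed to carry the RSS hash, so IPPCSE (the
 * IPv4 payload checksum aid for UDP fragments) is only enabled in the
 * !PCSD case -- the two uses of that descriptor field are mutually
 * exclusive. */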
1672
1673static void ixgbe_vlan_rx_register(struct net_device *netdev,
1674 struct vlan_group *grp)
1675{
1676 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1677 u32 ctrl;
1678
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001679 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1680 ixgbe_irq_disable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001681 adapter->vlgrp = grp;
1682
1683 if (grp) {
1684 /* enable VLAN tag insert/strip */
1685 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
Patrick McHardy746b9f02008-07-16 20:15:45 -07001686 ctrl |= IXGBE_VLNCTRL_VME;
Auke Kok9a799d72007-09-15 14:07:45 -07001687 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1688 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1689 }
1690
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001691 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1692 ixgbe_irq_enable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001693}
1694
1695static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1696{
1697 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1698
1699 /* add VID to filter table */
1700 ixgbe_set_vfta(&adapter->hw, vid, 0, true);
1701}
1702
1703static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1704{
1705 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1706
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001707 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1708 ixgbe_irq_disable(adapter);
1709
Auke Kok9a799d72007-09-15 14:07:45 -07001710 vlan_group_set_device(adapter->vlgrp, vid, NULL);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001711
1712 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1713 ixgbe_irq_enable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001714
1715 /* remove VID from filter table */
1716 ixgbe_set_vfta(&adapter->hw, vid, 0, false);
1717}
1718
1719static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
1720{
1721 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1722
1723 if (adapter->vlgrp) {
1724 u16 vid;
1725 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1726 if (!vlan_group_get_device(adapter->vlgrp, vid))
1727 continue;
1728 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
1729 }
1730 }
1731}
1732
Christopher Leech2c5645c2008-08-26 04:27:02 -07001733static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
1734{
1735 struct dev_mc_list *mc_ptr;
1736 u8 *addr = *mc_addr_ptr;
1737 *vmdq = 0;
1738
1739 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
1740 if (mc_ptr->next)
1741 *mc_addr_ptr = mc_ptr->next->dmi_addr;
1742 else
1743 *mc_addr_ptr = NULL;
1744
1745 return addr;
1746}
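
/* Usage sketch (illustrative, not the hw-layer source): the
 * update_{uc,mc}_addr_list helpers below are handed the head of the
 * netdev address chain plus this iterator, and are assumed to walk it
 * roughly as:
 *
 *	while (addr_count--) {
 *		addr = ixgbe_addr_list_itr(hw, &addr_list, &vmdq);
 *		program addr into the next RAR/MTA slot;
 *	}
 */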
1747
Auke Kok9a799d72007-09-15 14:07:45 -07001748/**
Christopher Leech2c5645c2008-08-26 04:27:02 -07001749 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
Auke Kok9a799d72007-09-15 14:07:45 -07001750 * @netdev: network interface device structure
1751 *
Christopher Leech2c5645c2008-08-26 04:27:02 -07001752 * The set_rx_mode entry point is called whenever the unicast/multicast
1753 * address list or the network interface flags are updated. This routine is
1754 * responsible for configuring the hardware for proper unicast, multicast and
1755 * promiscuous mode.
Auke Kok9a799d72007-09-15 14:07:45 -07001756 **/
Christopher Leech2c5645c2008-08-26 04:27:02 -07001757static void ixgbe_set_rx_mode(struct net_device *netdev)
Auke Kok9a799d72007-09-15 14:07:45 -07001758{
1759 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1760 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck3d016252008-08-26 18:30:04 -07001761 u32 fctrl, vlnctrl;
Christopher Leech2c5645c2008-08-26 04:27:02 -07001762 u8 *addr_list = NULL;
1763 int addr_count = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001764
1765 /* Check for Promiscuous and All Multicast modes */
1766
1767 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
Alexander Duyck3d016252008-08-26 18:30:04 -07001768 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
Auke Kok9a799d72007-09-15 14:07:45 -07001769
1770 if (netdev->flags & IFF_PROMISC) {
Christopher Leech2c5645c2008-08-26 04:27:02 -07001771 hw->addr_ctrl.user_set_promisc = 1;
Auke Kok9a799d72007-09-15 14:07:45 -07001772 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
Alexander Duyck3d016252008-08-26 18:30:04 -07001773 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
Auke Kok9a799d72007-09-15 14:07:45 -07001774 } else {
Patrick McHardy746b9f02008-07-16 20:15:45 -07001775 if (netdev->flags & IFF_ALLMULTI) {
1776 fctrl |= IXGBE_FCTRL_MPE;
1777 fctrl &= ~IXGBE_FCTRL_UPE;
1778 } else {
1779 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1780 }
Alexander Duyck3d016252008-08-26 18:30:04 -07001781 vlnctrl |= IXGBE_VLNCTRL_VFE;
Christopher Leech2c5645c2008-08-26 04:27:02 -07001782 hw->addr_ctrl.user_set_promisc = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001783 }
1784
1785 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
Alexander Duyck3d016252008-08-26 18:30:04 -07001786 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
Auke Kok9a799d72007-09-15 14:07:45 -07001787
Christopher Leech2c5645c2008-08-26 04:27:02 -07001788 /* reprogram secondary unicast list */
1789 addr_count = netdev->uc_count;
1790 if (addr_count)
1791 addr_list = netdev->uc_list->dmi_addr;
1792 ixgbe_update_uc_addr_list(hw, addr_list, addr_count,
1793 ixgbe_addr_list_itr);
Auke Kok9a799d72007-09-15 14:07:45 -07001794
Christopher Leech2c5645c2008-08-26 04:27:02 -07001795 /* reprogram multicast list */
1796 addr_count = netdev->mc_count;
1797 if (addr_count)
1798 addr_list = netdev->mc_list->dmi_addr;
1799 ixgbe_update_mc_addr_list(hw, addr_list, addr_count,
1800 ixgbe_addr_list_itr);
Auke Kok9a799d72007-09-15 14:07:45 -07001801}
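
/* Net effect on the receive filters, by netdev flag:
 *	IFF_PROMISC	-> FCTRL.UPE | FCTRL.MPE set, VLAN filtering off
 *	IFF_ALLMULTI	-> FCTRL.MPE only
 *	neither		-> exact matching via the unicast/multicast
 *			   address tables reprogrammed above
 */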
1802
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001803static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
1804{
1805 int q_idx;
1806 struct ixgbe_q_vector *q_vector;
1807 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1808
1809 /* legacy and MSI only use one vector */
1810 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1811 q_vectors = 1;
1812
1813 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1814 q_vector = &adapter->q_vector[q_idx];
1815 if (!q_vector->rxr_count)
1816 continue;
1817 napi_enable(&q_vector->napi);
1818 }
1819}
1820
1821static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
1822{
1823 int q_idx;
1824 struct ixgbe_q_vector *q_vector;
1825 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1826
1827 /* legacy and MSI only use one vector */
1828 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1829 q_vectors = 1;
1830
1831 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1832 q_vector = &adapter->q_vector[q_idx];
1833 if (!q_vector->rxr_count)
1834 continue;
1835 napi_disable(&q_vector->napi);
1836 }
1837}
1838
Auke Kok9a799d72007-09-15 14:07:45 -07001839static void ixgbe_configure(struct ixgbe_adapter *adapter)
1840{
1841 struct net_device *netdev = adapter->netdev;
1842 int i;
1843
Christopher Leech2c5645c2008-08-26 04:27:02 -07001844 ixgbe_set_rx_mode(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07001845
1846 ixgbe_restore_vlan(adapter);
1847
1848 ixgbe_configure_tx(adapter);
1849 ixgbe_configure_rx(adapter);
1850 for (i = 0; i < adapter->num_rx_queues; i++)
1851 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
1852 (adapter->rx_ring[i].count - 1));
1853}
1854
1855static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1856{
1857 struct net_device *netdev = adapter->netdev;
Auke Kok9a799d72007-09-15 14:07:45 -07001858 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001859 int i, j = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001860 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001861 u32 txdctl, rxdctl, mhadd;
1862 u32 gpie;
Auke Kok9a799d72007-09-15 14:07:45 -07001863
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08001864 ixgbe_get_hw_control(adapter);
1865
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001866 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
1867 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
Auke Kok9a799d72007-09-15 14:07:45 -07001868 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1869 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
1870 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
1871 } else {
1872 /* MSI only */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001873 gpie = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001874 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001875 /* XXX: to interrupt immediately for EICS writes, enable this */
1876 /* gpie |= IXGBE_GPIE_EIMEN; */
1877 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1878 }
1879
1880 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
1881 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
1882 * specifically only auto mask tx and rx interrupts */
1883 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07001884 }
1885
1886 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
Auke Kok9a799d72007-09-15 14:07:45 -07001887 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
1888 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1889 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
1890
1891 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1892 }
1893
1894 for (i = 0; i < adapter->num_tx_queues; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001895 j = adapter->tx_ring[i].reg_idx;
1896 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001897 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1898 txdctl |= (8 << 16);
Auke Kok9a799d72007-09-15 14:07:45 -07001899 txdctl |= IXGBE_TXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001900 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07001901 }
1902
1903 for (i = 0; i < adapter->num_rx_queues; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001904 j = adapter->rx_ring[i].reg_idx;
1905 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
1906 /* enable PTHRESH=32 descriptors (half the internal cache)
1907 * and HTHRESH=0 descriptors (to minimize latency on fetch),
1908 * this also removes a pesky rx_no_buffer_count increment */
1909 rxdctl |= 0x0020;
Auke Kok9a799d72007-09-15 14:07:45 -07001910 rxdctl |= IXGBE_RXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001911 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07001912 }
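	/* Assuming the usual RXDCTL layout with PTHRESH in the low bits,
	 * the 0x0020 OR'd in above is exactly PTHRESH = 32 with
	 * HTHRESH = 0, matching the prefetch comment in the loop. */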
1913 /* enable all receives */
1914 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1915 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
1916 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);
1917
1918 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
1919 ixgbe_configure_msix(adapter);
1920 else
1921 ixgbe_configure_msi_and_legacy(adapter);
1922
1923 clear_bit(__IXGBE_DOWN, &adapter->state);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001924 ixgbe_napi_enable_all(adapter);
1925
1926 /* clear any pending interrupts, may auto mask */
1927 IXGBE_READ_REG(hw, IXGBE_EICR);
1928
Auke Kok9a799d72007-09-15 14:07:45 -07001929 ixgbe_irq_enable(adapter);
1930
1931 /* bring the link up in the watchdog, this could race with our first
1932 * link up interrupt but shouldn't be a problem */
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07001933 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1934 adapter->link_check_timeout = jiffies;
Auke Kok9a799d72007-09-15 14:07:45 -07001935 mod_timer(&adapter->watchdog_timer, jiffies);
1936 return 0;
1937}
1938
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001939void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
1940{
1941 WARN_ON(in_interrupt());
1942 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1943 msleep(1);
1944 ixgbe_down(adapter);
1945 ixgbe_up(adapter);
1946 clear_bit(__IXGBE_RESETTING, &adapter->state);
1947}
1948
Auke Kok9a799d72007-09-15 14:07:45 -07001949int ixgbe_up(struct ixgbe_adapter *adapter)
1950{
1951 /* hardware has been reset, we need to reload some things */
1952 ixgbe_configure(adapter);
1953
1954 return ixgbe_up_complete(adapter);
1955}
1956
1957void ixgbe_reset(struct ixgbe_adapter *adapter)
1958{
1959 if (ixgbe_init_hw(&adapter->hw))
1960 DPRINTK(PROBE, ERR, "Hardware Error\n");
1961
1962 /* reprogram the RAR[0] in case user changed it. */
1963 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1964
1965}
1966
1967#ifdef CONFIG_PM
1968static int ixgbe_resume(struct pci_dev *pdev)
1969{
1970 struct net_device *netdev = pci_get_drvdata(pdev);
1971 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001972 u32 err;
Auke Kok9a799d72007-09-15 14:07:45 -07001973
1974 pci_set_power_state(pdev, PCI_D0);
1975 pci_restore_state(pdev);
1976 err = pci_enable_device(pdev);
1977 if (err) {
1978 printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
1979 "suspend\n");
1980 return err;
1981 }
1982 pci_set_master(pdev);
1983
1984 pci_enable_wake(pdev, PCI_D3hot, 0);
1985 pci_enable_wake(pdev, PCI_D3cold, 0);
1986
1987 if (netif_running(netdev)) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001988 err = ixgbe_request_irq(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001989 if (err)
1990 return err;
1991 }
1992
1993 ixgbe_reset(adapter);
1994
1995 if (netif_running(netdev))
1996 ixgbe_up(adapter);
1997
1998 netif_device_attach(netdev);
1999
2000 return 0;
2001}
2002#endif
2003
2004/**
2005 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
2006 * @adapter: board private structure
2007 * @rx_ring: ring to free buffers from
2008 **/
2009static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
2010 struct ixgbe_ring *rx_ring)
2011{
2012 struct pci_dev *pdev = adapter->pdev;
2013 unsigned long size;
2014 unsigned int i;
2015
2016 /* Free all the Rx ring sk_buffs */
2017
2018 for (i = 0; i < rx_ring->count; i++) {
2019 struct ixgbe_rx_buffer *rx_buffer_info;
2020
2021 rx_buffer_info = &rx_ring->rx_buffer_info[i];
2022 if (rx_buffer_info->dma) {
2023 pci_unmap_single(pdev, rx_buffer_info->dma,
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07002024 rx_ring->rx_buf_len,
Auke Kok9a799d72007-09-15 14:07:45 -07002025 PCI_DMA_FROMDEVICE);
2026 rx_buffer_info->dma = 0;
2027 }
2028 if (rx_buffer_info->skb) {
2029 dev_kfree_skb(rx_buffer_info->skb);
2030 rx_buffer_info->skb = NULL;
2031 }
2032 if (!rx_buffer_info->page)
2033 continue;
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07002034 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
2035 PCI_DMA_FROMDEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07002036 rx_buffer_info->page_dma = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002037 put_page(rx_buffer_info->page);
2038 rx_buffer_info->page = NULL;
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07002039 rx_buffer_info->page_offset = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002040 }
2041
2042 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2043 memset(rx_ring->rx_buffer_info, 0, size);
2044
2045 /* Zero out the descriptor ring */
2046 memset(rx_ring->desc, 0, rx_ring->size);
2047
2048 rx_ring->next_to_clean = 0;
2049 rx_ring->next_to_use = 0;
2050
2051 writel(0, adapter->hw.hw_addr + rx_ring->head);
2052 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2053}
2054
2055/**
2056 * ixgbe_clean_tx_ring - Free Tx Buffers
2057 * @adapter: board private structure
2058 * @tx_ring: ring to be cleaned
2059 **/
2060static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
2061 struct ixgbe_ring *tx_ring)
2062{
2063 struct ixgbe_tx_buffer *tx_buffer_info;
2064 unsigned long size;
2065 unsigned int i;
2066
2067 /* Free all the Tx ring sk_buffs */
2068
2069 for (i = 0; i < tx_ring->count; i++) {
2070 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2071 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2072 }
2073
2074 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2075 memset(tx_ring->tx_buffer_info, 0, size);
2076
2077 /* Zero out the descriptor ring */
2078 memset(tx_ring->desc, 0, tx_ring->size);
2079
2080 tx_ring->next_to_use = 0;
2081 tx_ring->next_to_clean = 0;
2082
2083 writel(0, adapter->hw.hw_addr + tx_ring->head);
2084 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2085}
2086
2087/**
Auke Kok9a799d72007-09-15 14:07:45 -07002088 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
2089 * @adapter: board private structure
2090 **/
2091static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
2092{
2093 int i;
2094
2095 for (i = 0; i < adapter->num_rx_queues; i++)
2096 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2097}
2098
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002099/**
2100 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
2101 * @adapter: board private structure
2102 **/
2103static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
2104{
2105 int i;
2106
2107 for (i = 0; i < adapter->num_tx_queues; i++)
2108 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2109}
2110
Auke Kok9a799d72007-09-15 14:07:45 -07002111void ixgbe_down(struct ixgbe_adapter *adapter)
2112{
2113 struct net_device *netdev = adapter->netdev;
2114 u32 rxctrl;
2115
2116 /* signal that we are down to the interrupt handler */
2117 set_bit(__IXGBE_DOWN, &adapter->state);
2118
2119 /* disable receives */
2120 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
2121 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
2122 rxctrl & ~IXGBE_RXCTRL_RXEN);
2123
2124 netif_tx_disable(netdev);
2125
2126 /* disable transmits in the hardware */
2127
2128 /* flush both disables */
2129 IXGBE_WRITE_FLUSH(&adapter->hw);
2130 msleep(10);
2131
2132 ixgbe_irq_disable(adapter);
2133
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002134 ixgbe_napi_disable_all(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002135 del_timer_sync(&adapter->watchdog_timer);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002136 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07002137
2138 netif_carrier_off(netdev);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002139 netif_tx_stop_all_queues(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002140
Jesse Brandeburga1f96ee2008-09-11 19:54:48 -07002141#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07002142 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2143 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
2144 dca_remove_requester(&adapter->pdev->dev);
2145 }
2146
2147#endif
Paul Larson6f4a0e42008-06-24 17:00:56 -07002148 if (!pci_channel_offline(adapter->pdev))
2149 ixgbe_reset(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002150 ixgbe_clean_all_tx_rings(adapter);
2151 ixgbe_clean_all_rx_rings(adapter);
2152
Jesse Brandeburga1f96ee2008-09-11 19:54:48 -07002153#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07002154 /* since we reset the hardware DCA settings were cleared */
2155 if (dca_add_requester(&adapter->pdev->dev) == 0) {
2156 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
2157 /* always use CB2 mode, difference is masked
2158 * in the CB driver */
2159 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
2160 ixgbe_setup_dca(adapter);
2161 }
2162#endif
Auke Kok9a799d72007-09-15 14:07:45 -07002163}
2164
2165static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
2166{
2167 struct net_device *netdev = pci_get_drvdata(pdev);
2168 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2169#ifdef CONFIG_PM
2170 int retval = 0;
2171#endif
2172
2173 netif_device_detach(netdev);
2174
2175 if (netif_running(netdev)) {
2176 ixgbe_down(adapter);
2177 ixgbe_free_irq(adapter);
2178 }
2179
2180#ifdef CONFIG_PM
2181 retval = pci_save_state(pdev);
2182 if (retval)
2183 return retval;
2184#endif
2185
2186 pci_enable_wake(pdev, PCI_D3hot, 0);
2187 pci_enable_wake(pdev, PCI_D3cold, 0);
2188
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08002189 ixgbe_release_hw_control(adapter);
2190
Auke Kok9a799d72007-09-15 14:07:45 -07002191 pci_disable_device(pdev);
2192
2193 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2194
2195 return 0;
2196}
2197
2198static void ixgbe_shutdown(struct pci_dev *pdev)
2199{
2200 ixgbe_suspend(pdev, PMSG_SUSPEND);
2201}
2202
2203/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002204 * ixgbe_poll - NAPI Rx polling callback
2205 * @napi: structure for representing this polling device
2206 * @budget: how many packets driver is allowed to clean
2207 *
2208 * This function is used for legacy and MSI interrupts in NAPI mode
Auke Kok9a799d72007-09-15 14:07:45 -07002209 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002210static int ixgbe_poll(struct napi_struct *napi, int budget)
Auke Kok9a799d72007-09-15 14:07:45 -07002211{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002212 struct ixgbe_q_vector *q_vector = container_of(napi,
2213 struct ixgbe_q_vector, napi);
2214 struct ixgbe_adapter *adapter = q_vector->adapter;
David S. Millerd2c7ddd2008-01-15 22:43:24 -08002215 int tx_cleaned = 0, work_done = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002216
Jesse Brandeburga1f96ee2008-09-11 19:54:48 -07002217#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
Jeb Cramerbd0362d2008-03-03 15:04:02 -08002218 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2219 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2220 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
2221 }
2222#endif
2223
David S. Millerd2c7ddd2008-01-15 22:43:24 -08002224 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002225 ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
Auke Kok9a799d72007-09-15 14:07:45 -07002226
David S. Millerd2c7ddd2008-01-15 22:43:24 -08002227 if (tx_cleaned)
2228 work_done = budget;
2229
David S. Miller53e52c72008-01-07 21:06:12 -08002230 /* If budget not fully consumed, exit the polling mode */
2231 if (work_done < budget) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002232 netif_rx_complete(adapter->netdev, napi);
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002233 if (adapter->itr_setting & 3)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002234 ixgbe_set_itr(adapter);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002235 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2236 ixgbe_irq_enable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002237 }
2238
2239 return work_done;
2240}
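
/* NAPI contract illustrated: returning work_done < budget means this
 * poll completed and interrupts were re-armed above; forcing
 * work_done = budget when the Tx ring still had work (tx_cleaned)
 * keeps this vector on the poll list for another pass. */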
2241
2242/**
2243 * ixgbe_tx_timeout - Respond to a Tx Hang
2244 * @netdev: network interface device structure
2245 **/
2246static void ixgbe_tx_timeout(struct net_device *netdev)
2247{
2248 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2249
2250 /* Do the reset outside of interrupt context */
2251 schedule_work(&adapter->reset_task);
2252}
2253
2254static void ixgbe_reset_task(struct work_struct *work)
2255{
2256 struct ixgbe_adapter *adapter;
2257 adapter = container_of(work, struct ixgbe_adapter, reset_task);
2258
2259 adapter->tx_timeout_count++;
2260
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002261 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002262}
2263
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002264static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2265 int vectors)
2266{
2267 int err, vector_threshold;
2268
2269 /* We'll want at least 3 (vector_threshold):
2270 * 1) TxQ[0] Cleanup
2271 * 2) RxQ[0] Cleanup
2272 * 3) Other (Link Status Change, etc.)
2273 * 4) TCP Timer (optional)
2274 */
2275 vector_threshold = MIN_MSIX_COUNT;
2276
2277 /* The more we get, the more we will assign to Tx/Rx Cleanup
2278 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2279 * Right now, we simply care about how many we'll get; we'll
2280 * set them up later while requesting irq's.
2281 */
2282 while (vectors >= vector_threshold) {
2283 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2284 vectors);
2285 if (!err) /* Success in acquiring all requested vectors. */
2286 break;
2287 else if (err < 0)
2288 vectors = 0; /* Nasty failure, quit now */
2289 else /* err == number of vectors we should try again with */
2290 vectors = err;
2291 }
2292
2293 if (vectors < vector_threshold) {
2294 /* Can't allocate enough MSI-X interrupts? Oh well.
2295 * This just means we'll go with either a single MSI
2296 * vector or fall back to legacy interrupts.
2297 */
2298 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
2299 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2300 kfree(adapter->msix_entries);
2301 adapter->msix_entries = NULL;
2302 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2303 adapter->num_tx_queues = 1;
2304 adapter->num_rx_queues = 1;
2305 } else {
2306 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
2307 adapter->num_msix_vectors = vectors;
2308 }
2309}
2310
2311static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2312{
2313 int nrq, ntq;
2314 int feature_mask = 0, rss_i, rss_m;
2315
2316 /* Number of supported queues */
2317 switch (adapter->hw.mac.type) {
2318 case ixgbe_mac_82598EB:
2319 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2320 rss_m = 0;
2321 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2322
2323 switch (adapter->flags & feature_mask) {
2324 case (IXGBE_FLAG_RSS_ENABLED):
2325 rss_m = 0xF;
2326 nrq = rss_i;
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08002327 ntq = rss_i;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002328 break;
2329 case 0:
2330 default:
2331 rss_i = 0;
2332 rss_m = 0;
2333 nrq = 1;
2334 ntq = 1;
2335 break;
2336 }
2337
2338 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2339 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2340 break;
2341 default:
2342 nrq = 1;
2343 ntq = 1;
2344 break;
2345 }
2346
2347 adapter->num_rx_queues = nrq;
2348 adapter->num_tx_queues = ntq;
2349}
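
/* e.g. on 82598 with RSS enabled and 8 indices: rss_m = 0xF and
 * nrq = ntq = 8, so one Rx and one Tx ring per RSS queue; with no
 * feature flags set the device falls back to a single queue pair. */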
2350
2351/**
2352 * ixgbe_cache_ring_register - Descriptor ring to register mapping
2353 * @adapter: board private structure to initialize
2354 *
2355 * Once we know the feature-set enabled for the device, we'll cache
2356 * the register offset the descriptor ring is assigned to.
2357 **/
2358static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2359{
2360 /* TODO: Remove all uses of the indices in the cases where multiple
2361 * features are OR'd together, if the feature set makes sense.
2362 */
2363 int feature_mask = 0, rss_i;
2364 int i, txr_idx, rxr_idx;
2365
2366 /* Number of supported queues */
2367 switch (adapter->hw.mac.type) {
2368 case ixgbe_mac_82598EB:
2369 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2370 txr_idx = 0;
2371 rxr_idx = 0;
2372 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2373 switch (adapter->flags & feature_mask) {
2374 case (IXGBE_FLAG_RSS_ENABLED):
2375 for (i = 0; i < adapter->num_rx_queues; i++)
2376 adapter->rx_ring[i].reg_idx = i;
2377 for (i = 0; i < adapter->num_tx_queues; i++)
2378 adapter->tx_ring[i].reg_idx = i;
2379 break;
2380 case 0:
2381 default:
2382 break;
2383 }
2384 break;
2385 default:
2386 break;
2387 }
2388}
2389
Auke Kok9a799d72007-09-15 14:07:45 -07002390/**
2391 * ixgbe_alloc_queues - Allocate memory for all rings
2392 * @adapter: board private structure to initialize
2393 *
2394 * We allocate one ring per queue at run-time since we don't know the
2395 * number of queues at compile-time. The same ring allocation
2396 * scheme serves both multiqueue and single-queue configurations.
2397 **/
2398static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2399{
2400 int i;
2401
2402 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
2403 sizeof(struct ixgbe_ring), GFP_KERNEL);
2404 if (!adapter->tx_ring)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002405 goto err_tx_ring_allocation;
Auke Kok9a799d72007-09-15 14:07:45 -07002406
2407 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
2408 sizeof(struct ixgbe_ring), GFP_KERNEL);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002409 if (!adapter->rx_ring)
2410 goto err_rx_ring_allocation;
2411
2412 for (i = 0; i < adapter->num_tx_queues; i++) {
2413 adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
2414 adapter->tx_ring[i].queue_index = i;
2415 }
2416 for (i = 0; i < adapter->num_rx_queues; i++) {
2417 adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
2418 adapter->rx_ring[i].queue_index = i;
Auke Kok9a799d72007-09-15 14:07:45 -07002419 }
2420
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002421 ixgbe_cache_ring_register(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002422
2423 return 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002424
2425err_rx_ring_allocation:
2426 kfree(adapter->tx_ring);
2427err_tx_ring_allocation:
2428 return -ENOMEM;
2429}
2430
2431/**
2432 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
2433 * @adapter: board private structure to initialize
2434 *
2435 * Attempt to configure the interrupts using the best available
2436 * capabilities of the hardware and the kernel.
2437 **/
2438static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2439 *adapter)
2440{
2441 int err = 0;
2442 int vector, v_budget;
2443
2444 /*
2445 * It's easy to be greedy for MSI-X vectors, but it really
2446 * doesn't do us much good if we have a lot more vectors
2447 * than CPU's. So let's be conservative and only ask for
2448 * than CPUs. So let's be conservative and only ask for
2449 * (roughly) twice the number of vectors as there are CPUs.
2450 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2451 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
2452
2453 /*
2454 * At the same time, hardware can only support a maximum of
2455 * MAX_MSIX_COUNT vectors. With features such as RSS and VMDq,
2456 * we can easily reach upwards of 64 Rx descriptor queues and
2457 * 32 Tx queues. Thus, we cap it off in those rare cases where
2458 * the cpu count also exceeds our vector limit.
2459 */
2460 v_budget = min(v_budget, MAX_MSIX_COUNT);
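
	/* Budget arithmetic, by example: an 8-CPU system with 8 Rx and
	 * 8 Tx queues yields min(8 + 8, 8 * 2) + NON_Q_VECTORS, i.e. 16
	 * queue vectors plus the non-queue (link etc.) vector(s), then
	 * capped at MAX_MSIX_COUNT above. */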
2461
2462 /* A failure in MSI-X entry allocation isn't fatal, but it does
2463 * mean we disable MSI-X capabilities of the adapter. */
2464 adapter->msix_entries = kcalloc(v_budget,
2465 sizeof(struct msix_entry), GFP_KERNEL);
2466 if (!adapter->msix_entries) {
2467 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2468 ixgbe_set_num_queues(adapter);
2469 kfree(adapter->tx_ring);
2470 kfree(adapter->rx_ring);
2471 err = ixgbe_alloc_queues(adapter);
2472 if (err) {
2473 DPRINTK(PROBE, ERR, "Unable to allocate memory "
2474 "for queues\n");
2475 goto out;
2476 }
2477
2478 goto try_msi;
2479 }
2480
2481 for (vector = 0; vector < v_budget; vector++)
2482 adapter->msix_entries[vector].entry = vector;
2483
2484 ixgbe_acquire_msix_vectors(adapter, v_budget);
2485
2486 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2487 goto out;
2488
2489try_msi:
2490 err = pci_enable_msi(adapter->pdev);
2491 if (!err) {
2492 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
2493 } else {
2494 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
2495 "falling back to legacy. Error: %d\n", err);
2496 /* reset err */
2497 err = 0;
2498 }
2499
2500out:
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08002501 /* Notify the stack of the (possibly) reduced Tx Queue count. */
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002502 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002503
2504 return err;
2505}
2506
2507static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2508{
2509 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2510 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2511 pci_disable_msix(adapter->pdev);
2512 kfree(adapter->msix_entries);
2513 adapter->msix_entries = NULL;
2514 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
2515 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
2516 pci_disable_msi(adapter->pdev);
2517 }
2518 return;
2519}
2520
2521/**
2522 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
2523 * @adapter: board private structure to initialize
2524 *
2525 * We determine which interrupt scheme to use based on...
2526 * - Kernel support (MSI, MSI-X)
2527 * - which can be user-defined (via MODULE_PARAM)
2528 * - Hardware queue count (num_*_queues)
2529 * - defined by miscellaneous hardware support/features (RSS, etc.)
2530 **/
2531static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2532{
2533 int err;
2534
2535 /* Number of supported queues */
2536 ixgbe_set_num_queues(adapter);
2537
2538 err = ixgbe_alloc_queues(adapter);
2539 if (err) {
2540 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
2541 goto err_alloc_queues;
2542 }
2543
2544 err = ixgbe_set_interrupt_capability(adapter);
2545 if (err) {
2546 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
2547 goto err_set_interrupt;
2548 }
2549
2550 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
2551 "Tx Queue count = %u\n",
2552 (adapter->num_rx_queues > 1) ? "Enabled" :
2553 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2554
2555 set_bit(__IXGBE_DOWN, &adapter->state);
2556
2557 return 0;
2558
2559err_set_interrupt:
2560 kfree(adapter->tx_ring);
2561 kfree(adapter->rx_ring);
2562err_alloc_queues:
2563 return err;
Auke Kok9a799d72007-09-15 14:07:45 -07002564}
2565
2566/**
2567 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
2568 * @adapter: board private structure to initialize
2569 *
2570 * ixgbe_sw_init initializes the Adapter private data structure.
2571 * Fields are initialized based on PCI device information and
2572 * OS network device settings (MTU size).
2573 **/
2574static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2575{
2576 struct ixgbe_hw *hw = &adapter->hw;
2577 struct pci_dev *pdev = adapter->pdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002578 unsigned int rss;
2579
2580 /* Set capability flags */
2581 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2582 adapter->ring_feature[RING_F_RSS].indices = rss;
2583 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
Auke Kok9a799d72007-09-15 14:07:45 -07002584
2585 /* default flow control settings */
Jesse Brandeburg2b9ade92008-08-26 04:27:10 -07002586 hw->fc.original_type = ixgbe_fc_none;
2587 hw->fc.type = ixgbe_fc_none;
2588 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
2589 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
2590 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
2591 hw->fc.send_xon = true;
Auke Kok9a799d72007-09-15 14:07:45 -07002592
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002593 /* select 10G link by default */
Auke Kok9a799d72007-09-15 14:07:45 -07002594 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
2595 if (hw->mac.ops.reset(hw)) {
2596 dev_err(&pdev->dev, "HW Init failed\n");
2597 return -EIO;
2598 }
Auke Kok3957d632007-10-31 15:22:10 -07002599 if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
2600 false)) {
Auke Kok9a799d72007-09-15 14:07:45 -07002601 dev_err(&pdev->dev, "Link Speed setup failed\n");
2602 return -EIO;
2603 }
2604
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002605 /* enable itr by default in dynamic mode */
2606 adapter->itr_setting = 1;
2607 adapter->eitr_param = 20000;
2608
2609	/* set defaults for the eitr throughput thresholds, in MB/s */
2610 adapter->eitr_low = 10;
2611 adapter->eitr_high = 20;
2612
2613 /* set default ring sizes */
2614 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
2615 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
2616
Auke Kok9a799d72007-09-15 14:07:45 -07002617 /* initialize eeprom parameters */
2618 if (ixgbe_init_eeprom(hw)) {
2619 dev_err(&pdev->dev, "EEPROM initialization failed\n");
2620 return -EIO;
2621 }
2622
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002623 /* enable rx csum by default */
Auke Kok9a799d72007-09-15 14:07:45 -07002624 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2625
Auke Kok9a799d72007-09-15 14:07:45 -07002626 set_bit(__IXGBE_DOWN, &adapter->state);
2627
2628 return 0;
2629}
2630
2631/**
2632 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
2633 * @adapter: board private structure
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002634 * @tx_ring: tx descriptor ring (for a specific queue) to setup
Auke Kok9a799d72007-09-15 14:07:45 -07002635 *
2636 * Return 0 on success, negative on failure
2637 **/
2638int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002639 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002640{
2641 struct pci_dev *pdev = adapter->pdev;
2642 int size;
2643
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002644 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2645 tx_ring->tx_buffer_info = vmalloc(size);
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002646 if (!tx_ring->tx_buffer_info)
2647 goto err;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002648 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9a799d72007-09-15 14:07:45 -07002649
2650 /* round up to nearest 4K */
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002651 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
2652 sizeof(u32);
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002653 tx_ring->size = ALIGN(tx_ring->size, 4096);
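	/* e.g. a 512-descriptor ring: 512 * 16-byte descriptors plus the
	 * 4-byte head write-back word is 8196 bytes, which ALIGN() then
	 * rounds up to 12288 for 4 KB alignment. */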
Auke Kok9a799d72007-09-15 14:07:45 -07002654
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002655 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2656 &tx_ring->dma);
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002657 if (!tx_ring->desc)
2658 goto err;
Auke Kok9a799d72007-09-15 14:07:45 -07002659
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002660 tx_ring->next_to_use = 0;
2661 tx_ring->next_to_clean = 0;
2662 tx_ring->work_limit = tx_ring->count;
Auke Kok9a799d72007-09-15 14:07:45 -07002663 return 0;
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002664
2665err:
2666 vfree(tx_ring->tx_buffer_info);
2667 tx_ring->tx_buffer_info = NULL;
2668 DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
2669 "descriptor ring\n");
2670 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07002671}
2672
2673/**
2674 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
2675 * @adapter: board private structure
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002676 * @rx_ring: rx descriptor ring (for a specific queue) to setup
Auke Kok9a799d72007-09-15 14:07:45 -07002677 *
2678 * Returns 0 on success, negative on failure
2679 **/
2680int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002681 struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002682{
2683 struct pci_dev *pdev = adapter->pdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002684 int size;
Auke Kok9a799d72007-09-15 14:07:45 -07002685
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002686 size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002687 rx_ring->lro_mgr.lro_arr = vmalloc(size);
2688 if (!rx_ring->lro_mgr.lro_arr)
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002689 return -ENOMEM;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002690 memset(rx_ring->lro_mgr.lro_arr, 0, size);
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002691
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002692 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2693 rx_ring->rx_buffer_info = vmalloc(size);
2694 if (!rx_ring->rx_buffer_info) {
Auke Kok9a799d72007-09-15 14:07:45 -07002695 DPRINTK(PROBE, ERR,
2696 "vmalloc allocation failed for the rx desc ring\n");
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002697 goto alloc_failed;
Auke Kok9a799d72007-09-15 14:07:45 -07002698 }
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002699 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9a799d72007-09-15 14:07:45 -07002700
Auke Kok9a799d72007-09-15 14:07:45 -07002701 /* Round up to nearest 4K */
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002702 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2703 rx_ring->size = ALIGN(rx_ring->size, 4096);
Auke Kok9a799d72007-09-15 14:07:45 -07002704
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002705 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
Auke Kok9a799d72007-09-15 14:07:45 -07002706
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002707 if (!rx_ring->desc) {
Auke Kok9a799d72007-09-15 14:07:45 -07002708 DPRINTK(PROBE, ERR,
2709 "Memory allocation failed for the rx desc ring\n");
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002710 vfree(rx_ring->rx_buffer_info);
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002711 goto alloc_failed;
Auke Kok9a799d72007-09-15 14:07:45 -07002712 }
2713
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002714 rx_ring->next_to_clean = 0;
2715 rx_ring->next_to_use = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002716
2717 return 0;
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002718
2719alloc_failed:
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002720 vfree(rx_ring->lro_mgr.lro_arr);
2721 rx_ring->lro_mgr.lro_arr = NULL;
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002722 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07002723}
2724
2725/**
2726 * ixgbe_free_tx_resources - Free Tx Resources per Queue
2727 * @adapter: board private structure
2728 * @tx_ring: Tx descriptor ring for a specific queue
2729 *
2730 * Free all transmit software resources
2731 **/
2732static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002733 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002734{
2735 struct pci_dev *pdev = adapter->pdev;
2736
2737 ixgbe_clean_tx_ring(adapter, tx_ring);
2738
2739 vfree(tx_ring->tx_buffer_info);
2740 tx_ring->tx_buffer_info = NULL;
2741
2742 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2743
2744 tx_ring->desc = NULL;
2745}
2746
2747/**
2748 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
2749 * @adapter: board private structure
2750 *
2751 * Free all transmit software resources
2752 **/
2753static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
2754{
2755 int i;
2756
2757 for (i = 0; i < adapter->num_tx_queues; i++)
2758 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
2759}
2760
2761/**
2762 * ixgbe_free_rx_resources - Free Rx Resources
2763 * @adapter: board private structure
2764 * @rx_ring: ring to clean the resources from
2765 *
2766 * Free all receive software resources
2767 **/
2768static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
2769 struct ixgbe_ring *rx_ring)
2770{
2771 struct pci_dev *pdev = adapter->pdev;
2772
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002773 vfree(rx_ring->lro_mgr.lro_arr);
2774 rx_ring->lro_mgr.lro_arr = NULL;
2775
Auke Kok9a799d72007-09-15 14:07:45 -07002776 ixgbe_clean_rx_ring(adapter, rx_ring);
2777
2778 vfree(rx_ring->rx_buffer_info);
2779 rx_ring->rx_buffer_info = NULL;
2780
2781 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2782
2783 rx_ring->desc = NULL;
2784}
2785
2786/**
2787 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
2788 * @adapter: board private structure
2789 *
2790 * Free all receive software resources
2791 **/
2792static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
2793{
2794 int i;
2795
2796 for (i = 0; i < adapter->num_rx_queues; i++)
2797 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
2798}
2799
2800/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002801 * ixgbe_setup_all_tx_resources - allocate all queues' Tx resources
Auke Kok9a799d72007-09-15 14:07:45 -07002802 * @adapter: board private structure
2803 *
2804 * If this function returns with an error, then it's possible one or
2805 * more of the rings is populated (while the rest are not). It is the
2806 * caller's duty to clean those orphaned rings.
2807 *
2808 * Return 0 on success, negative on failure
2809 **/
2810static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
2811{
2812 int i, err = 0;
2813
2814 for (i = 0; i < adapter->num_tx_queues; i++) {
2815 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2816 if (err) {
2817 DPRINTK(PROBE, ERR,
2818 "Allocation for Tx Queue %u failed\n", i);
2819 break;
2820 }
2821 }
2822
2823 return err;
2824}
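
/*
 * Sketch (an assumption, not driver code) of what the "caller's duty"
 * note above implies: a caller that wants to abort outright can walk
 * back over the rings that did allocate, e.g.
 *
 *	for (i = 0; i < adapter->num_tx_queues; i++)
 *		if (ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]))
 *			goto unwind;
 *	return 0;
 * unwind:
 *	while (i--)
 *		ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
 *	return -ENOMEM;
 */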
2825
2826/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002827 * ixgbe_setup_all_rx_resources - allocate all queues' Rx resources
Auke Kok9a799d72007-09-15 14:07:45 -07002828 * @adapter: board private structure
2829 *
2830 * If this function returns with an error, then it's possible one or
2831 * more of the rings is populated (while the rest are not). It is the
2832 * caller's duty to clean those orphaned rings.
2833 *
2834 * Return 0 on success, negative on failure
2835 **/
2837static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
2838{
2839 int i, err = 0;
2840
2841 for (i = 0; i < adapter->num_rx_queues; i++) {
2842 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2843 if (err) {
2844 DPRINTK(PROBE, ERR,
2845 "Allocation for Rx Queue %u failed\n", i);
2846 break;
2847 }
2848 }
2849
2850 return err;
2851}
2852
2853/**
2854 * ixgbe_change_mtu - Change the Maximum Transfer Unit
2855 * @netdev: network interface device structure
2856 * @new_mtu: new value for maximum frame size
2857 *
2858 * Returns 0 on success, negative on failure
2859 **/
2860static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
2861{
2862 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2863 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2864
Jesse Brandeburg42c783c2008-09-11 19:56:28 -07002865 /* MTU < 68 is an error and causes problems on some kernels */
2866 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
Auke Kok9a799d72007-09-15 14:07:45 -07002867 return -EINVAL;
2868
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002869 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
2870 netdev->mtu, new_mtu);
2871 /* must set new MTU before calling down or up */
Auke Kok9a799d72007-09-15 14:07:45 -07002872 netdev->mtu = new_mtu;
2873
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002874 if (netif_running(netdev))
2875 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002876
2877 return 0;
2878}
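
/*
 * Worked example for the max_frame check above: a default 1500-byte
 * MTU is 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes on the
 * wire; a 9000-byte jumbo MTU gives 9018.  Both must stay at or below
 * IXGBE_MAX_JUMBO_FRAME_SIZE for the new MTU to be accepted.
 */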
2879
2880/**
2881 * ixgbe_open - Called when a network interface is made active
2882 * @netdev: network interface device structure
2883 *
2884 * Returns 0 on success, negative value on failure
2885 *
2886 * The open entry point is called when a network interface is made
2887 * active by the system (IFF_UP). At this point all resources needed
2888 * for transmit and receive operations are allocated, the interrupt
2889 * handler is registered with the OS, the watchdog timer is started,
2890 * and the stack is notified that the interface is ready.
2891 **/
2892static int ixgbe_open(struct net_device *netdev)
2893{
2894 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2895 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07002896
Auke Kok4bebfaa2008-02-11 09:26:01 -08002897 /* disallow open during test */
2898 if (test_bit(__IXGBE_TESTING, &adapter->state))
2899 return -EBUSY;
2900
Auke Kok9a799d72007-09-15 14:07:45 -07002901 /* allocate transmit descriptors */
2902 err = ixgbe_setup_all_tx_resources(adapter);
2903 if (err)
2904 goto err_setup_tx;
2905
Auke Kok9a799d72007-09-15 14:07:45 -07002906 /* allocate receive descriptors */
2907 err = ixgbe_setup_all_rx_resources(adapter);
2908 if (err)
2909 goto err_setup_rx;
2910
2911 ixgbe_configure(adapter);
2912
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002913 err = ixgbe_request_irq(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002914 if (err)
2915 goto err_req_irq;
2916
Auke Kok9a799d72007-09-15 14:07:45 -07002917 err = ixgbe_up_complete(adapter);
2918 if (err)
2919 goto err_up;
2920
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002921 netif_tx_start_all_queues(netdev);
2922
Auke Kok9a799d72007-09-15 14:07:45 -07002923 return 0;
2924
2925err_up:
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08002926 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002927 ixgbe_free_irq(adapter);
2928err_req_irq:
2929 ixgbe_free_all_rx_resources(adapter);
2930err_setup_rx:
2931 ixgbe_free_all_tx_resources(adapter);
2932err_setup_tx:
2933 ixgbe_reset(adapter);
2934
2935 return err;
2936}
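
/*
 * Note on the labels above: the unwind runs in reverse order of setup
 * (IRQ, then Rx rings, then Tx rings), the usual goto-ladder teardown
 * shape; generically (step1/step2 are placeholder names):
 *
 *	err = step1(); if (err) goto out;
 *	err = step2(); if (err) goto undo1;
 *	return 0;
 * undo1:	undo_step1();
 * out:		return err;
 */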
2937
2938/**
2939 * ixgbe_close - Disables a network interface
2940 * @netdev: network interface device structure
2941 *
2942 * Returns 0, this is not allowed to fail
2943 *
2944 * The close entry point is called when an interface is de-activated
2945 * by the OS. The hardware is still under the driver's control, but
2946 * needs to be disabled. A global MAC reset is issued to stop the
2947 * hardware, and all transmit and receive resources are freed.
2948 **/
2949static int ixgbe_close(struct net_device *netdev)
2950{
2951 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002952
2953 ixgbe_down(adapter);
2954 ixgbe_free_irq(adapter);
2955
2956 ixgbe_free_all_tx_resources(adapter);
2957 ixgbe_free_all_rx_resources(adapter);
2958
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08002959 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002960
2961 return 0;
2962}
2963
2964/**
2965 * ixgbe_update_stats - Update the board statistics counters.
2966 * @adapter: board private structure
2967 **/
2968void ixgbe_update_stats(struct ixgbe_adapter *adapter)
2969{
2970 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08002971 u64 total_mpc = 0;
2972 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
Auke Kok9a799d72007-09-15 14:07:45 -07002973
2974 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08002975 for (i = 0; i < 8; i++) {
2976 /* for packet buffers not used, the register should read 0 */
2977 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2978 missed_rx += mpc;
2979 adapter->stats.mpc[i] += mpc;
2980 total_mpc += adapter->stats.mpc[i];
2981 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2982 }
2983 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
2984 /* work around hardware counting issue */
2985 adapter->stats.gprc -= missed_rx;
Auke Kok9a799d72007-09-15 14:07:45 -07002986
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08002987 /* 82598 hardware only has a 32 bit counter in the high register */
Auke Kok9a799d72007-09-15 14:07:45 -07002988 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08002989 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2990 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
Auke Kok9a799d72007-09-15 14:07:45 -07002991 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2992 adapter->stats.bprc += bprc;
2993 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2994 adapter->stats.mprc -= bprc;
2995 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2996 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2997 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2998 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2999 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3000 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3001 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
Auke Kok9a799d72007-09-15 14:07:45 -07003002 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3003 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
Auke Kok9a799d72007-09-15 14:07:45 -07003004 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003005 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3006 adapter->stats.lxontxc += lxon;
3007 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3008 adapter->stats.lxofftxc += lxoff;
Auke Kok9a799d72007-09-15 14:07:45 -07003009 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3010 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003011 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3012 /*
3013 * 82598 errata - tx of flow control packets is included in tx counters
3014 */
3015 xon_off_tot = lxon + lxoff;
3016 adapter->stats.gptc -= xon_off_tot;
3017 adapter->stats.mptc -= xon_off_tot;
3018 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
Auke Kok9a799d72007-09-15 14:07:45 -07003019 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3020 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3021 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
Auke Kok9a799d72007-09-15 14:07:45 -07003022 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3023 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003024 adapter->stats.ptc64 -= xon_off_tot;
Auke Kok9a799d72007-09-15 14:07:45 -07003025 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3026 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3027 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3028 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3029 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
Auke Kok9a799d72007-09-15 14:07:45 -07003030 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3031
3032 /* Fill out the OS statistics structure */
Auke Kok9a799d72007-09-15 14:07:45 -07003033 adapter->net_stats.multicast = adapter->stats.mprc;
3034
3035 /* Rx Errors */
3036 adapter->net_stats.rx_errors = adapter->stats.crcerrs +
3037 adapter->stats.rlec;
3038 adapter->net_stats.rx_dropped = 0;
3039 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
3040 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003041 adapter->net_stats.rx_missed_errors = total_mpc;
Auke Kok9a799d72007-09-15 14:07:45 -07003042}
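
/*
 * Worked example for the errata adjustment above: each MAC-generated
 * pause frame is a minimum-size frame of ETH_ZLEN + ETH_FCS_LEN =
 * 60 + 4 = 64 bytes, so the xon_off_tot flow-control frames counted
 * by hardware are backed out of gotc as xon_off_tot * 64 octets.
 */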
3043
3044/**
3045 * ixgbe_watchdog - Timer Call-back
3046 * @data: pointer to adapter cast into an unsigned long
3047 **/
3048static void ixgbe_watchdog(unsigned long data)
3049{
3050 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003051 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07003052
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003053 /* Do the watchdog outside of interrupt context due to the lovely
3054 * delays that some of the newer hardware requires */
3055 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
3056 /* Cause software interrupt to ensure rx rings are cleaned */
3057 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3058 u32 eics =
3059 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
3060 IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
3061 } else {
3062 /* For legacy and MSI interrupts don't set any bits that
3063 * are enabled for EIAM, because this operation would
3064 * set *both* EIMS and EICS for any bit in EIAM */
3065 IXGBE_WRITE_REG(hw, IXGBE_EICS,
3066 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
3067 }
3068 /* Reset the timer */
3069 mod_timer(&adapter->watchdog_timer,
3070 round_jiffies(jiffies + 2 * HZ));
3071 }
3072
3073 schedule_work(&adapter->watchdog_task);
3074}
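
/*
 * Worked example for the MSI-X EICS mask above, assuming NON_Q_VECTORS
 * is 1 (the single non-queue vector): with 10 MSI-X vectors total,
 * (1 << (10 - 1)) - 1 = 0x1ff raises a software interrupt on each of
 * the nine queue vectors in one register write.
 */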
3075
3076/**
3077 * ixgbe_watchdog_task - worker thread to bring link up
3078 * @work: pointer to work_struct containing our data
3079 **/
3080static void ixgbe_watchdog_task(struct work_struct *work)
3081{
3082 struct ixgbe_adapter *adapter = container_of(work,
3083 struct ixgbe_adapter,
3084 watchdog_task);
3085 struct net_device *netdev = adapter->netdev;
3086 struct ixgbe_hw *hw = &adapter->hw;
3087 u32 link_speed = adapter->link_speed;
3088 bool link_up = adapter->link_up;
3089
3090 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
3091
3092 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
3093 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3094 if (link_up ||
3095 time_after(jiffies, (adapter->link_check_timeout +
3096 IXGBE_TRY_LINK_TIMEOUT))) {
3097 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
3098 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3099 }
3100 adapter->link_up = link_up;
3101 adapter->link_speed = link_speed;
3102 }
Auke Kok9a799d72007-09-15 14:07:45 -07003103
3104 if (link_up) {
3105 if (!netif_carrier_ok(netdev)) {
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003106 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3107 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
Auke Kok9a799d72007-09-15 14:07:45 -07003108#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
3109#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
3110 DPRINTK(LINK, INFO, "NIC Link is Up %s, "
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003111 "Flow Control: %s\n",
3112 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
3113 "10 Gbps" :
3114 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
3115 "1 Gbps" : "unknown speed")),
3116 ((FLOW_RX && FLOW_TX) ? "RX/TX" :
3117 (FLOW_RX ? "RX" :
3118 (FLOW_TX ? "TX" : "None"))));
Auke Kok9a799d72007-09-15 14:07:45 -07003119
3120 netif_carrier_on(netdev);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003121 netif_tx_wake_all_queues(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07003122 } else {
3123 /* Force detection of hung controller */
3124 adapter->detect_tx_hung = true;
3125 }
3126 } else {
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003127 adapter->link_up = false;
3128 adapter->link_speed = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003129 if (netif_carrier_ok(netdev)) {
3130 DPRINTK(LINK, INFO, "NIC Link is Down\n");
3131 netif_carrier_off(netdev);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003132 netif_tx_stop_all_queues(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07003133 }
3134 }
3135
3136 ixgbe_update_stats(adapter);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003137 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
Auke Kok9a799d72007-09-15 14:07:45 -07003138}
3139
Auke Kok9a799d72007-09-15 14:07:45 -07003140static int ixgbe_tso(struct ixgbe_adapter *adapter,
3141 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
3142 u32 tx_flags, u8 *hdr_len)
3143{
3144 struct ixgbe_adv_tx_context_desc *context_desc;
3145 unsigned int i;
3146 int err;
3147 struct ixgbe_tx_buffer *tx_buffer_info;
3148 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3149 u32 mss_l4len_idx = 0, l4len;
Auke Kok9a799d72007-09-15 14:07:45 -07003150
3151 if (skb_is_gso(skb)) {
3152 if (skb_header_cloned(skb)) {
3153 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3154 if (err)
3155 return err;
3156 }
3157 l4len = tcp_hdrlen(skb);
3158 *hdr_len += l4len;
3159
Al Viro8327d002007-12-10 18:54:12 +00003160 if (skb->protocol == htons(ETH_P_IP)) {
Auke Kok9a799d72007-09-15 14:07:45 -07003161 struct iphdr *iph = ip_hdr(skb);
3162 iph->tot_len = 0;
3163 iph->check = 0;
3164 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3165 iph->daddr, 0,
3166 IPPROTO_TCP,
3167 0);
3168 adapter->hw_tso_ctxt++;
3169 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3170 ipv6_hdr(skb)->payload_len = 0;
3171 tcp_hdr(skb)->check =
3172 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3173 &ipv6_hdr(skb)->daddr,
3174 0, IPPROTO_TCP, 0);
3175 adapter->hw_tso6_ctxt++;
3176 }
3177
3178 i = tx_ring->next_to_use;
3179
3180 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3181 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
3182
3183 /* VLAN MACLEN IPLEN */
3184 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3185 vlan_macip_lens |=
3186 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3187 vlan_macip_lens |= ((skb_network_offset(skb)) <<
3188 IXGBE_ADVTXD_MACLEN_SHIFT);
3189 *hdr_len += skb_network_offset(skb);
3190 vlan_macip_lens |=
3191 (skb_transport_header(skb) - skb_network_header(skb));
3192 *hdr_len +=
3193 (skb_transport_header(skb) - skb_network_header(skb));
3194 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3195 context_desc->seqnum_seed = 0;
3196
3197 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3198 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
3199 IXGBE_ADVTXD_DTYP_CTXT);
3200
Al Viro8327d002007-12-10 18:54:12 +00003201 if (skb->protocol == htons(ETH_P_IP))
Auke Kok9a799d72007-09-15 14:07:45 -07003202 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3203 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3204 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3205
3206 /* MSS L4LEN IDX */
3207 mss_l4len_idx |=
3208 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
3209 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07003210 /* use index 1 for TSO */
3211 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07003212 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3213
3214 tx_buffer_info->time_stamp = jiffies;
3215 tx_buffer_info->next_to_watch = i;
3216
3217 i++;
3218 if (i == tx_ring->count)
3219 i = 0;
3220 tx_ring->next_to_use = i;
3221
3222 return true;
3223 }
3224 return false;
3225}
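
/*
 * Layout sketch for vlan_macip_lens as packed above, inferred from the
 * shifts used in this file (an aid, not a hardware spec):
 *
 *	 31           16 15        9 8          0
 *	+---------------+-----------+------------+
 *	|   VLAN tag    |  MAC hlen |  IP hlen   |
 *	+---------------+-----------+------------+
 *
 * e.g. an untagged frame with a 14-byte Ethernet header and a 20-byte
 * IPv4 header packs as (14 << 9) | 20.
 */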
3226
3227static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
3228 struct ixgbe_ring *tx_ring,
3229 struct sk_buff *skb, u32 tx_flags)
3230{
3231 struct ixgbe_adv_tx_context_desc *context_desc;
3232 unsigned int i;
3233 struct ixgbe_tx_buffer *tx_buffer_info;
3234 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3235
3236 if (skb->ip_summed == CHECKSUM_PARTIAL ||
3237 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
3238 i = tx_ring->next_to_use;
3239 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3240 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
3241
3242 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3243 vlan_macip_lens |=
3244 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3245 vlan_macip_lens |= (skb_network_offset(skb) <<
3246 IXGBE_ADVTXD_MACLEN_SHIFT);
3247 if (skb->ip_summed == CHECKSUM_PARTIAL)
3248 vlan_macip_lens |= (skb_transport_header(skb) -
3249 skb_network_header(skb));
3250
3251 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3252 context_desc->seqnum_seed = 0;
3253
3254 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
3255 IXGBE_ADVTXD_DTYP_CTXT);
3256
3257 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Auke Kok41825d72008-02-12 15:20:33 -08003258 switch (skb->protocol) {
3259 case __constant_htons(ETH_P_IP):
Auke Kok9a799d72007-09-15 14:07:45 -07003260 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
Auke Kok41825d72008-02-12 15:20:33 -08003261 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3262 type_tucmd_mlhl |=
3263 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3264 break;
Auke Kok9a799d72007-09-15 14:07:45 -07003265
Auke Kok41825d72008-02-12 15:20:33 -08003266 case __constant_htons(ETH_P_IPV6):
3267 /* XXX what about other V6 headers?? */
3268 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3269 type_tucmd_mlhl |=
3270 IXGBE_ADVTXD_TUCMD_L4T_TCP;
3271 break;
3272
3273 default:
3274 if (unlikely(net_ratelimit())) {
3275 DPRINTK(PROBE, WARNING,
3276 "partial checksum but proto=%x!\n",
3277 skb->protocol);
3278 }
3279 break;
3280 }
Auke Kok9a799d72007-09-15 14:07:45 -07003281 }
3282
3283 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07003284 /* use index zero for tx checksum offload */
Auke Kok9a799d72007-09-15 14:07:45 -07003285 context_desc->mss_l4len_idx = 0;
3286
3287 tx_buffer_info->time_stamp = jiffies;
3288 tx_buffer_info->next_to_watch = i;
3289 adapter->hw_csum_tx_good++;
3290 i++;
3291 if (i == tx_ring->count)
3292 i = 0;
3293 tx_ring->next_to_use = i;
3294
3295 return true;
3296 }
3297 return false;
3298}
3299
3300static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
3301 struct ixgbe_ring *tx_ring,
3302 struct sk_buff *skb, unsigned int first)
3303{
3304 struct ixgbe_tx_buffer *tx_buffer_info;
3305 unsigned int len = skb->len;
3306 unsigned int offset = 0, size, count = 0, i;
3307 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
3308 unsigned int f;
3309
3310 len -= skb->data_len;
3311
3312 i = tx_ring->next_to_use;
3313
3314 while (len) {
3315 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3316 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
3317
3318 tx_buffer_info->length = size;
3319 tx_buffer_info->dma = pci_map_single(adapter->pdev,
3320 skb->data + offset,
3321 size, PCI_DMA_TODEVICE);
3322 tx_buffer_info->time_stamp = jiffies;
3323 tx_buffer_info->next_to_watch = i;
3324
3325 len -= size;
3326 offset += size;
3327 count++;
3328 i++;
3329 if (i == tx_ring->count)
3330 i = 0;
3331 }
3332
3333 for (f = 0; f < nr_frags; f++) {
3334 struct skb_frag_struct *frag;
3335
3336 frag = &skb_shinfo(skb)->frags[f];
3337 len = frag->size;
3338 offset = frag->page_offset;
3339
3340 while (len) {
3341 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3342 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
3343
3344 tx_buffer_info->length = size;
3345 tx_buffer_info->dma = pci_map_page(adapter->pdev,
3346 frag->page,
3347 offset,
3348 size, PCI_DMA_TODEVICE);
3349 tx_buffer_info->time_stamp = jiffies;
3350 tx_buffer_info->next_to_watch = i;
3351
3352 len -= size;
3353 offset += size;
3354 count++;
3355 i++;
3356 if (i == tx_ring->count)
3357 i = 0;
3358 }
3359 }
3360 if (i == 0)
3361 i = tx_ring->count - 1;
3362 else
3363 i = i - 1;
3364 tx_ring->tx_buffer_info[i].skb = skb;
3365 tx_ring->tx_buffer_info[first].next_to_watch = i;
3366
3367 return count;
3368}
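
/*
 * Worked example for the chunking loops above, assuming
 * IXGBE_MAX_DATA_PER_TXD is 16384: a 20000-byte linear area maps as
 * 16384 + 3616, i.e. two descriptors, which is what TXD_USE_COUNT()
 * predicts for the budget check in ixgbe_xmit_frame() below.
 */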
3369
3370static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
3371 struct ixgbe_ring *tx_ring,
3372 int tx_flags, int count, u32 paylen, u8 hdr_len)
3373{
3374 union ixgbe_adv_tx_desc *tx_desc = NULL;
3375 struct ixgbe_tx_buffer *tx_buffer_info;
3376 u32 olinfo_status = 0, cmd_type_len = 0;
3377 unsigned int i;
3378 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
3379
3380 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
3381
3382 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
3383
3384 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3385 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3386
3387 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3388 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3389
3390 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3391 IXGBE_ADVTXD_POPTS_SHIFT;
3392
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07003393 /* use index 1 context for tso */
3394 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07003395 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3396 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
3397 IXGBE_ADVTXD_POPTS_SHIFT;
3398
3399 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3400 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3401 IXGBE_ADVTXD_POPTS_SHIFT;
3402
3403 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3404
3405 i = tx_ring->next_to_use;
3406 while (count--) {
3407 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3408 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3409 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3410 tx_desc->read.cmd_type_len =
3411 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3412 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3413
3414 i++;
3415 if (i == tx_ring->count)
3416 i = 0;
3417 }
3418
3419 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3420
3421 /*
3422 * Force memory writes to complete before letting h/w
3423 * know there are new descriptors to fetch. (Only
3424 * applicable for weak-ordered memory model archs,
3425 * such as IA-64).
3426 */
3427 wmb();
3428
3429 tx_ring->next_to_use = i;
3430 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3431}
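
/*
 * Ordering sketch for the tail write above (the generic producer
 * pattern, not hardware documentation):
 *
 *	write descriptor(s) into host memory;
 *	wmb();			-- descriptors globally visible...
 *	writel(i, tail);	-- ...before hardware is told to fetch
 *
 * Were the tail write allowed to pass the descriptor stores, the NIC
 * could DMA-read a stale descriptor.
 */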
3432
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08003433static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
3434 struct ixgbe_ring *tx_ring, int size)
3435{
3436 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3437
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08003438 netif_stop_subqueue(netdev, tx_ring->queue_index);
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08003439 /* Herbert's original patch had:
3440 * smp_mb__after_netif_stop_queue();
3441 * but since that doesn't exist yet, just open code it. */
3442 smp_mb();
3443
3444 /* We need to check again in a case another CPU has just
3445 * made room available. */
3446 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3447 return -EBUSY;
3448
3449 /* A reprieve! - use start_queue because it doesn't call schedule */
Jesse Brandeburgaf721662008-09-11 19:54:23 -07003450 netif_start_subqueue(netdev, tx_ring->queue_index);
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08003451 ++adapter->restart_queue;
3452 return 0;
3453}
3454
3455static int ixgbe_maybe_stop_tx(struct net_device *netdev,
3456 struct ixgbe_ring *tx_ring, int size)
3457{
3458 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3459 return 0;
3460 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
3461}
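
/*
 * Sketch of the race the barrier + recheck above close (a generic
 * lock-free producer/consumer pattern):
 *
 *	xmit path			clean_tx_irq path
 *	sees ring full
 *					frees descriptors
 *					queue not stopped yet,
 *					    so no wake is issued
 *	stops queue
 *	smp_mb(), rechecks ring:
 *	room appeared, so it restarts
 *	the queue itself
 *
 * Without the recheck the queue could stay stopped forever.
 */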
3462
Auke Kok9a799d72007-09-15 14:07:45 -07003464static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3465{
3466 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3467 struct ixgbe_ring *tx_ring;
3468 unsigned int len = skb->len;
3469 unsigned int first;
3470 unsigned int tx_flags = 0;
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08003471 u8 hdr_len = 0;
3472 int r_idx = 0, tso;
Auke Kok9a799d72007-09-15 14:07:45 -07003473 unsigned int mss = 0;
3474 int count = 0;
3475 unsigned int f;
3476 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
3477 len -= skb->data_len;
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08003478 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08003479 tx_ring = &adapter->tx_ring[r_idx];
Auke Kok9a799d72007-09-15 14:07:45 -07003480
3482 if (skb->len <= 0) {
3483 dev_kfree_skb(skb);
3484 return NETDEV_TX_OK;
3485 }
3486 mss = skb_shinfo(skb)->gso_size;
3487
3488 if (mss)
3489 count++;
3490 else if (skb->ip_summed == CHECKSUM_PARTIAL)
3491 count++;
3492
3493 count += TXD_USE_COUNT(len);
3494 for (f = 0; f < nr_frags; f++)
3495 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3496
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08003497 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
Auke Kok9a799d72007-09-15 14:07:45 -07003498 adapter->tx_busy++;
Auke Kok9a799d72007-09-15 14:07:45 -07003499 return NETDEV_TX_BUSY;
3500 }
Auke Kok9a799d72007-09-15 14:07:45 -07003501 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3502 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3503 tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
3504 }
3505
Al Viro8327d002007-12-10 18:54:12 +00003506 if (skb->protocol == htons(ETH_P_IP))
Auke Kok9a799d72007-09-15 14:07:45 -07003507 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3508 first = tx_ring->next_to_use;
3509 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
3510 if (tso < 0) {
3511 dev_kfree_skb_any(skb);
3512 return NETDEV_TX_OK;
3513 }
3514
3515 if (tso)
3516 tx_flags |= IXGBE_TX_FLAGS_TSO;
3517 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
3518 (skb->ip_summed == CHECKSUM_PARTIAL))
3519 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3520
3521 ixgbe_tx_queue(adapter, tx_ring, tx_flags,
3522 ixgbe_tx_map(adapter, tx_ring, skb, first),
3523 skb->len, hdr_len);
3524
3525 netdev->trans_start = jiffies;
3526
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08003527 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
Auke Kok9a799d72007-09-15 14:07:45 -07003528
3529 return NETDEV_TX_OK;
3530}
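
/*
 * Worked example for the descriptor budget above: a TSO skb with a
 * 2000-byte linear area and two 1500-byte frags needs 1 (context
 * descriptor) + TXD_USE_COUNT(2000) + 2 * TXD_USE_COUNT(1500)
 * = 1 + 1 + 2 = 4 descriptors, assuming TXD_USE_COUNT() rounds up
 * to whole IXGBE_MAX_DATA_PER_TXD-sized chunks.
 */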
3531
3532/**
3533 * ixgbe_get_stats - Get System Network Statistics
3534 * @netdev: network interface device structure
3535 *
3536 * Returns the address of the device statistics structure.
3537 * The statistics are actually updated from the timer callback.
3538 **/
3539static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
3540{
3541 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3542
3543 /* only return the current stats */
3544 return &adapter->net_stats;
3545}
3546
3547/**
3548 * ixgbe_set_mac - Change the Ethernet Address of the NIC
3549 * @netdev: network interface device structure
3550 * @p: pointer to an address structure
3551 *
3552 * Returns 0 on success, negative on failure
3553 **/
3554static int ixgbe_set_mac(struct net_device *netdev, void *p)
3555{
3556 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3557 struct sockaddr *addr = p;
3558
3559 if (!is_valid_ether_addr(addr->sa_data))
3560 return -EADDRNOTAVAIL;
3561
3562 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3563 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
3564
3565 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
3566
3567 return 0;
3568}
3569
3570#ifdef CONFIG_NET_POLL_CONTROLLER
3571/*
3572 * Polling 'interrupt' - used by things like netconsole to send skbs
3573 * without having to re-enable interrupts. It's not called while
3574 * the interrupt routine is executing.
3575 */
3576static void ixgbe_netpoll(struct net_device *netdev)
3577{
3578 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3579
3580 disable_irq(adapter->pdev->irq);
3581 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
3582 ixgbe_intr(adapter->pdev->irq, netdev);
3583 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
3584 enable_irq(adapter->pdev->irq);
3585}
3586#endif
3587
3588/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003589 * ixgbe_napi_add_all - prep napi structs for use
3590 * @adapter: private struct
3591 * helper function to napi_add each possible q_vector->napi
3592 */
3593static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
3594{
3595 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3596 int (*poll)(struct napi_struct *, int);
3597
3598 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3599 poll = &ixgbe_clean_rxonly;
3600 } else {
3601 poll = &ixgbe_poll;
3602 /* only one q_vector for legacy modes */
3603 q_vectors = 1;
3604 }
3605
3606 for (i = 0; i < q_vectors; i++) {
3607 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
3608 netif_napi_add(adapter->netdev, &q_vector->napi,
3609 (*poll), 64);
3610 }
3611}
3612
3613/**
Auke Kok9a799d72007-09-15 14:07:45 -07003614 * ixgbe_probe - Device Initialization Routine
3615 * @pdev: PCI device information struct
3616 * @ent: entry in ixgbe_pci_tbl
3617 *
3618 * Returns 0 on success, negative on failure
3619 *
3620 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
3621 * The OS initialization, configuring of the adapter private structure,
3622 * and a hardware reset occur.
3623 **/
3624static int __devinit ixgbe_probe(struct pci_dev *pdev,
3625 const struct pci_device_id *ent)
3626{
3627 struct net_device *netdev;
3628 struct ixgbe_adapter *adapter = NULL;
3629 struct ixgbe_hw *hw;
3630 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
Auke Kok9a799d72007-09-15 14:07:45 -07003631 static int cards_found;
3632 int i, err, pci_using_dac;
3633 u16 link_status, link_speed, link_width;
3634 u32 part_num;
3635
3636 err = pci_enable_device(pdev);
3637 if (err)
3638 return err;
3639
3640 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
3641 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
3642 pci_using_dac = 1;
3643 } else {
3644 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3645 if (err) {
3646 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3647 if (err) {
3648 dev_err(&pdev->dev, "No usable DMA "
3649 "configuration, aborting\n");
3650 goto err_dma;
3651 }
3652 }
3653 pci_using_dac = 0;
3654 }
3655
3656 err = pci_request_regions(pdev, ixgbe_driver_name);
3657 if (err) {
3658 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3659 goto err_pci_reg;
3660 }
3661
3662 pci_set_master(pdev);
Wendy Xiongfb3b27b2008-04-23 11:09:24 -07003663 pci_save_state(pdev);
Auke Kok9a799d72007-09-15 14:07:45 -07003664
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08003665 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
Auke Kok9a799d72007-09-15 14:07:45 -07003666 if (!netdev) {
3667 err = -ENOMEM;
3668 goto err_alloc_etherdev;
3669 }
3670
Auke Kok9a799d72007-09-15 14:07:45 -07003671 SET_NETDEV_DEV(netdev, &pdev->dev);
3672
3673 pci_set_drvdata(pdev, netdev);
3674 adapter = netdev_priv(netdev);
3675
3676 adapter->netdev = netdev;
3677 adapter->pdev = pdev;
3678 hw = &adapter->hw;
3679 hw->back = adapter;
3680 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3681
Jeff Kirsher05857982008-09-11 19:57:00 -07003682 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3683 pci_resource_len(pdev, 0));
Auke Kok9a799d72007-09-15 14:07:45 -07003684 if (!hw->hw_addr) {
3685 err = -EIO;
3686 goto err_ioremap;
3687 }
3688
3689 for (i = 1; i <= 5; i++) {
3690 if (pci_resource_len(pdev, i) == 0)
3691 continue;
3692 }
3693
3694 netdev->open = &ixgbe_open;
3695 netdev->stop = &ixgbe_close;
3696 netdev->hard_start_xmit = &ixgbe_xmit_frame;
3697 netdev->get_stats = &ixgbe_get_stats;
Christopher Leech2c5645c2008-08-26 04:27:02 -07003698 netdev->set_rx_mode = &ixgbe_set_rx_mode;
3699 netdev->set_multicast_list = &ixgbe_set_rx_mode;
Auke Kok9a799d72007-09-15 14:07:45 -07003700 netdev->set_mac_address = &ixgbe_set_mac;
3701 netdev->change_mtu = &ixgbe_change_mtu;
3702 ixgbe_set_ethtool_ops(netdev);
3703 netdev->tx_timeout = &ixgbe_tx_timeout;
3704 netdev->watchdog_timeo = 5 * HZ;
Auke Kok9a799d72007-09-15 14:07:45 -07003705 netdev->vlan_rx_register = ixgbe_vlan_rx_register;
3706 netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
3707 netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
3708#ifdef CONFIG_NET_POLL_CONTROLLER
3709 netdev->poll_controller = ixgbe_netpoll;
3710#endif
3711 strcpy(netdev->name, pci_name(pdev));
3712
Auke Kok9a799d72007-09-15 14:07:45 -07003713 adapter->bd_number = cards_found;
3714
3715 /* PCI config space info */
3716 hw->vendor_id = pdev->vendor;
3717 hw->device_id = pdev->device;
3718 hw->revision_id = pdev->revision;
3719 hw->subsystem_vendor_id = pdev->subsystem_vendor;
3720 hw->subsystem_device_id = pdev->subsystem_device;
3721
3722 /* Setup hw api */
3723 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003724 hw->mac.type = ii->mac;
Auke Kok9a799d72007-09-15 14:07:45 -07003725
3726 err = ii->get_invariants(hw);
3727 if (err)
3728 goto err_hw_init;
3729
3730 /* setup the private structure */
3731 err = ixgbe_sw_init(adapter);
3732 if (err)
3733 goto err_sw_init;
3734
3735 netdev->features = NETIF_F_SG |
Jesse Brandeburg22f32b7a52008-08-26 04:27:18 -07003736 NETIF_F_IP_CSUM |
Auke Kok9a799d72007-09-15 14:07:45 -07003737 NETIF_F_HW_VLAN_TX |
3738 NETIF_F_HW_VLAN_RX |
3739 NETIF_F_HW_VLAN_FILTER;
3740
Jesse Brandeburge9990a92008-08-26 04:27:24 -07003741 netdev->features |= NETIF_F_IPV6_CSUM;
Auke Kok9a799d72007-09-15 14:07:45 -07003742 netdev->features |= NETIF_F_TSO;
Auke Kok9a799d72007-09-15 14:07:45 -07003743 netdev->features |= NETIF_F_TSO6;
Jesse Brandeburge9990a92008-08-26 04:27:24 -07003744 netdev->features |= NETIF_F_LRO;
Jeff Kirsherad31c402008-06-05 04:05:30 -07003745
3746 netdev->vlan_features |= NETIF_F_TSO;
3747 netdev->vlan_features |= NETIF_F_TSO6;
Jesse Brandeburg22f32b7a52008-08-26 04:27:18 -07003748 netdev->vlan_features |= NETIF_F_IP_CSUM;
Jeff Kirsherad31c402008-06-05 04:05:30 -07003749 netdev->vlan_features |= NETIF_F_SG;
3750
Auke Kok9a799d72007-09-15 14:07:45 -07003751 if (pci_using_dac)
3752 netdev->features |= NETIF_F_HIGHDMA;
3753
Auke Kok9a799d72007-09-15 14:07:45 -07003754 /* make sure the EEPROM is good */
3755 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
3756 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
3757 err = -EIO;
3758 goto err_eeprom;
3759 }
3760
3761 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
3762 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
3763
3764 if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
3765 err = -EIO;
3766 goto err_eeprom;
3767 }
3768
3769 init_timer(&adapter->watchdog_timer);
3770 adapter->watchdog_timer.function = &ixgbe_watchdog;
3771 adapter->watchdog_timer.data = (unsigned long)adapter;
3772
3773 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003774 INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07003775
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003776 err = ixgbe_init_interrupt_scheme(adapter);
3777 if (err)
3778 goto err_sw_init;
Auke Kok9a799d72007-09-15 14:07:45 -07003779
3780 /* print bus type/speed/width info */
3781 pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
3782 link_speed = link_status & IXGBE_PCI_LINK_SPEED;
3783 link_width = link_status & IXGBE_PCI_LINK_WIDTH;
3784 dev_info(&pdev->dev, "(PCI Express:%s:%s) "
3785 "%02x:%02x:%02x:%02x:%02x:%02x\n",
3786 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
3787 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
3788 "Unknown"),
3789 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
3790 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
3791 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
3792 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
3793 "Unknown"),
3794 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
3795 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
3796 ixgbe_read_part_num(hw, &part_num);
3797 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
3798 hw->mac.type, hw->phy.type,
3799 (part_num >> 8), (part_num & 0xff));
3800
Auke Kok0c254d82008-02-11 09:25:56 -08003801 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
3802 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
3803 "this card is not sufficient for optimal "
3804 "performance.\n");
3805 dev_warn(&pdev->dev, "For optimal performance a x8 "
3806 "PCI-Express slot is required.\n");
3807 }
3808
Auke Kok9a799d72007-09-15 14:07:45 -07003809 /* reset the hardware with the new settings */
3810 ixgbe_start_hw(hw);
3811
3812 netif_carrier_off(netdev);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07003813 netif_tx_stop_all_queues(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07003814
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003815 ixgbe_napi_add_all(adapter);
3816
Auke Kok9a799d72007-09-15 14:07:45 -07003817 strcpy(netdev->name, "eth%d");
3818 err = register_netdev(netdev);
3819 if (err)
3820 goto err_register;
3821
Jesse Brandeburga1f96ee2008-09-11 19:54:48 -07003822#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
Denis V. Lunev652f0932008-03-27 14:39:17 +03003823 if (dca_add_requester(&pdev->dev) == 0) {
Jeb Cramerbd0362d2008-03-03 15:04:02 -08003824 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
3825 /* always use CB2 mode, difference is masked
3826 * in the CB driver */
3827 IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
3828 ixgbe_setup_dca(adapter);
3829 }
3830#endif
Auke Kok9a799d72007-09-15 14:07:45 -07003831
3832 dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
3833 cards_found++;
3834 return 0;
3835
3836err_register:
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08003837 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003838err_hw_init:
3839err_sw_init:
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003840 ixgbe_reset_interrupt_capability(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003841err_eeprom:
3842 iounmap(hw->hw_addr);
3843err_ioremap:
3844 free_netdev(netdev);
3845err_alloc_etherdev:
3846 pci_release_regions(pdev);
3847err_pci_reg:
3848err_dma:
3849 pci_disable_device(pdev);
3850 return err;
3851}
3852
3853/**
3854 * ixgbe_remove - Device Removal Routine
3855 * @pdev: PCI device information struct
3856 *
3857 * ixgbe_remove is called by the PCI subsystem to alert the driver
3858 * that it should release a PCI device. This could be caused by a
3859 * Hot-Plug event, or because the driver is going to be removed from
3860 * memory.
3861 **/
3862static void __devexit ixgbe_remove(struct pci_dev *pdev)
3863{
3864 struct net_device *netdev = pci_get_drvdata(pdev);
3865 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3866
3867 set_bit(__IXGBE_DOWN, &adapter->state);
3868 del_timer_sync(&adapter->watchdog_timer);
3869
3870 flush_scheduled_work();
3871
Jesse Brandeburga1f96ee2008-09-11 19:54:48 -07003872#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
Jeb Cramerbd0362d2008-03-03 15:04:02 -08003873 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3874 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
3875 dca_remove_requester(&pdev->dev);
3876 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
3877 }
3878
3879#endif
Auke Kok9a799d72007-09-15 14:07:45 -07003880 unregister_netdev(netdev);
3881
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003882 ixgbe_reset_interrupt_capability(adapter);
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08003883
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003884 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003885
3886 iounmap(adapter->hw.hw_addr);
3887 pci_release_regions(pdev);
3888
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003889 DPRINTK(PROBE, INFO, "complete\n");
3890 kfree(adapter->tx_ring);
3891 kfree(adapter->rx_ring);
3892
Auke Kok9a799d72007-09-15 14:07:45 -07003893 free_netdev(netdev);
3894
3895 pci_disable_device(pdev);
3896}
3897
3898/**
3899 * ixgbe_io_error_detected - called when PCI error is detected
3900 * @pdev: Pointer to PCI device
3901 * @state: The current pci connection state
3902 *
3903 * This function is called after a PCI bus error affecting
3904 * this device has been detected.
3905 */
3906static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
3907 pci_channel_state_t state)
3908{
3909 struct net_device *netdev = pci_get_drvdata(pdev);
3910	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3911
3912 netif_device_detach(netdev);
3913
3914 if (netif_running(netdev))
3915 ixgbe_down(adapter);
3916 pci_disable_device(pdev);
3917
3918	/* Request a slot reset. */
3919 return PCI_ERS_RESULT_NEED_RESET;
3920}
3921
3922/**
3923 * ixgbe_io_slot_reset - called after the pci bus has been reset.
3924 * @pdev: Pointer to PCI device
3925 *
3926 * Restart the card from scratch, as if from a cold-boot.
3927 */
3928static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
3929{
3930 struct net_device *netdev = pci_get_drvdata(pdev);
3931	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3932
3933 if (pci_enable_device(pdev)) {
3934 DPRINTK(PROBE, ERR,
3935 "Cannot re-enable PCI device after reset.\n");
3936 return PCI_ERS_RESULT_DISCONNECT;
3937 }
3938 pci_set_master(pdev);
Wendy Xiongfb3b27b2008-04-23 11:09:24 -07003939 pci_restore_state(pdev);
Auke Kok9a799d72007-09-15 14:07:45 -07003940
3941 pci_enable_wake(pdev, PCI_D3hot, 0);
3942 pci_enable_wake(pdev, PCI_D3cold, 0);
3943
3944 ixgbe_reset(adapter);
3945
3946 return PCI_ERS_RESULT_RECOVERED;
3947}
3948
3949/**
3950 * ixgbe_io_resume - called when traffic can start flowing again.
3951 * @pdev: Pointer to PCI device
3952 *
3953 * This callback is called when the error recovery driver tells us that
3954 * it's OK to resume normal operation.
3955 */
3956static void ixgbe_io_resume(struct pci_dev *pdev)
3957{
3958 struct net_device *netdev = pci_get_drvdata(pdev);
3959	struct ixgbe_adapter *adapter = netdev_priv(netdev);
3960
3961 if (netif_running(netdev)) {
3962 if (ixgbe_up(adapter)) {
3963 DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
3964 return;
3965 }
3966 }
3967
3968 netif_device_attach(netdev);
3970}
3971
3972static struct pci_error_handlers ixgbe_err_handler = {
3973 .error_detected = ixgbe_io_error_detected,
3974 .slot_reset = ixgbe_io_slot_reset,
3975 .resume = ixgbe_io_resume,
3976};
3977
3978static struct pci_driver ixgbe_driver = {
3979 .name = ixgbe_driver_name,
3980 .id_table = ixgbe_pci_tbl,
3981 .probe = ixgbe_probe,
3982 .remove = __devexit_p(ixgbe_remove),
3983#ifdef CONFIG_PM
3984 .suspend = ixgbe_suspend,
3985 .resume = ixgbe_resume,
3986#endif
3987 .shutdown = ixgbe_shutdown,
3988 .err_handler = &ixgbe_err_handler
3989};
3990
3991/**
3992 * ixgbe_init_module - Driver Registration Routine
3993 *
3994 * ixgbe_init_module is the first routine called when the driver is
3995 * loaded. All it does is register with the PCI subsystem.
3996 **/
3997static int __init ixgbe_init_module(void)
3998{
3999 int ret;
4000 printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
4001 ixgbe_driver_string, ixgbe_driver_version);
4002
4003 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
4004
Jesse Brandeburga1f96ee2008-09-11 19:54:48 -07004005#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
Jeb Cramerbd0362d2008-03-03 15:04:02 -08004006 dca_register_notify(&dca_notifier);
4007
4008#endif
Auke Kok9a799d72007-09-15 14:07:45 -07004009 ret = pci_register_driver(&ixgbe_driver);
4010 return ret;
4011}
4012module_init(ixgbe_init_module);
4013
4014/**
4015 * ixgbe_exit_module - Driver Exit Cleanup Routine
4016 *
4017 * ixgbe_exit_module is called just before the driver is removed
4018 * from memory.
4019 **/
4020static void __exit ixgbe_exit_module(void)
4021{
Jesse Brandeburga1f96ee2008-09-11 19:54:48 -07004022#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
Jeb Cramerbd0362d2008-03-03 15:04:02 -08004023 dca_unregister_notify(&dca_notifier);
4024#endif
Auke Kok9a799d72007-09-15 14:07:45 -07004025 pci_unregister_driver(&ixgbe_driver);
4026}
Jeb Cramerbd0362d2008-03-03 15:04:02 -08004027
Jesse Brandeburga1f96ee2008-09-11 19:54:48 -07004028#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
Jeb Cramerbd0362d2008-03-03 15:04:02 -08004029static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
4030 void *p)
4031{
4032 int ret_val;
4033
4034 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
4035 __ixgbe_notify_dca);
4036
4037 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
4038}
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07004039#endif /* CONFIG_DCA or CONFIG_DCA_MODULE */
Jeb Cramerbd0362d2008-03-03 15:04:02 -08004040
Auke Kok9a799d72007-09-15 14:07:45 -07004041module_exit(ixgbe_exit_module);
4042
4043/* ixgbe_main.c */