/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.3.18-k4"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
        "Copyright (c) 1999-2007 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
         board_82598 },

        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next = NULL,
        .priority = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
        struct ixgbe_adapter *adapter = hw->back;
        struct net_device *netdev = adapter->netdev;
        return netdev->name;
}
#endif

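/*
 * Each IVAR register holds four 8-bit interrupt-allocation entries, so
 * (int_alloc_entry >> 2) selects the register and (int_alloc_entry & 0x3)
 * selects the byte lane within it; e.g. int_alloc_entry 5 lands in
 * IVAR(1), bits 15:8.
 */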
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
                           u8 msix_vector)
{
        u32 ivar, index;

        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
        index = (int_alloc_entry >> 2) & 0x1F;
        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
        ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
        ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
        if (tx_buffer_info->dma) {
                pci_unmap_page(adapter->pdev,
                               tx_buffer_info->dma,
                               tx_buffer_info->length, PCI_DMA_TODEVICE);
                tx_buffer_info->dma = 0;
        }
        if (tx_buffer_info->skb) {
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        /* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop,
                                       union ixgbe_adv_tx_desc *eop_desc)
{
        /* Detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of i */
        adapter->detect_tx_hung = false;
        if (tx_ring->tx_buffer_info[eop].dma &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
                /* detected Tx unit hang */
                DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
                        "  TDH                  <%x>\n"
                        "  TDT                  <%x>\n"
                        "  next_to_use          <%x>\n"
                        "  next_to_clean        <%x>\n"
                        "tx_buffer_info[next_to_clean]\n"
                        "  time_stamp           <%lx>\n"
                        "  next_to_watch        <%x>\n"
                        "  jiffies              <%lx>\n"
                        "  next_to_watch.status <%x>\n",
                        readl(adapter->hw.hw_addr + tx_ring->head),
                        readl(adapter->hw.hw_addr + tx_ring->tail),
                        tx_ring->next_to_use,
                        tx_ring->next_to_clean,
                        tx_ring->tx_buffer_info[eop].time_stamp,
                        eop, jiffies, eop_desc->wb.status);
                return true;
        }

        return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                          (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
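/*
 * Worked example: a single 60KB payload fragment needs
 * TXD_USE_COUNT(61440) = 4 descriptors of at most 16KB of data each;
 * DESC_NEEDED is the worst case for a maximally fragmented skb plus
 * one context descriptor.
 */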

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *tx_ring)
{
        struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int i, eop;
        bool cleaned = false;
        unsigned int total_tx_bytes = 0, total_tx_packets = 0;

        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
        while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
                cleaned = false;
                while (!cleaned) {
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);

                        tx_ring->stats.bytes += tx_buffer_info->length;
                        if (cleaned) {
                                struct sk_buff *skb = tx_buffer_info->skb;
                                unsigned int segs, bytecount;
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
                                            skb->len;
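                                /* e.g. a 4-segment TSO skb accounts for
                                 * the three extra header copies that go
                                 * on the wire */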
                                total_tx_packets += segs;
                                total_tx_bytes += bytecount;
                        }
                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);
                        tx_desc->wb.status = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }

                tx_ring->stats.packets++;

                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

                /* weight of a sort for tx, avoid endless transmit cleanup */
                if (total_tx_packets >= tx_ring->work_limit)
                        break;
        }

        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
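        /* wake the queue only once two worst-case frames fit, so it is
         * not stopped again immediately after restarting */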
        if (total_tx_packets && netif_carrier_ok(netdev) &&
            (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
                        adapter->restart_queue++;
                }
        }

        if (adapter->detect_tx_hung)
                if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
                        netif_stop_subqueue(netdev, tx_ring->queue_index);

        if (total_tx_packets >= tx_ring->work_limit)
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);

        tx_ring->total_bytes += total_tx_bytes;
        tx_ring->total_packets += total_tx_packets;
        adapter->net_stats.tx_bytes += total_tx_bytes;
        adapter->net_stats.tx_packets += total_tx_packets;
        cleaned = total_tx_packets ? true : false;
        return cleaned;
}

#ifdef CONFIG_DCA
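/*
 * Direct Cache Access (DCA) tags the device's descriptor writes with the
 * CPU that will consume them, letting the chipset warm that CPU's cache
 * ahead of the interrupt handler.
 */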
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rxr)
{
        u32 rxctrl;
        int cpu = get_cpu();
        int q = rxr - adapter->rx_ring;

        if (rxr->cpu != cpu) {
                rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
                rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
                rxctrl |= dca_get_tag(cpu);
                rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
                rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
                rxr->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *txr)
{
        u32 txctrl;
        int cpu = get_cpu();
        int q = txr - adapter->tx_ring;

        if (txr->cpu != cpu) {
                txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
                txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
                txctrl |= dca_get_tag(cpu);
                txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
                txr->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
        int i;

        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
                return;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].cpu = -1;
                ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i].cpu = -1;
                ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
        }
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        unsigned long event = *(unsigned long *)data;

        switch (event) {
        case DCA_PROVIDER_ADD:
                adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                /* Always use CB2 mode, difference is masked
                 * in the CB driver. */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
                if (dca_add_requester(dev) == 0) {
                        ixgbe_setup_dca(adapter);
                        break;
                }
                /* Fall Through since DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                        dca_remove_requester(dev);
                        adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
                }
                break;
        }

        return 0;
}

#endif /* CONFIG_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

        if (adapter->netdev->features & NETIF_F_LRO &&
            skb->ip_summed == CHECKSUM_UNNECESSARY) {
                if (adapter->vlgrp && is_vlan)
                        lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
                                                     adapter->vlgrp, tag,
                                                     rx_desc);
                else
                        lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
                ring->lro_used = true;
        } else {
                if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                        if (adapter->vlgrp && is_vlan)
                                vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
                        else
                                netif_receive_skb(skb);
                } else {
                        if (adapter->vlgrp && is_vlan)
                                vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                        else
                                netif_rx(skb);
                }
        }
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     u32 status_err, struct sk_buff *skb)
{
        skb->ip_summed = CHECKSUM_NONE;

        /* Rx csum disabled */
        if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;

        /* if IP and error */
        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }

        if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;

        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                adapter->hw_csum_rx_error++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *rx_buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;

        i = rx_ring->next_to_use;
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

                if (!rx_buffer_info->page &&
                    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
                        rx_buffer_info->page = alloc_page(GFP_ATOMIC);
                        if (!rx_buffer_info->page) {
                                adapter->alloc_rx_page_failed++;
                                goto no_buffers;
                        }
                        rx_buffer_info->page_dma =
                            pci_map_page(pdev, rx_buffer_info->page,
                                         0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
                }

                if (!rx_buffer_info->skb) {
                        skb = netdev_alloc_skb(netdev, bufsz);

                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        /*
                         * Make buffer alignment 2 beyond a 16 byte boundary
                         * this will result in a 16 byte aligned IP header after
                         * the 14 byte MAC header is removed
                         */
                        skb_reserve(skb, NET_IP_ALIGN);

                        rx_buffer_info->skb = skb;
                        rx_buffer_info->dma = pci_map_single(pdev, skb->data,
                                                             bufsz,
                                                             PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr =
                            cpu_to_le64(rx_buffer_info->page_dma);
                        rx_desc->read.hdr_addr =
                            cpu_to_le64(rx_buffer_info->dma);
                } else {
                        rx_desc->read.pkt_addr =
                            cpu_to_le64(rx_buffer_info->dma);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                rx_buffer_info = &rx_ring->rx_buffer_info[i];
        }
no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
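                /* hardware owns descriptors up to and including the tail
                 * register, so point tail at the last initialized
                 * descriptor, one behind next_to_use */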
                if (i-- == 0)
                        i = (rx_ring->count - 1);

                /*
                 * Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                writel(i, adapter->hw.hw_addr + rx_ring->tail);
        }
}

static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i;
        u32 upper_len, len, staterr;
        u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;

        i = rx_ring->next_to_clean;
        upper_len = 0;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (staterr & IXGBE_RXD_STAT_DD) {
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;

                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        hdr_info =
                            le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
                        len =
                            ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                             IXGBE_RXDADV_HDRBUFLEN_SHIFT);
                        if (hdr_info & IXGBE_RXDADV_SPH)
                                adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                } else
                        len = le16_to_cpu(rx_desc->wb.upper.length);

                cleaned = true;
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                if (len && !skb_shinfo(skb)->nr_frags) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         adapter->rx_buf_len + NET_IP_ALIGN,
                                         PCI_DMA_FROMDEVICE);
                        skb_put(skb, len);
                }

                if (upper_len) {
                        pci_unmap_page(pdev, rx_buffer_info->page_dma,
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page, 0, upper_len);
                        rx_buffer_info->page = NULL;

                        skb->len += upper_len;
                        skb->data_len += upper_len;
                        skb->truesize += upper_len;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                next_buffer = &rx_ring->rx_buffer_info[i];

                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);

                cleaned_count++;
                if (staterr & IXGBE_RXD_STAT_EOP) {
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
                        rx_buffer_info->skb = next_buffer->skb;
                        rx_buffer_info->dma = next_buffer->dma;
                        next_buffer->skb = skb;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }

                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbe_rx_checksum(adapter, staterr, skb);

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                skb->protocol = eth_type_trans(skb, netdev);
                ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);
                netdev->last_rx = jiffies;

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = next_buffer;

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        if (rx_ring->lro_used) {
                lro_flush_all(&rx_ring->lro_mgr);
                rx_ring->lro_used = false;
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        adapter->net_stats.rx_bytes += total_rx_bytes;
        adapter->net_stats.rx_packets += total_rx_packets;

        return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
        struct ixgbe_q_vector *q_vector;
        int i, j, q_vectors, v_idx, r_idx;
        u32 mask;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                q_vector = &adapter->q_vector[v_idx];
                /* XXX for_each_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                       adapter->num_rx_queues);

                for (i = 0; i < q_vector->rxr_count; i++) {
                        j = adapter->rx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
                }
                r_idx = find_first_bit(q_vector->txr_idx,
                                       adapter->num_tx_queues);

                for (i = 0; i < q_vector->txr_count; i++) {
                        j = adapter->tx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
                }

                /* if this is a tx only vector use half the irq (tx) rate */
                if (q_vector->txr_count && !q_vector->rxr_count)
                        q_vector->eitr = adapter->tx_eitr;
                else
                        /* rx only or mixed */
                        q_vector->eitr = adapter->rx_eitr;

                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
                                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
        }

        ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

        /* set up to autoclear timer, lsc, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
        mask &= ~IXGBE_EIMS_OTHER;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
        unsigned int retval = itr_setting;
        u32 timepassed_us;
        u64 bytes_perint;

        if (packets == 0)
                goto update_itr_done;

        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = 1000000/eitr;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */
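        /* e.g. at 20000 ints/s each timeslice is 50 usec; 3000 bytes in
         * one slice is 60 bytes/usec, i.e. ~60MB/s, which falls in the
         * low latency band of the table above */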

        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > adapter->eitr_low)
                        retval = low_latency;
                break;
        case low_latency:
                if (bytes_perint > adapter->eitr_high)
                        retval = bulk_latency;
                else if (bytes_perint <= adapter->eitr_low)
                        retval = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= adapter->eitr_high)
                        retval = low_latency;
                break;
        }

update_itr_done:
        return retval;
}

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 new_itr;
        u8 current_itr, ret_itr;
        int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
                              sizeof(struct ixgbe_q_vector);
        struct ixgbe_ring *rx_ring, *tx_ring;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->tx_eitr,
                                           tx_ring->total_packets,
                                           tx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ?
                                     q_vector->tx_eitr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->rx_eitr,
                                           rx_ring->total_packets,
                                           rx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ?
                                     q_vector->rx_eitr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
        default:
                new_itr = 8000;
                break;
        }

        if (new_itr != q_vector->eitr) {
                u32 itr_reg;
                /* do an exponential smoothing */
868 q_vector->eitr = new_itr;
869 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
870 /* must write high and low 16 bits to reset counter */
871 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
872 itr_reg);
873 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
874 }
875
876 return;
877}
878
Auke Kok9a799d72007-09-15 14:07:45 -0700879static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
880{
881 struct net_device *netdev = data;
882 struct ixgbe_adapter *adapter = netdev_priv(netdev);
883 struct ixgbe_hw *hw = &adapter->hw;
884 u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
885
886 if (eicr & IXGBE_EICR_LSC) {
887 adapter->lsc_int++;
888 if (!test_bit(__IXGBE_DOWN, &adapter->state))
889 mod_timer(&adapter->watchdog_timer, jiffies);
890 }
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -0800891
892 if (!test_bit(__IXGBE_DOWN, &adapter->state))
893 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
Auke Kok9a799d72007-09-15 14:07:45 -0700894
895 return IRQ_HANDLED;
896}
897
898static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
899{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800900 struct ixgbe_q_vector *q_vector = data;
901 struct ixgbe_adapter *adapter = q_vector->adapter;
902 struct ixgbe_ring *txr;
903 int i, r_idx;
Auke Kok9a799d72007-09-15 14:07:45 -0700904
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800905 if (!q_vector->txr_count)
906 return IRQ_HANDLED;
907
908 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
909 for (i = 0; i < q_vector->txr_count; i++) {
910 txr = &(adapter->tx_ring[r_idx]);
Jeb Cramerbd0362d2008-03-03 15:04:02 -0800911#ifdef CONFIG_DCA
912 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
913 ixgbe_update_tx_dca(adapter, txr);
914#endif
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -0800915 txr->total_bytes = 0;
916 txr->total_packets = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800917 ixgbe_clean_tx_irq(adapter, txr);
918 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
919 r_idx + 1);
920 }
921
Auke Kok9a799d72007-09-15 14:07:45 -0700922 return IRQ_HANDLED;
923}
924
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800925/**
926 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
927 * @irq: unused
928 * @data: pointer to our q_vector struct for this interrupt vector
929 **/
Auke Kok9a799d72007-09-15 14:07:45 -0700930static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
931{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800932 struct ixgbe_q_vector *q_vector = data;
933 struct ixgbe_adapter *adapter = q_vector->adapter;
934 struct ixgbe_ring *rxr;
935 int r_idx;
Auke Kok9a799d72007-09-15 14:07:45 -0700936
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800937 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
938 if (!q_vector->rxr_count)
939 return IRQ_HANDLED;
940
941 rxr = &(adapter->rx_ring[r_idx]);
942 /* disable interrupts on this vector only */
943 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -0800944 rxr->total_bytes = 0;
945 rxr->total_packets = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800946 netif_rx_schedule(adapter->netdev, &q_vector->napi);
947
Auke Kok9a799d72007-09-15 14:07:45 -0700948 return IRQ_HANDLED;
949}
950
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800951static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
952{
953 ixgbe_msix_clean_rx(irq, data);
954 ixgbe_msix_clean_tx(irq, data);
955
956 return IRQ_HANDLED;
957}
958
959/**
960 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
961 * @napi: napi struct with our devices info in it
962 * @budget: amount of work driver is allowed to do this pass, in packets
963 *
964 **/
Auke Kok9a799d72007-09-15 14:07:45 -0700965static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
966{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800967 struct ixgbe_q_vector *q_vector =
968 container_of(napi, struct ixgbe_q_vector, napi);
969 struct ixgbe_adapter *adapter = q_vector->adapter;
970 struct ixgbe_ring *rxr;
Auke Kok9a799d72007-09-15 14:07:45 -0700971 int work_done = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800972 long r_idx;
Auke Kok9a799d72007-09-15 14:07:45 -0700973
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800974 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
975 rxr = &(adapter->rx_ring[r_idx]);
Jeb Cramerbd0362d2008-03-03 15:04:02 -0800976#ifdef CONFIG_DCA
977 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
978 ixgbe_update_rx_dca(adapter, rxr);
979#endif
Auke Kok9a799d72007-09-15 14:07:45 -0700980
981 ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);
982
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800983 /* If all Rx work done, exit the polling mode */
984 if (work_done < budget) {
985 netif_rx_complete(adapter->netdev, napi);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -0800986 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
987 ixgbe_set_itr_msix(q_vector);
Auke Kok9a799d72007-09-15 14:07:45 -0700988 if (!test_bit(__IXGBE_DOWN, &adapter->state))
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800989 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);
Auke Kok9a799d72007-09-15 14:07:45 -0700990 }
991
992 return work_done;
993}
994
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800995static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
996 int r_idx)
Auke Kok9a799d72007-09-15 14:07:45 -0700997{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -0800998 a->q_vector[v_idx].adapter = a;
999 set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
1000 a->q_vector[v_idx].rxr_count++;
1001 a->rx_ring[r_idx].v_idx = 1 << v_idx;
1002}
Auke Kok9a799d72007-09-15 14:07:45 -07001003
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001004static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1005 int r_idx)
1006{
1007 a->q_vector[v_idx].adapter = a;
1008 set_bit(r_idx, a->q_vector[v_idx].txr_idx);
1009 a->q_vector[v_idx].txr_count++;
1010 a->tx_ring[r_idx].v_idx = 1 << v_idx;
1011}
Auke Kok9a799d72007-09-15 14:07:45 -07001012
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001013/**
1014 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
1015 * @adapter: board private structure to initialize
1016 * @vectors: allotted vector count for descriptor rings
1017 *
1018 * This function maps descriptor rings to the queue-specific vectors
1019 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1020 * one vector per ring/queue, but on a constrained vector budget, we
1021 * group the rings as "efficiently" as possible. You would add new
1022 * mapping configurations in here.
1023 **/
1024static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
1025 int vectors)
1026{
1027 int v_start = 0;
1028 int rxr_idx = 0, txr_idx = 0;
1029 int rxr_remaining = adapter->num_rx_queues;
1030 int txr_remaining = adapter->num_tx_queues;
1031 int i, j;
1032 int rqpv, tqpv;
1033 int err = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001034
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001035 /* No mapping required if MSI-X is disabled. */
1036 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
Auke Kok9a799d72007-09-15 14:07:45 -07001037 goto out;
1038
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001039 /*
1040 * The ideal configuration...
1041 * We have enough vectors to map one per queue.
1042 */
1043 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1044 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1045 map_vector_to_rxq(adapter, v_start, rxr_idx);
1046
1047 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1048 map_vector_to_txq(adapter, v_start, txr_idx);
1049
1050 goto out;
1051 }
1052
1053 /*
1054 * If we don't have enough vectors for a 1-to-1
1055 * mapping, we'll have to group them so there are
1056 * multiple queues per vector.
1057 */
1058 /* Re-adjusting *qpv takes care of the remainder. */
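        /* e.g. 8 Rx and 8 Tx rings on 4 vectors: each vector ends up
         * servicing two Rx and two Tx rings */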
        for (i = v_start; i < vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}

/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        irqreturn_t (*handler)(int, void *);
        int i, vector, q_vectors, err;

        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* Map the Tx/Rx rings to the vectors we were allotted. */
        err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
        if (err)
                goto out;

#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
                         (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                         &ixgbe_msix_clean_many)
        for (vector = 0; vector < q_vectors; vector++) {
                handler = SET_HANDLER(&adapter->q_vector[vector]);
                sprintf(adapter->name[vector], "%s:v%d-%s",
                        netdev->name, vector,
                        (handler == &ixgbe_msix_clean_rx) ? "Rx" :
                        ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
                err = request_irq(adapter->msix_entries[vector].vector,
                                  handler, 0, adapter->name[vector],
                                  &(adapter->q_vector[vector]));
                if (err) {
                        DPRINTK(PROBE, ERR,
                                "request_irq failed for MSIX interrupt "
                                "Error: %d\n", err);
                        goto free_queue_irqs;
                }
        }

        sprintf(adapter->name[vector], "%s:lsc", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
                          &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
        if (err) {
                DPRINTK(PROBE, ERR,
                        "request_irq for msix_lsc failed: %d\n", err);
                goto free_queue_irqs;
        }

        return 0;

free_queue_irqs:
        for (i = vector - 1; i >= 0; i--)
                free_irq(adapter->msix_entries[--vector].vector,
                         &(adapter->q_vector[i]));
        adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
out:
        return err;
}

static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_q_vector *q_vector = adapter->q_vector;
        u8 current_itr;
        u32 new_itr = q_vector->eitr;
        struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
        struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];

        q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr,
                                             q_vector->tx_eitr,
                                             tx_ring->total_packets,
                                             tx_ring->total_bytes);
        q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr,
                                             q_vector->rx_eitr,
                                             rx_ring->total_packets,
                                             rx_ring->total_bytes);

        current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
                new_itr = 8000;
                break;
        default:
                break;
        }

        if (new_itr != q_vector->eitr) {
                u32 itr_reg;
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
                q_vector->eitr = new_itr;
                itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
                /* must write high and low 16 bits to reset counter */
                IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
        }

        return;
}

static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);

/**
 * ixgbe_intr - legacy mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr;

        /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
         * therefore no explicit interrupt disable is necessary */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
        if (!eicr)
                return IRQ_NONE; /* Not our interrupt */

        if (eicr & IXGBE_EICR_LSC) {
                adapter->lsc_int++;
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        mod_timer(&adapter->watchdog_timer, jiffies);
        }

        if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
                adapter->tx_ring[0].total_packets = 0;
                adapter->tx_ring[0].total_bytes = 0;
                adapter->rx_ring[0].total_packets = 0;
                adapter->rx_ring[0].total_bytes = 0;
                /* would disable interrupts here but EIAM disabled it */
                __netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
        }

        return IRQ_HANDLED;
}

static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < q_vectors; i++) {
                struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
                bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
                bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
                q_vector->rxr_count = 0;
                q_vector->txr_count = 0;
        }
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                err = ixgbe_request_msix_irqs(adapter);
        } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
                err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
                                  netdev->name, netdev);
        } else {
                err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
                                  netdev->name, netdev);
        }

        if (err)
                DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

        return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                int i, q_vectors;

                q_vectors = adapter->num_msix_vectors;

                i = q_vectors - 1;
                free_irq(adapter->msix_entries[i].vector, netdev);

                i--;
                for (; i >= 0; i--) {
                        free_irq(adapter->msix_entries[i].vector,
                                 &(adapter->q_vector[i]));
                }

                ixgbe_reset_q_vectors(adapter);
        } else {
                free_irq(adapter->pdev->irq, netdev);
        }
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                int i;
                for (i = 0; i < adapter->num_msix_vectors; i++)
                        synchronize_irq(adapter->msix_entries[i].vector);
        } else {
                synchronize_irq(adapter->pdev->irq);
        }
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
        u32 mask;
        mask = IXGBE_EIMS_ENABLE_MASK;
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
        IXGBE_WRITE_FLUSH(&adapter->hw);
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 *
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
                        EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));

        ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
        ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);

        map_vector_to_rxq(adapter, 0, 0);
        map_vector_to_txq(adapter, 0, 0);

        DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx - Configure 82598 Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
        u64 tdba;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 i, j, tdlen, txctrl;

        /* Setup the HW Tx Head and Tail descriptor pointers */
        for (i = 0; i < adapter->num_tx_queues; i++) {
                j = adapter->tx_ring[i].reg_idx;
                tdba = adapter->tx_ring[i].dma;
                tdlen = adapter->tx_ring[i].count *
                        sizeof(union ixgbe_adv_tx_desc);
                IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
                                (tdba & DMA_32BIT_MASK));
                IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
                IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
                IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
                adapter->tx_ring[i].head = IXGBE_TDH(j);
                adapter->tx_ring[i].tail = IXGBE_TDT(j);
                /* Disable Tx Head Writeback RO bit, since this hoses
                 * bookkeeping if things aren't delivered in order.
                 */
                txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
                txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
                IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
        }
}

#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
                           (((S) & (PAGE_SIZE - 1)) ? 1 : 0))

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
/**
 * ixgbe_get_skb_hdr - helper function for LRO header processing
 * @skb: pointer to sk_buff to be added to LRO packet
 * @iphdr: pointer to ip header structure
 * @tcph: pointer to tcp header structure
 * @hdr_flags: pointer to header flags
 * @priv: private data
 **/
static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
                             u64 *hdr_flags, void *priv)
{
        union ixgbe_adv_rx_desc *rx_desc = priv;

        /* Verify that this is a valid IPv4 TCP packet */
        if (!(rx_desc->wb.lower.lo_dword.pkt_info &
              (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)))
                return -1;

        /* Set network headers */
        skb_reset_network_header(skb);
        skb_set_transport_header(skb, ip_hdrlen(skb));
        *iphdr = ip_hdr(skb);
        *tcph = tcp_hdr(skb);
        *hdr_flags = LRO_IPV4 | LRO_TCP;
        return 0;
}

/**
 * ixgbe_configure_rx - Configure 82598 Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
        u64 rdba;
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        int i, j;
        u32 rdlen, rxctrl, rxcsum;
        u32 random[10];
        u32 fctrl, hlreg0;
        u32 pages;
        u32 reta = 0, mrqc, srrctl;

        /* Decide whether to use packet split mode or not */
        if (netdev->mtu > ETH_DATA_LEN)
                adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
        else
                adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;

        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
        } else {
                if (netdev->mtu <= ETH_DATA_LEN)
                        adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
                else
                        adapter->rx_buf_len = ALIGN(max_frame, 1024);
        }

        fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
        fctrl |= IXGBE_FCTRL_BAM;
        fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
        if (adapter->netdev->mtu <= ETH_DATA_LEN)
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
        else
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

        pages = PAGE_USE_COUNT(adapter->netdev->mtu);

        srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
                srrctl |= ((IXGBE_RX_HDR_SIZE <<
                            IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                           IXGBE_SRRCTL_BSIZEHDR_MASK);
        } else {
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

                if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
                        srrctl |=
                             IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                else
                        srrctl |=
                             adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        }
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);

        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
        /* disable receives while setting up the descriptors */
        rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
        IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rdba = adapter->rx_ring[i].dma;
                IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
                IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
                IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
                IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
                adapter->rx_ring[i].head = IXGBE_RDH(i);
                adapter->rx_ring[i].tail = IXGBE_RDT(i);

                /* Initial LRO settings, applied per ring */
                adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
                adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
                adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
                adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
                if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
                        adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
                adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
                adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
                adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
        }

        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
                /* Fill out redirection table */
                for (i = 0, j = 0; i < 128; i++, j++) {
                        if (j == adapter->ring_feature[RING_F_RSS].indices)
                                j = 0;
                        /* reta = 4-byte sliding window of
                         * 0x00..(indices-1)(indices-1)00..etc. */
1520 reta = (reta << 8) | (j * 0x11);
1521 if ((i & 3) == 3)
1522 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
Auke Kok9a799d72007-09-15 14:07:45 -07001523 }
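		/*
		 * Worked example (illustrative): with 4 RSS indices, j
		 * cycles 0,1,2,3 and j * 0x11 yields the bytes 0x00, 0x11,
		 * 0x22, 0x33; every fourth iteration the packed 32-bit
		 * value 0x00112233 is written, so the 128-entry table is a
		 * repeating pattern that spreads flows across the 4 queues.
		 */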
1524
1525 /* Fill out hash function seeds */
		/* XXX: seed from a fixed constant instead if specific flows
		 * must map to the same queues across resets */
1527 get_random_bytes(&random[0], 40);
Auke Kok9a799d72007-09-15 14:07:45 -07001528 for (i = 0; i < 10; i++)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001529 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07001530
1531 mrqc = IXGBE_MRQC_RSSEN
1532 /* Perform hash on these packet types */
1533 | IXGBE_MRQC_RSS_FIELD_IPV4
1534 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
1535 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
1536 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
1537 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
1538 | IXGBE_MRQC_RSS_FIELD_IPV6
1539 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
1540 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
1541 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
1542 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
Auke Kok9a799d72007-09-15 14:07:45 -07001543 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001544
1545 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1546
1547 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
1548 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
		/* Setting PCSD disables checksum indication in the
		 * descriptor and reports the RSS hash there instead */
1551 rxcsum |= IXGBE_RXCSUM_PCSD;
1552 }
1553 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
1554 /* Enable IPv4 payload checksum for UDP fragments
1555 * if PCSD is not set */
1556 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1557 }
1558
1559 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
Auke Kok9a799d72007-09-15 14:07:45 -07001560}
1561
1562static void ixgbe_vlan_rx_register(struct net_device *netdev,
1563 struct vlan_group *grp)
1564{
1565 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1566 u32 ctrl;
1567
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001568 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1569 ixgbe_irq_disable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001570 adapter->vlgrp = grp;
1571
1572 if (grp) {
1573 /* enable VLAN tag insert/strip */
1574 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
Patrick McHardy746b9f02008-07-16 20:15:45 -07001575 ctrl |= IXGBE_VLNCTRL_VME;
Auke Kok9a799d72007-09-15 14:07:45 -07001576 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1577 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1578 }
1579
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001580 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1581 ixgbe_irq_enable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001582}
1583
1584static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1585{
1586 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1587
1588 /* add VID to filter table */
1589 ixgbe_set_vfta(&adapter->hw, vid, 0, true);
1590}
1591
1592static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1593{
1594 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1595
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001596 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1597 ixgbe_irq_disable(adapter);
1598
Auke Kok9a799d72007-09-15 14:07:45 -07001599 vlan_group_set_device(adapter->vlgrp, vid, NULL);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001600
1601 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1602 ixgbe_irq_enable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001603
1604 /* remove VID from filter table */
1605 ixgbe_set_vfta(&adapter->hw, vid, 0, false);
1606}
1607
1608static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
1609{
1610 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1611
1612 if (adapter->vlgrp) {
1613 u16 vid;
1614 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1615 if (!vlan_group_get_device(adapter->vlgrp, vid))
1616 continue;
1617 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
1618 }
1619 }
1620}
1621
Christopher Leech2c5645c2008-08-26 04:27:02 -07001622static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
1623{
1624 struct dev_mc_list *mc_ptr;
1625 u8 *addr = *mc_addr_ptr;
1626 *vmdq = 0;
1627
1628 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
1629 if (mc_ptr->next)
1630 *mc_addr_ptr = mc_ptr->next->dmi_addr;
1631 else
1632 *mc_addr_ptr = NULL;
1633
1634 return addr;
1635}
1636
Auke Kok9a799d72007-09-15 14:07:45 -07001637/**
Christopher Leech2c5645c2008-08-26 04:27:02 -07001638 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
Auke Kok9a799d72007-09-15 14:07:45 -07001639 * @netdev: network interface device structure
1640 *
 * The set_rx_mode entry point is called whenever the unicast/multicast
1642 * address list or the network interface flags are updated. This routine is
1643 * responsible for configuring the hardware for proper unicast, multicast and
1644 * promiscuous mode.
Auke Kok9a799d72007-09-15 14:07:45 -07001645 **/
Christopher Leech2c5645c2008-08-26 04:27:02 -07001646static void ixgbe_set_rx_mode(struct net_device *netdev)
Auke Kok9a799d72007-09-15 14:07:45 -07001647{
1648 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1649 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck3d016252008-08-26 18:30:04 -07001650 u32 fctrl, vlnctrl;
Christopher Leech2c5645c2008-08-26 04:27:02 -07001651 u8 *addr_list = NULL;
1652 int addr_count = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001653
1654 /* Check for Promiscuous and All Multicast modes */
1655
1656 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
Alexander Duyck3d016252008-08-26 18:30:04 -07001657 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
Auke Kok9a799d72007-09-15 14:07:45 -07001658
1659 if (netdev->flags & IFF_PROMISC) {
Christopher Leech2c5645c2008-08-26 04:27:02 -07001660 hw->addr_ctrl.user_set_promisc = 1;
Auke Kok9a799d72007-09-15 14:07:45 -07001661 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
Alexander Duyck3d016252008-08-26 18:30:04 -07001662 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
Auke Kok9a799d72007-09-15 14:07:45 -07001663 } else {
Patrick McHardy746b9f02008-07-16 20:15:45 -07001664 if (netdev->flags & IFF_ALLMULTI) {
1665 fctrl |= IXGBE_FCTRL_MPE;
1666 fctrl &= ~IXGBE_FCTRL_UPE;
1667 } else {
1668 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1669 }
Alexander Duyck3d016252008-08-26 18:30:04 -07001670 vlnctrl |= IXGBE_VLNCTRL_VFE;
Christopher Leech2c5645c2008-08-26 04:27:02 -07001671 hw->addr_ctrl.user_set_promisc = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001672 }
1673
1674 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
Alexander Duyck3d016252008-08-26 18:30:04 -07001675 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
Auke Kok9a799d72007-09-15 14:07:45 -07001676
Christopher Leech2c5645c2008-08-26 04:27:02 -07001677 /* reprogram secondary unicast list */
1678 addr_count = netdev->uc_count;
1679 if (addr_count)
1680 addr_list = netdev->uc_list->dmi_addr;
1681 ixgbe_update_uc_addr_list(hw, addr_list, addr_count,
1682 ixgbe_addr_list_itr);
Auke Kok9a799d72007-09-15 14:07:45 -07001683
Christopher Leech2c5645c2008-08-26 04:27:02 -07001684 /* reprogram multicast list */
1685 addr_count = netdev->mc_count;
1686 if (addr_count)
1687 addr_list = netdev->mc_list->dmi_addr;
1688 ixgbe_update_mc_addr_list(hw, addr_list, addr_count,
1689 ixgbe_addr_list_itr);
Auke Kok9a799d72007-09-15 14:07:45 -07001690}
1691
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001692static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
1693{
1694 int q_idx;
1695 struct ixgbe_q_vector *q_vector;
1696 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1697
1698 /* legacy and MSI only use one vector */
1699 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1700 q_vectors = 1;
1701
1702 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1703 q_vector = &adapter->q_vector[q_idx];
1704 if (!q_vector->rxr_count)
1705 continue;
1706 napi_enable(&q_vector->napi);
1707 }
1708}
1709
1710static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
1711{
1712 int q_idx;
1713 struct ixgbe_q_vector *q_vector;
1714 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1715
1716 /* legacy and MSI only use one vector */
1717 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1718 q_vectors = 1;
1719
1720 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1721 q_vector = &adapter->q_vector[q_idx];
1722 if (!q_vector->rxr_count)
1723 continue;
1724 napi_disable(&q_vector->napi);
1725 }
1726}
1727
Auke Kok9a799d72007-09-15 14:07:45 -07001728static void ixgbe_configure(struct ixgbe_adapter *adapter)
1729{
1730 struct net_device *netdev = adapter->netdev;
1731 int i;
1732
Christopher Leech2c5645c2008-08-26 04:27:02 -07001733 ixgbe_set_rx_mode(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07001734
1735 ixgbe_restore_vlan(adapter);
1736
1737 ixgbe_configure_tx(adapter);
1738 ixgbe_configure_rx(adapter);
1739 for (i = 0; i < adapter->num_rx_queues; i++)
1740 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
1741 (adapter->rx_ring[i].count - 1));
1742}
1743
1744static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1745{
1746 struct net_device *netdev = adapter->netdev;
Auke Kok9a799d72007-09-15 14:07:45 -07001747 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001748 int i, j = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001749 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001750 u32 txdctl, rxdctl, mhadd;
1751 u32 gpie;
Auke Kok9a799d72007-09-15 14:07:45 -07001752
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08001753 ixgbe_get_hw_control(adapter);
1754
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001755 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
1756 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
Auke Kok9a799d72007-09-15 14:07:45 -07001757 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1758 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
1759 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
1760 } else {
1761 /* MSI only */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001762 gpie = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001763 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001764 /* XXX: to interrupt immediately for EICS writes, enable this */
1765 /* gpie |= IXGBE_GPIE_EIMEN; */
1766 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1767 }
1768
1769 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* For legacy interrupts, use EIAM to auto-mask when reading
		 * EICR; auto-mask only the Tx and Rx queue interrupts */
1772 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07001773 }
1774
1775 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
Auke Kok9a799d72007-09-15 14:07:45 -07001776 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
1777 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1778 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
1779
1780 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
1781 }
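	/*
	 * Worked example (illustrative): with the default MTU of 1500,
	 * max_frame is 1518 bytes; that value lands in the MFS field of
	 * MHADD via IXGBE_MHADD_MFS_SHIFT, assuming the field occupies
	 * the upper half of the register.
	 */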
1782
1783 for (i = 0; i < adapter->num_tx_queues; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001784 j = adapter->tx_ring[i].reg_idx;
1785 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
Auke Kok9a799d72007-09-15 14:07:45 -07001786 txdctl |= IXGBE_TXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001787 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07001788 }
1789
1790 for (i = 0; i < adapter->num_rx_queues; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001791 j = adapter->rx_ring[i].reg_idx;
1792 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
		/* enable PTHRESH=32 descriptors (half the internal cache)
		 * and HTHRESH=0 descriptors (to minimize latency on fetch);
		 * this also avoids a spurious rx_no_buffer_count increment */
1796 rxdctl |= 0x0020;
Auke Kok9a799d72007-09-15 14:07:45 -07001797 rxdctl |= IXGBE_RXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001798 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07001799 }
1800 /* enable all receives */
1801 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1802 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
1803 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);
1804
1805 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
1806 ixgbe_configure_msix(adapter);
1807 else
1808 ixgbe_configure_msi_and_legacy(adapter);
1809
1810 clear_bit(__IXGBE_DOWN, &adapter->state);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001811 ixgbe_napi_enable_all(adapter);
1812
1813 /* clear any pending interrupts, may auto mask */
1814 IXGBE_READ_REG(hw, IXGBE_EICR);
1815
Auke Kok9a799d72007-09-15 14:07:45 -07001816 ixgbe_irq_enable(adapter);
1817
	/* bring the link up in the watchdog; this could race with our first
	 * link-up interrupt, but that shouldn't be a problem */
1820 mod_timer(&adapter->watchdog_timer, jiffies);
1821 return 0;
1822}
1823
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001824void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
1825{
1826 WARN_ON(in_interrupt());
1827 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
1828 msleep(1);
1829 ixgbe_down(adapter);
1830 ixgbe_up(adapter);
1831 clear_bit(__IXGBE_RESETTING, &adapter->state);
1832}
1833
Auke Kok9a799d72007-09-15 14:07:45 -07001834int ixgbe_up(struct ixgbe_adapter *adapter)
1835{
1836 /* hardware has been reset, we need to reload some things */
1837 ixgbe_configure(adapter);
1838
1839 return ixgbe_up_complete(adapter);
1840}
1841
1842void ixgbe_reset(struct ixgbe_adapter *adapter)
1843{
1844 if (ixgbe_init_hw(&adapter->hw))
1845 DPRINTK(PROBE, ERR, "Hardware Error\n");
1846
1847 /* reprogram the RAR[0] in case user changed it. */
1848 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
1849
1850}
1851
1852#ifdef CONFIG_PM
1853static int ixgbe_resume(struct pci_dev *pdev)
1854{
1855 struct net_device *netdev = pci_get_drvdata(pdev);
1856 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001857 u32 err;
Auke Kok9a799d72007-09-15 14:07:45 -07001858
1859 pci_set_power_state(pdev, PCI_D0);
1860 pci_restore_state(pdev);
1861 err = pci_enable_device(pdev);
1862 if (err) {
1863 printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \
1864 "suspend\n");
1865 return err;
1866 }
1867 pci_set_master(pdev);
1868
1869 pci_enable_wake(pdev, PCI_D3hot, 0);
1870 pci_enable_wake(pdev, PCI_D3cold, 0);
1871
1872 if (netif_running(netdev)) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001873 err = ixgbe_request_irq(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001874 if (err)
1875 return err;
1876 }
1877
1878 ixgbe_reset(adapter);
1879
1880 if (netif_running(netdev))
1881 ixgbe_up(adapter);
1882
1883 netif_device_attach(netdev);
1884
1885 return 0;
1886}
1887#endif
1888
1889/**
1890 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
1891 * @adapter: board private structure
1892 * @rx_ring: ring to free buffers from
1893 **/
1894static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
1895 struct ixgbe_ring *rx_ring)
1896{
1897 struct pci_dev *pdev = adapter->pdev;
1898 unsigned long size;
1899 unsigned int i;
1900
1901 /* Free all the Rx ring sk_buffs */
1902
1903 for (i = 0; i < rx_ring->count; i++) {
1904 struct ixgbe_rx_buffer *rx_buffer_info;
1905
1906 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1907 if (rx_buffer_info->dma) {
1908 pci_unmap_single(pdev, rx_buffer_info->dma,
1909 adapter->rx_buf_len,
1910 PCI_DMA_FROMDEVICE);
1911 rx_buffer_info->dma = 0;
1912 }
1913 if (rx_buffer_info->skb) {
1914 dev_kfree_skb(rx_buffer_info->skb);
1915 rx_buffer_info->skb = NULL;
1916 }
1917 if (!rx_buffer_info->page)
1918 continue;
1919 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
1920 PCI_DMA_FROMDEVICE);
1921 rx_buffer_info->page_dma = 0;
1922
1923 put_page(rx_buffer_info->page);
1924 rx_buffer_info->page = NULL;
1925 }
1926
1927 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
1928 memset(rx_ring->rx_buffer_info, 0, size);
1929
1930 /* Zero out the descriptor ring */
1931 memset(rx_ring->desc, 0, rx_ring->size);
1932
1933 rx_ring->next_to_clean = 0;
1934 rx_ring->next_to_use = 0;
1935
1936 writel(0, adapter->hw.hw_addr + rx_ring->head);
1937 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1938}
1939
1940/**
1941 * ixgbe_clean_tx_ring - Free Tx Buffers
1942 * @adapter: board private structure
1943 * @tx_ring: ring to be cleaned
1944 **/
1945static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
1946 struct ixgbe_ring *tx_ring)
1947{
1948 struct ixgbe_tx_buffer *tx_buffer_info;
1949 unsigned long size;
1950 unsigned int i;
1951
1952 /* Free all the Tx ring sk_buffs */
1953
1954 for (i = 0; i < tx_ring->count; i++) {
1955 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1956 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
1957 }
1958
1959 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
1960 memset(tx_ring->tx_buffer_info, 0, size);
1961
1962 /* Zero out the descriptor ring */
1963 memset(tx_ring->desc, 0, tx_ring->size);
1964
1965 tx_ring->next_to_use = 0;
1966 tx_ring->next_to_clean = 0;
1967
1968 writel(0, adapter->hw.hw_addr + tx_ring->head);
1969 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1970}
1971
1972/**
Auke Kok9a799d72007-09-15 14:07:45 -07001973 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
1974 * @adapter: board private structure
1975 **/
1976static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
1977{
1978 int i;
1979
1980 for (i = 0; i < adapter->num_rx_queues; i++)
1981 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1982}
1983
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001984/**
1985 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
1986 * @adapter: board private structure
1987 **/
1988static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
1989{
1990 int i;
1991
1992 for (i = 0; i < adapter->num_tx_queues; i++)
1993 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1994}
1995
Auke Kok9a799d72007-09-15 14:07:45 -07001996void ixgbe_down(struct ixgbe_adapter *adapter)
1997{
1998 struct net_device *netdev = adapter->netdev;
1999 u32 rxctrl;
2000
2001 /* signal that we are down to the interrupt handler */
2002 set_bit(__IXGBE_DOWN, &adapter->state);
2003
2004 /* disable receives */
2005 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
2006 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
2007 rxctrl & ~IXGBE_RXCTRL_RXEN);
2008
2009 netif_tx_disable(netdev);
2010
2011 /* disable transmits in the hardware */
2012
2013 /* flush both disables */
2014 IXGBE_WRITE_FLUSH(&adapter->hw);
2015 msleep(10);
2016
2017 ixgbe_irq_disable(adapter);
2018
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002019 ixgbe_napi_disable_all(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002020 del_timer_sync(&adapter->watchdog_timer);
2021
2022 netif_carrier_off(netdev);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002023 netif_tx_stop_all_queues(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002024
Paul Larson6f4a0e42008-06-24 17:00:56 -07002025 if (!pci_channel_offline(adapter->pdev))
2026 ixgbe_reset(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002027 ixgbe_clean_all_tx_rings(adapter);
2028 ixgbe_clean_all_rx_rings(adapter);
2029
2030}
2031
2032static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
2033{
2034 struct net_device *netdev = pci_get_drvdata(pdev);
2035 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2036#ifdef CONFIG_PM
2037 int retval = 0;
2038#endif
2039
2040 netif_device_detach(netdev);
2041
2042 if (netif_running(netdev)) {
2043 ixgbe_down(adapter);
2044 ixgbe_free_irq(adapter);
2045 }
2046
2047#ifdef CONFIG_PM
2048 retval = pci_save_state(pdev);
2049 if (retval)
2050 return retval;
2051#endif
2052
2053 pci_enable_wake(pdev, PCI_D3hot, 0);
2054 pci_enable_wake(pdev, PCI_D3cold, 0);
2055
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08002056 ixgbe_release_hw_control(adapter);
2057
Auke Kok9a799d72007-09-15 14:07:45 -07002058 pci_disable_device(pdev);
2059
2060 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2061
2062 return 0;
2063}
2064
2065static void ixgbe_shutdown(struct pci_dev *pdev)
2066{
2067 ixgbe_suspend(pdev, PMSG_SUSPEND);
2068}
2069
2070/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002071 * ixgbe_poll - NAPI Rx polling callback
2072 * @napi: structure for representing this polling device
2073 * @budget: how many packets driver is allowed to clean
2074 *
2075 * This function is used for legacy and MSI, NAPI mode
Auke Kok9a799d72007-09-15 14:07:45 -07002076 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002077static int ixgbe_poll(struct napi_struct *napi, int budget)
Auke Kok9a799d72007-09-15 14:07:45 -07002078{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002079 struct ixgbe_q_vector *q_vector = container_of(napi,
2080 struct ixgbe_q_vector, napi);
2081 struct ixgbe_adapter *adapter = q_vector->adapter;
David S. Millerd2c7ddd2008-01-15 22:43:24 -08002082 int tx_cleaned = 0, work_done = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002083
Jeb Cramerbd0362d2008-03-03 15:04:02 -08002084#ifdef CONFIG_DCA
2085 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2086 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2087 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
2088 }
2089#endif
2090
David S. Millerd2c7ddd2008-01-15 22:43:24 -08002091 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002092 ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
Auke Kok9a799d72007-09-15 14:07:45 -07002093
David S. Millerd2c7ddd2008-01-15 22:43:24 -08002094 if (tx_cleaned)
2095 work_done = budget;
2096
David S. Miller53e52c72008-01-07 21:06:12 -08002097 /* If budget not fully consumed, exit the polling mode */
2098 if (work_done < budget) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002099 netif_rx_complete(adapter->netdev, napi);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002100 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
2101 ixgbe_set_itr(adapter);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002102 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2103 ixgbe_irq_enable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002104 }
2105
2106 return work_done;
2107}
2108
2109/**
2110 * ixgbe_tx_timeout - Respond to a Tx Hang
2111 * @netdev: network interface device structure
2112 **/
2113static void ixgbe_tx_timeout(struct net_device *netdev)
2114{
2115 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2116
2117 /* Do the reset outside of interrupt context */
2118 schedule_work(&adapter->reset_task);
2119}
2120
2121static void ixgbe_reset_task(struct work_struct *work)
2122{
2123 struct ixgbe_adapter *adapter;
2124 adapter = container_of(work, struct ixgbe_adapter, reset_task);
2125
2126 adapter->tx_timeout_count++;
2127
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002128 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002129}
2130
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002131static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2132 int vectors)
2133{
2134 int err, vector_threshold;
2135
2136 /* We'll want at least 3 (vector_threshold):
2137 * 1) TxQ[0] Cleanup
2138 * 2) RxQ[0] Cleanup
2139 * 3) Other (Link Status Change, etc.)
2140 * 4) TCP Timer (optional)
2141 */
2142 vector_threshold = MIN_MSIX_COUNT;
2143
	/* The more vectors we get, the more we can assign to Tx/Rx cleanup
	 * for the separate queues, where Rx cleanup >= Tx cleanup.
	 * Right now we only care about how many we'll get; we'll
	 * set them up later while requesting the IRQs.
	 */
2149 while (vectors >= vector_threshold) {
2150 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2151 vectors);
2152 if (!err) /* Success in acquiring all requested vectors. */
2153 break;
2154 else if (err < 0)
2155 vectors = 0; /* Nasty failure, quit now */
2156 else /* err == number of vectors we should try again with */
2157 vectors = err;
2158 }
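	/*
	 * Example of the retry loop above (illustrative): if we ask for 18
	 * vectors and the system can only provide 16, pci_enable_msix()
	 * returns 16 (a positive return is the number of vectors that
	 * could be allocated), so the next iteration retries with 16; a
	 * negative return aborts, and 0 means success.
	 */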
2159
2160 if (vectors < vector_threshold) {
2161 /* Can't allocate enough MSI-X interrupts? Oh well.
2162 * This just means we'll go with either a single MSI
2163 * vector or fall back to legacy interrupts.
2164 */
2165 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
2166 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2167 kfree(adapter->msix_entries);
2168 adapter->msix_entries = NULL;
2169 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2170 adapter->num_tx_queues = 1;
2171 adapter->num_rx_queues = 1;
2172 } else {
2173 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
2174 adapter->num_msix_vectors = vectors;
2175 }
2176}
2177
2178static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2179{
2180 int nrq, ntq;
2181 int feature_mask = 0, rss_i, rss_m;
2182
2183 /* Number of supported queues */
2184 switch (adapter->hw.mac.type) {
2185 case ixgbe_mac_82598EB:
2186 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2187 rss_m = 0;
2188 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2189
2190 switch (adapter->flags & feature_mask) {
2191 case (IXGBE_FLAG_RSS_ENABLED):
2192 rss_m = 0xF;
2193 nrq = rss_i;
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08002194 ntq = rss_i;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002195 break;
2196 case 0:
2197 default:
2198 rss_i = 0;
2199 rss_m = 0;
2200 nrq = 1;
2201 ntq = 1;
2202 break;
2203 }
2204
2205 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2206 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2207 break;
2208 default:
2209 nrq = 1;
2210 ntq = 1;
2211 break;
2212 }
2213
2214 adapter->num_rx_queues = nrq;
2215 adapter->num_tx_queues = ntq;
2216}
2217
2218/**
2219 * ixgbe_cache_ring_register - Descriptor ring to register mapping
2220 * @adapter: board private structure to initialize
2221 *
2222 * Once we know the feature-set enabled for the device, we'll cache
2223 * the register offset the descriptor ring is assigned to.
2224 **/
2225static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2226{
2227 /* TODO: Remove all uses of the indices in the cases where multiple
2228 * features are OR'd together, if the feature set makes sense.
2229 */
2230 int feature_mask = 0, rss_i;
2231 int i, txr_idx, rxr_idx;
2232
2233 /* Number of supported queues */
2234 switch (adapter->hw.mac.type) {
2235 case ixgbe_mac_82598EB:
2236 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2237 txr_idx = 0;
2238 rxr_idx = 0;
2239 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2240 switch (adapter->flags & feature_mask) {
2241 case (IXGBE_FLAG_RSS_ENABLED):
2242 for (i = 0; i < adapter->num_rx_queues; i++)
2243 adapter->rx_ring[i].reg_idx = i;
2244 for (i = 0; i < adapter->num_tx_queues; i++)
2245 adapter->tx_ring[i].reg_idx = i;
2246 break;
2247 case 0:
2248 default:
2249 break;
2250 }
2251 break;
2252 default:
2253 break;
2254 }
2255}
2256
Auke Kok9a799d72007-09-15 14:07:45 -07002257/**
2258 * ixgbe_alloc_queues - Allocate memory for all rings
2259 * @adapter: board private structure to initialize
2260 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. This works for multiqueue as well
 * as for a single queue.
2264 **/
2265static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
2266{
2267 int i;
2268
2269 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
2270 sizeof(struct ixgbe_ring), GFP_KERNEL);
2271 if (!adapter->tx_ring)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002272 goto err_tx_ring_allocation;
Auke Kok9a799d72007-09-15 14:07:45 -07002273
2274 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
2275 sizeof(struct ixgbe_ring), GFP_KERNEL);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002276 if (!adapter->rx_ring)
2277 goto err_rx_ring_allocation;
2278
2279 for (i = 0; i < adapter->num_tx_queues; i++) {
2280 adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
2281 adapter->tx_ring[i].queue_index = i;
2282 }
2283 for (i = 0; i < adapter->num_rx_queues; i++) {
2284 adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
2285 adapter->rx_ring[i].queue_index = i;
Auke Kok9a799d72007-09-15 14:07:45 -07002286 }
2287
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002288 ixgbe_cache_ring_register(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002289
2290 return 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002291
2292err_rx_ring_allocation:
2293 kfree(adapter->tx_ring);
2294err_tx_ring_allocation:
2295 return -ENOMEM;
2296}
2297
2298/**
2299 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
2300 * @adapter: board private structure to initialize
2301 *
2302 * Attempt to configure the interrupts using the best available
2303 * capabilities of the hardware and the kernel.
2304 **/
2305static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2306 *adapter)
2307{
2308 int err = 0;
2309 int vector, v_budget;
2310
2311 /*
2312 * It's easy to be greedy for MSI-X vectors, but it really
2313 * doesn't do us much good if we have a lot more vectors
	 * than CPUs. So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPUs.
2316 */
2317 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2318 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
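	/*
	 * Worked example (illustrative): with 8 Rx and 8 Tx queues on a
	 * 4-CPU system, min(8 + 8, 4 * 2) = 8, so v_budget becomes
	 * 8 + NON_Q_VECTORS and the CPU count is the limiting factor.
	 */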
2319
2320 /*
2321 * At the same time, hardware can only support a maximum of
2322 * MAX_MSIX_COUNT vectors. With features such as RSS and VMDq,
2323 * we can easily reach upwards of 64 Rx descriptor queues and
2324 * 32 Tx queues. Thus, we cap it off in those rare cases where
2325 * the cpu count also exceeds our vector limit.
2326 */
2327 v_budget = min(v_budget, MAX_MSIX_COUNT);
2328
2329 /* A failure in MSI-X entry allocation isn't fatal, but it does
2330 * mean we disable MSI-X capabilities of the adapter. */
2331 adapter->msix_entries = kcalloc(v_budget,
2332 sizeof(struct msix_entry), GFP_KERNEL);
2333 if (!adapter->msix_entries) {
2334 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2335 ixgbe_set_num_queues(adapter);
2336 kfree(adapter->tx_ring);
2337 kfree(adapter->rx_ring);
2338 err = ixgbe_alloc_queues(adapter);
2339 if (err) {
2340 DPRINTK(PROBE, ERR, "Unable to allocate memory "
2341 "for queues\n");
2342 goto out;
2343 }
2344
2345 goto try_msi;
2346 }
2347
2348 for (vector = 0; vector < v_budget; vector++)
2349 adapter->msix_entries[vector].entry = vector;
2350
2351 ixgbe_acquire_msix_vectors(adapter, v_budget);
2352
2353 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2354 goto out;
2355
2356try_msi:
2357 err = pci_enable_msi(adapter->pdev);
2358 if (!err) {
2359 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
2360 } else {
2361 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
2362 "falling back to legacy. Error: %d\n", err);
2363 /* reset err */
2364 err = 0;
2365 }
2366
2367out:
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08002368 /* Notify the stack of the (possibly) reduced Tx Queue count. */
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002369 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002370
2371 return err;
2372}
2373
2374static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2375{
2376 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2377 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2378 pci_disable_msix(adapter->pdev);
2379 kfree(adapter->msix_entries);
2380 adapter->msix_entries = NULL;
2381 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
2382 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
2383 pci_disable_msi(adapter->pdev);
2384 }
2385 return;
2386}
2387
2388/**
2389 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
2390 * @adapter: board private structure to initialize
2391 *
2392 * We determine which interrupt scheme to use based on...
2393 * - Kernel support (MSI, MSI-X)
2394 * - which can be user-defined (via MODULE_PARAM)
2395 * - Hardware queue count (num_*_queues)
2396 * - defined by miscellaneous hardware support/features (RSS, etc.)
2397 **/
2398static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2399{
2400 int err;
2401
2402 /* Number of supported queues */
2403 ixgbe_set_num_queues(adapter);
2404
2405 err = ixgbe_alloc_queues(adapter);
2406 if (err) {
2407 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
2408 goto err_alloc_queues;
2409 }
2410
2411 err = ixgbe_set_interrupt_capability(adapter);
2412 if (err) {
2413 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
2414 goto err_set_interrupt;
2415 }
2416
2417 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
2418 "Tx Queue count = %u\n",
2419 (adapter->num_rx_queues > 1) ? "Enabled" :
2420 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2421
2422 set_bit(__IXGBE_DOWN, &adapter->state);
2423
2424 return 0;
2425
2426err_set_interrupt:
2427 kfree(adapter->tx_ring);
2428 kfree(adapter->rx_ring);
2429err_alloc_queues:
2430 return err;
Auke Kok9a799d72007-09-15 14:07:45 -07002431}
2432
2433/**
2434 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
2435 * @adapter: board private structure to initialize
2436 *
2437 * ixgbe_sw_init initializes the Adapter private data structure.
2438 * Fields are initialized based on PCI device information and
2439 * OS network device settings (MTU size).
2440 **/
2441static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2442{
2443 struct ixgbe_hw *hw = &adapter->hw;
2444 struct pci_dev *pdev = adapter->pdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002445 unsigned int rss;
2446
2447 /* Set capability flags */
2448 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2449 adapter->ring_feature[RING_F_RSS].indices = rss;
2450 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
Auke Kok9a799d72007-09-15 14:07:45 -07002451
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002452 /* Enable Dynamic interrupt throttling by default */
2453 adapter->rx_eitr = 1;
2454 adapter->tx_eitr = 1;
2455
Auke Kok9a799d72007-09-15 14:07:45 -07002456 /* default flow control settings */
2457 hw->fc.original_type = ixgbe_fc_full;
2458 hw->fc.type = ixgbe_fc_full;
2459
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002460 /* select 10G link by default */
Auke Kok9a799d72007-09-15 14:07:45 -07002461 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
2462 if (hw->mac.ops.reset(hw)) {
2463 dev_err(&pdev->dev, "HW Init failed\n");
2464 return -EIO;
2465 }
Auke Kok3957d632007-10-31 15:22:10 -07002466 if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
2467 false)) {
Auke Kok9a799d72007-09-15 14:07:45 -07002468 dev_err(&pdev->dev, "Link Speed setup failed\n");
2469 return -EIO;
2470 }
2471
2472 /* initialize eeprom parameters */
2473 if (ixgbe_init_eeprom(hw)) {
2474 dev_err(&pdev->dev, "EEPROM initialization failed\n");
2475 return -EIO;
2476 }
2477
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002478 /* enable rx csum by default */
Auke Kok9a799d72007-09-15 14:07:45 -07002479 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2480
Auke Kok9a799d72007-09-15 14:07:45 -07002481 set_bit(__IXGBE_DOWN, &adapter->state);
2482
2483 return 0;
2484}
2485
2486/**
2487 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
2488 * @adapter: board private structure
2489 * @txdr: tx descriptor ring (for a specific queue) to setup
2490 *
2491 * Return 0 on success, negative on failure
2492 **/
2493int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
2494 struct ixgbe_ring *txdr)
2495{
2496 struct pci_dev *pdev = adapter->pdev;
2497 int size;
2498
2499 size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
2500 txdr->tx_buffer_info = vmalloc(size);
2501 if (!txdr->tx_buffer_info) {
2502 DPRINTK(PROBE, ERR,
2503 "Unable to allocate memory for the transmit descriptor ring\n");
2504 return -ENOMEM;
2505 }
2506 memset(txdr->tx_buffer_info, 0, size);
2507
2508 /* round up to nearest 4K */
2509 txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
2510 txdr->size = ALIGN(txdr->size, 4096);
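	/*
	 * Illustrative sizing: an advanced Tx descriptor is 16 bytes, so a
	 * 512-entry ring needs 8192 bytes (already a 4K multiple), while a
	 * 100-entry ring (1600 bytes) would be rounded up to 4096.
	 */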
2511
2512 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
2513 if (!txdr->desc) {
2514 vfree(txdr->tx_buffer_info);
2515 DPRINTK(PROBE, ERR,
2516 "Memory allocation failed for the tx desc ring\n");
2517 return -ENOMEM;
2518 }
2519
Auke Kok9a799d72007-09-15 14:07:45 -07002520 txdr->next_to_use = 0;
2521 txdr->next_to_clean = 0;
2522 txdr->work_limit = txdr->count;
Auke Kok9a799d72007-09-15 14:07:45 -07002523
2524 return 0;
2525}
2526
2527/**
2528 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
2529 * @adapter: board private structure
2530 * @rxdr: rx descriptor ring (for a specific queue) to setup
2531 *
2532 * Returns 0 on success, negative on failure
2533 **/
2534int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
2535 struct ixgbe_ring *rxdr)
2536{
2537 struct pci_dev *pdev = adapter->pdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002538 int size;
Auke Kok9a799d72007-09-15 14:07:45 -07002539
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002540 size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
2541 rxdr->lro_mgr.lro_arr = vmalloc(size);
2542 if (!rxdr->lro_mgr.lro_arr)
2543 return -ENOMEM;
2544 memset(rxdr->lro_mgr.lro_arr, 0, size);
2545
Auke Kok9a799d72007-09-15 14:07:45 -07002546 size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
2547 rxdr->rx_buffer_info = vmalloc(size);
2548 if (!rxdr->rx_buffer_info) {
2549 DPRINTK(PROBE, ERR,
2550 "vmalloc allocation failed for the rx desc ring\n");
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002551 goto alloc_failed;
Auke Kok9a799d72007-09-15 14:07:45 -07002552 }
2553 memset(rxdr->rx_buffer_info, 0, size);
2554
Auke Kok9a799d72007-09-15 14:07:45 -07002555 /* Round up to nearest 4K */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002556 rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc);
Auke Kok9a799d72007-09-15 14:07:45 -07002557 rxdr->size = ALIGN(rxdr->size, 4096);
2558
2559 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
2560
2561 if (!rxdr->desc) {
2562 DPRINTK(PROBE, ERR,
2563 "Memory allocation failed for the rx desc ring\n");
2564 vfree(rxdr->rx_buffer_info);
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002565 goto alloc_failed;
Auke Kok9a799d72007-09-15 14:07:45 -07002566 }
2567
2568 rxdr->next_to_clean = 0;
2569 rxdr->next_to_use = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002570
2571 return 0;
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002572
2573alloc_failed:
2574 vfree(rxdr->lro_mgr.lro_arr);
2575 rxdr->lro_mgr.lro_arr = NULL;
2576 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07002577}
2578
2579/**
2580 * ixgbe_free_tx_resources - Free Tx Resources per Queue
2581 * @adapter: board private structure
2582 * @tx_ring: Tx descriptor ring for a specific queue
2583 *
2584 * Free all transmit software resources
2585 **/
2586static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
2587 struct ixgbe_ring *tx_ring)
2588{
2589 struct pci_dev *pdev = adapter->pdev;
2590
2591 ixgbe_clean_tx_ring(adapter, tx_ring);
2592
2593 vfree(tx_ring->tx_buffer_info);
2594 tx_ring->tx_buffer_info = NULL;
2595
2596 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2597
2598 tx_ring->desc = NULL;
2599}
2600
2601/**
2602 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
2603 * @adapter: board private structure
2604 *
2605 * Free all transmit software resources
2606 **/
2607static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
2608{
2609 int i;
2610
2611 for (i = 0; i < adapter->num_tx_queues; i++)
2612 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
2613}
2614
2615/**
2616 * ixgbe_free_rx_resources - Free Rx Resources
2617 * @adapter: board private structure
2618 * @rx_ring: ring to clean the resources from
2619 *
2620 * Free all receive software resources
2621 **/
2622static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
2623 struct ixgbe_ring *rx_ring)
2624{
2625 struct pci_dev *pdev = adapter->pdev;
2626
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002627 vfree(rx_ring->lro_mgr.lro_arr);
2628 rx_ring->lro_mgr.lro_arr = NULL;
2629
Auke Kok9a799d72007-09-15 14:07:45 -07002630 ixgbe_clean_rx_ring(adapter, rx_ring);
2631
2632 vfree(rx_ring->rx_buffer_info);
2633 rx_ring->rx_buffer_info = NULL;
2634
2635 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2636
2637 rx_ring->desc = NULL;
2638}
2639
2640/**
2641 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
2642 * @adapter: board private structure
2643 *
2644 * Free all receive software resources
2645 **/
2646static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
2647{
2648 int i;
2649
2650 for (i = 0; i < adapter->num_rx_queues; i++)
2651 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
2652}
2653
2654/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002655 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
Auke Kok9a799d72007-09-15 14:07:45 -07002656 * @adapter: board private structure
2657 *
2658 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean up those orphaned rings.
2661 *
2662 * Return 0 on success, negative on failure
2663 **/
2664static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
2665{
2666 int i, err = 0;
2667
2668 for (i = 0; i < adapter->num_tx_queues; i++) {
2669 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2670 if (err) {
2671 DPRINTK(PROBE, ERR,
2672 "Allocation for Tx Queue %u failed\n", i);
2673 break;
2674 }
2675 }
2676
2677 return err;
2678}
2679
2680/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002681 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
Auke Kok9a799d72007-09-15 14:07:45 -07002682 * @adapter: board private structure
2683 *
2684 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean up those orphaned rings.
2687 *
2688 * Return 0 on success, negative on failure
2689 **/
2690
2691static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
2692{
2693 int i, err = 0;
2694
2695 for (i = 0; i < adapter->num_rx_queues; i++) {
2696 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2697 if (err) {
2698 DPRINTK(PROBE, ERR,
2699 "Allocation for Rx Queue %u failed\n", i);
2700 break;
2701 }
2702 }
2703
2704 return err;
2705}
2706
2707/**
2708 * ixgbe_change_mtu - Change the Maximum Transfer Unit
2709 * @netdev: network interface device structure
2710 * @new_mtu: new value for maximum frame size
2711 *
2712 * Returns 0 on success, negative on failure
2713 **/
2714static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
2715{
2716 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2717 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2718
2719 if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
2720 (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
2721 return -EINVAL;
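	/*
	 * Illustrative bounds: with ETH_ZLEN (60) + ETH_FCS_LEN (4) = 64,
	 * any new_mtu below 46 (64 - 14 - 4) is rejected; the upper bound
	 * is likewise checked as a full frame size, not as an MTU. A user
	 * would typically hit this path via "ip link set <dev> mtu 9000".
	 */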
2722
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002723 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
2724 netdev->mtu, new_mtu);
2725 /* must set new MTU before calling down or up */
Auke Kok9a799d72007-09-15 14:07:45 -07002726 netdev->mtu = new_mtu;
2727
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002728 if (netif_running(netdev))
2729 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002730
2731 return 0;
2732}
2733
2734/**
2735 * ixgbe_open - Called when a network interface is made active
2736 * @netdev: network interface device structure
2737 *
2738 * Returns 0 on success, negative value on failure
2739 *
2740 * The open entry point is called when a network interface is made
2741 * active by the system (IFF_UP). At this point all resources needed
2742 * for transmit and receive operations are allocated, the interrupt
2743 * handler is registered with the OS, the watchdog timer is started,
2744 * and the stack is notified that the interface is ready.
2745 **/
2746static int ixgbe_open(struct net_device *netdev)
2747{
2748 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2749 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07002750
Auke Kok4bebfaa2008-02-11 09:26:01 -08002751 /* disallow open during test */
2752 if (test_bit(__IXGBE_TESTING, &adapter->state))
2753 return -EBUSY;
2754
Auke Kok9a799d72007-09-15 14:07:45 -07002755 /* allocate transmit descriptors */
2756 err = ixgbe_setup_all_tx_resources(adapter);
2757 if (err)
2758 goto err_setup_tx;
2759
Auke Kok9a799d72007-09-15 14:07:45 -07002760 /* allocate receive descriptors */
2761 err = ixgbe_setup_all_rx_resources(adapter);
2762 if (err)
2763 goto err_setup_rx;
2764
2765 ixgbe_configure(adapter);
2766
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002767 err = ixgbe_request_irq(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002768 if (err)
2769 goto err_req_irq;
2770
Auke Kok9a799d72007-09-15 14:07:45 -07002771 err = ixgbe_up_complete(adapter);
2772 if (err)
2773 goto err_up;
2774
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07002775 netif_tx_start_all_queues(netdev);
2776
Auke Kok9a799d72007-09-15 14:07:45 -07002777 return 0;
2778
2779err_up:
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08002780 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002781 ixgbe_free_irq(adapter);
2782err_req_irq:
2783 ixgbe_free_all_rx_resources(adapter);
2784err_setup_rx:
2785 ixgbe_free_all_tx_resources(adapter);
2786err_setup_tx:
2787 ixgbe_reset(adapter);
2788
2789 return err;
2790}
2791
2792/**
2793 * ixgbe_close - Disables a network interface
2794 * @netdev: network interface device structure
2795 *
2796 * Returns 0, this is not allowed to fail
2797 *
2798 * The close entry point is called when an interface is de-activated
2799 * by the OS. The hardware is still under the drivers control, but
2800 * needs to be disabled. A global MAC reset is issued to stop the
2801 * hardware, and all transmit and receive resources are freed.
2802 **/
2803static int ixgbe_close(struct net_device *netdev)
2804{
2805 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002806
2807 ixgbe_down(adapter);
2808 ixgbe_free_irq(adapter);
2809
2810 ixgbe_free_all_tx_resources(adapter);
2811 ixgbe_free_all_rx_resources(adapter);
2812
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08002813 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002814
2815 return 0;
2816}
2817
2818/**
2819 * ixgbe_update_stats - Update the board statistics counters.
2820 * @adapter: board private structure
2821 **/
2822void ixgbe_update_stats(struct ixgbe_adapter *adapter)
2823{
2824 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08002825 u64 total_mpc = 0;
2826 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
Auke Kok9a799d72007-09-15 14:07:45 -07002827
2828 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08002829 for (i = 0; i < 8; i++) {
2830 /* for packet buffers not used, the register should read 0 */
2831 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
2832 missed_rx += mpc;
2833 adapter->stats.mpc[i] += mpc;
2834 total_mpc += adapter->stats.mpc[i];
2835 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
2836 }
2837 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
2838 /* work around hardware counting issue */
2839 adapter->stats.gprc -= missed_rx;
Auke Kok9a799d72007-09-15 14:07:45 -07002840
	/* 82598 hardware only has a 32-bit counter in the high register */
Auke Kok9a799d72007-09-15 14:07:45 -07002842 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08002843 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2844 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
Auke Kok9a799d72007-09-15 14:07:45 -07002845 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
2846 adapter->stats.bprc += bprc;
2847 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
2848 adapter->stats.mprc -= bprc;
2849 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
2850 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
2851 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
2852 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
2853 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
2854 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
2855 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
Auke Kok9a799d72007-09-15 14:07:45 -07002856 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
2857 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
Auke Kok9a799d72007-09-15 14:07:45 -07002858 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08002859 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2860 adapter->stats.lxontxc += lxon;
2861 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2862 adapter->stats.lxofftxc += lxoff;
Auke Kok9a799d72007-09-15 14:07:45 -07002863 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2864 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08002865 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2866 /*
2867 * 82598 errata - tx of flow control packets is included in tx counters
2868 */
2869 xon_off_tot = lxon + lxoff;
2870 adapter->stats.gptc -= xon_off_tot;
2871 adapter->stats.mptc -= xon_off_tot;
2872 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
2874 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2875 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
Auke Kok9a799d72007-09-15 14:07:45 -07002876 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2877 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08002878 adapter->stats.ptc64 -= xon_off_tot;
Auke Kok9a799d72007-09-15 14:07:45 -07002879 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2880 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2881 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2882 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2883 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
Auke Kok9a799d72007-09-15 14:07:45 -07002884 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
2885
2886 /* Fill out the OS statistics structure */
Auke Kok9a799d72007-09-15 14:07:45 -07002887 adapter->net_stats.multicast = adapter->stats.mprc;
2888
2889 /* Rx Errors */
2890 adapter->net_stats.rx_errors = adapter->stats.crcerrs +
2891 adapter->stats.rlec;
2892 adapter->net_stats.rx_dropped = 0;
2893 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2894 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08002895 adapter->net_stats.rx_missed_errors = total_mpc;
Auke Kok9a799d72007-09-15 14:07:45 -07002896}
2897
2898/**
2899 * ixgbe_watchdog - Timer Call-back
2900 * @data: pointer to adapter cast into an unsigned long
2901 **/
2902static void ixgbe_watchdog(unsigned long data)
2903{
2904 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
2905 struct net_device *netdev = adapter->netdev;
2906 bool link_up;
2907 u32 link_speed = 0;
2908
Auke Kok3957d632007-10-31 15:22:10 -07002909 adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
Auke Kok9a799d72007-09-15 14:07:45 -07002910
2911 if (link_up) {
2912 if (!netif_carrier_ok(netdev)) {
2913 u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2914 u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
2915#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
2916#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
2917 DPRINTK(LINK, INFO, "NIC Link is Up %s, "
2918 "Flow Control: %s\n",
2919 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
2920 "10 Gbps" :
2921 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
Emil Tantilov5a059e92008-03-03 14:37:42 -08002922 "1 Gbps" : "unknown speed")),
Auke Kok9a799d72007-09-15 14:07:45 -07002923 ((FLOW_RX && FLOW_TX) ? "RX/TX" :
2924 (FLOW_RX ? "RX" :
2925 (FLOW_TX ? "TX" : "None"))));
2926
2927 netif_carrier_on(netdev);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002928 netif_tx_wake_all_queues(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002929 } else {
2930 /* Force detection of hung controller */
2931 adapter->detect_tx_hung = true;
2932 }
2933 } else {
2934 if (netif_carrier_ok(netdev)) {
2935 DPRINTK(LINK, INFO, "NIC Link is Down\n");
2936 netif_carrier_off(netdev);
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002937 netif_tx_stop_all_queues(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002938 }
2939 }
2940
2941 ixgbe_update_stats(adapter);
2942
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002943 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2944 /* Cause software interrupt to ensure rx rings are cleaned */
2945 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2946 u32 eics =
2947 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
2948 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
2949 } else {
2950 /* for legacy and MSI interrupts don't set any bits that
2951 * are enabled for EIAM, because this operation would
2952 * set *both* EIMS and EICS for any bit in EIAM */
2953 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
2954 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
2955 }
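		/*
		 * Illustrative mask math for the MSI-X branch above: with,
		 * say, 10 vectors of which 2 are non-queue (the count here
		 * is only an assumption for the example), 8 queue vectors
		 * remain and (1 << 8) - 1 = 0xFF sets one EICS bit per
		 * queue vector.
		 */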
2956 /* Reset the timer */
Auke Kok9a799d72007-09-15 14:07:45 -07002957 mod_timer(&adapter->watchdog_timer,
2958 round_jiffies(jiffies + 2 * HZ));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002959 }
Auke Kok9a799d72007-09-15 14:07:45 -07002960}
2961
Auke Kok9a799d72007-09-15 14:07:45 -07002962static int ixgbe_tso(struct ixgbe_adapter *adapter,
2963 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
2964 u32 tx_flags, u8 *hdr_len)
2965{
2966 struct ixgbe_adv_tx_context_desc *context_desc;
2967 unsigned int i;
2968 int err;
2969 struct ixgbe_tx_buffer *tx_buffer_info;
2970 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2971 u32 mss_l4len_idx = 0, l4len;
Auke Kok9a799d72007-09-15 14:07:45 -07002972
2973 if (skb_is_gso(skb)) {
2974 if (skb_header_cloned(skb)) {
2975 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2976 if (err)
2977 return err;
2978 }
2979 l4len = tcp_hdrlen(skb);
2980 *hdr_len += l4len;
2981
Al Viro8327d002007-12-10 18:54:12 +00002982 if (skb->protocol == htons(ETH_P_IP)) {
Auke Kok9a799d72007-09-15 14:07:45 -07002983 struct iphdr *iph = ip_hdr(skb);
2984 iph->tot_len = 0;
2985 iph->check = 0;
2986 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2987 iph->daddr, 0,
2988 IPPROTO_TCP,
2989 0);
2990 adapter->hw_tso_ctxt++;
2991 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2992 ipv6_hdr(skb)->payload_len = 0;
2993 tcp_hdr(skb)->check =
2994 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2995 &ipv6_hdr(skb)->daddr,
2996 0, IPPROTO_TCP, 0);
2997 adapter->hw_tso6_ctxt++;
2998 }
2999
3000 i = tx_ring->next_to_use;
3001
3002 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3003 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
3004
3005 /* VLAN MACLEN IPLEN */
3006 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3007 vlan_macip_lens |=
3008 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3009 vlan_macip_lens |= ((skb_network_offset(skb)) <<
3010 IXGBE_ADVTXD_MACLEN_SHIFT);
3011 *hdr_len += skb_network_offset(skb);
3012 vlan_macip_lens |=
3013 (skb_transport_header(skb) - skb_network_header(skb));
3014 *hdr_len +=
3015 (skb_transport_header(skb) - skb_network_header(skb));
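		/*
		 * Worked example (illustrative): for an untagged IPv4/TCP
		 * frame with no options, hdr_len totals l4len (20) +
		 * skb_network_offset (14) + the IP header span (20) = 54
		 * bytes of headers preceding the TSO payload.
		 */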
3016 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3017 context_desc->seqnum_seed = 0;
3018
3019 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
3020 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
3021 IXGBE_ADVTXD_DTYP_CTXT);
3022
Al Viro8327d002007-12-10 18:54:12 +00003023 if (skb->protocol == htons(ETH_P_IP))
Auke Kok9a799d72007-09-15 14:07:45 -07003024 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3025 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3026 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3027
3028 /* MSS L4LEN IDX */
3029 mss_l4len_idx |=
3030 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
3031 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
3032 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3033
3034 tx_buffer_info->time_stamp = jiffies;
3035 tx_buffer_info->next_to_watch = i;
3036
3037 i++;
3038 if (i == tx_ring->count)
3039 i = 0;
3040 tx_ring->next_to_use = i;
3041
3042 return true;
3043 }
3044 return false;
3045}
3046
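/**
 * ixgbe_tx_csum - configure a checksum offload context descriptor
 * @adapter: board private structure
 * @tx_ring: ring on which the context descriptor is placed
 * @skb: buffer being transmitted
 * @tx_flags: IXGBE_TX_FLAGS_* set for this buffer
 *
 * Returns true if a context descriptor was queued because the buffer
 * needs a partial checksum or carries a VLAN tag, false otherwise.
 **/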
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
			  struct ixgbe_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
			    (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;

			case __constant_htons(ETH_P_IPV6):
				/* XXX: IPv6 extension headers are not walked,
				 * so only TCP directly after the IPv6 header
				 * gets L4 offload */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;

			default:
				if (unlikely(net_ratelimit())) {
					DPRINTK(PROBE, WARNING,
						"partial checksum but proto=%x!\n",
						skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;
		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

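/**
 * ixgbe_tx_map - map skb data and fragments for DMA
 * @adapter: board private structure
 * @tx_ring: ring on which the buffers are tracked
 * @skb: buffer being transmitted
 * @first: index of the first descriptor used by this skb
 *
 * Maps the linear portion and each page fragment of @skb in
 * IXGBE_MAX_DATA_PER_TXD sized chunks, recording each mapping in
 * tx_buffer_info. Returns the number of descriptors used.
 **/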
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
			struct ixgbe_ring *tx_ring,
			struct sk_buff *skb, unsigned int first)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;

	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->dma = pci_map_single(adapter->pdev,
						     skb->data + offset,
						     size, PCI_DMA_TODEVICE);
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = frag->page_offset;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma = pci_map_page(adapter->pdev,
							   frag->page,
							   offset,
							   size, PCI_DMA_TODEVICE);
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
	}
	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;
}

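/**
 * ixgbe_tx_queue - write data descriptors and notify hardware
 * @adapter: board private structure
 * @tx_ring: ring on which the descriptors are placed
 * @tx_flags: IXGBE_TX_FLAGS_* set for this buffer
 * @count: number of descriptors mapped by ixgbe_tx_map()
 * @paylen: total frame length
 * @hdr_len: header length accumulated by ixgbe_tso()
 *
 * Fills in one advanced data descriptor per mapped buffer, marks the
 * last one EOP/RS, and writes the new tail to the hardware register.
 **/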
static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
			   struct ixgbe_ring *tx_ring,
			   int tx_flags, int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;
	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
					 IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
				 IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);

		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}

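/**
 * __ixgbe_maybe_stop_tx - stop the queue if the ring is nearly full
 * @netdev: network interface device structure
 * @tx_ring: ring to check
 * @size: number of descriptors needed
 *
 * Slow path of ixgbe_maybe_stop_tx(). Stops the subqueue, re-checks
 * the free descriptor count under the memory barrier, and restarts
 * the subqueue if a racing clean-up made room. Returns -EBUSY if the
 * queue must stay stopped, 0 if transmission may proceed.
 **/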
static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbe_ring *tx_ring, int size)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

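/**
 * ixgbe_maybe_stop_tx - fast path check for available descriptors
 * @netdev: network interface device structure
 * @tx_ring: ring to check
 * @size: number of descriptors needed
 *
 * Returns 0 when enough descriptors are free; otherwise falls back
 * to __ixgbe_maybe_stop_tx() to stop the queue.
 **/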
static int ixgbe_maybe_stop_tx(struct net_device *netdev,
			       struct ixgbe_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
}

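/**
 * ixgbe_xmit_frame - transmit one buffer
 * @skb: buffer to transmit
 * @netdev: network interface device structure
 *
 * Counts the descriptors the buffer will need and stops the queue
 * early if they are not available, then runs the TSO/checksum
 * offload setup, maps the buffer for DMA, and queues the descriptors.
 **/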
static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring;
	unsigned int len = skb->len;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	unsigned int mss = 0;
	int count = 0;
	unsigned int f;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;

	len -= skb->data_len;
	r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
	tx_ring = &adapter->tx_ring[r_idx];

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	mss = skb_shinfo(skb)->gso_size;

	if (mss)
		count++;
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		count++;

	count += TXD_USE_COUNT(len);
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}
	if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	first = tx_ring->next_to_use;
	tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbe_tx_queue(adapter, tx_ring, tx_flags,
		       ixgbe_tx_map(adapter, tx_ring, skb, first),
		       skb->len, hdr_len);

	netdev->trans_start = jiffies;

	ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}

/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);

	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
	ixgbe_intr(adapter->pdev->irq, netdev);
	adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * ixgbe_napi_add_all - prep napi structs for use
 * @adapter: board private structure
 *
 * Helper function to netif_napi_add() each possible q_vector->napi.
 **/
static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int (*poll)(struct napi_struct *, int);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		poll = &ixgbe_clean_rxonly;
	} else {
		poll = &ixgbe_poll;
		/* only one q_vector for legacy modes */
		q_vectors = 1;
	}

	for (i = 0; i < q_vectors; i++) {
		struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       (*poll), 64);
	}
}

/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbe_adapter *adapter = NULL;
	struct ixgbe_hw *hw;
	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	static int cards_found;
	int i, err, pci_using_dac;
	u16 link_status, link_speed, link_width;
	u32 part_num;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbe_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for (i = 1; i <= 5; i++) {
		if (pci_resource_len(pdev, i) == 0)
			continue;
	}

	netdev->open = &ixgbe_open;
	netdev->stop = &ixgbe_close;
	netdev->hard_start_xmit = &ixgbe_xmit_frame;
	netdev->get_stats = &ixgbe_get_stats;
	netdev->set_rx_mode = &ixgbe_set_rx_mode;
	netdev->set_multicast_list = &ixgbe_set_rx_mode;
	netdev->set_mac_address = &ixgbe_set_mac;
	netdev->change_mtu = &ixgbe_change_mtu;
	ixgbe_set_ethtool_ops(netdev);
	netdev->tx_timeout = &ixgbe_tx_timeout;
	netdev->watchdog_timeo = 5 * HZ;
	netdev->vlan_rx_register = ixgbe_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgbe_netpoll;
#endif
	strcpy(netdev->name, pci_name(pdev));

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	adapter->bd_number = cards_found;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	err = ii->get_invariants(hw);
	if (err)
		goto err_hw_init;

	/* setup the private structure */
	err = ixgbe_sw_init(adapter);
	if (err)
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_LRO;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_HW_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* make sure the EEPROM is good */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

	if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
		err = -EIO;
		goto err_eeprom;
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgbe_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbe_reset_task);

	/* initialize default flow control settings */
	hw->fc.original_type = ixgbe_fc_full;
	hw->fc.type = ixgbe_fc_full;
	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;

	err = ixgbe_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* print bus type/speed/width info */
	pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
	link_speed = link_status & IXGBE_PCI_LINK_SPEED;
	link_width = link_status & IXGBE_PCI_LINK_WIDTH;
	dev_info(&pdev->dev, "(PCI Express:%s:%s) "
		 "%02x:%02x:%02x:%02x:%02x:%02x\n",
		 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
		  (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
		  "Unknown"),
		 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
		  (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
		  (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
		  (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
		  "Unknown"),
		 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
		 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
	ixgbe_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
		 hw->mac.type, hw->phy.type,
		 (part_num >> 8), (part_num & 0xff));

	if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
		dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
			 "this card is not sufficient for optimal "
			 "performance.\n");
		dev_warn(&pdev->dev, "For optimal performance a x8 "
			 "PCI-Express slot is required.\n");
	}

	/* reset the hardware with the new settings */
	ixgbe_start_hw(hw);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	ixgbe_napi_add_all(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

#ifdef CONFIG_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
		/* always use CB2 mode, difference is masked
		 * in the CB driver */
		IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
		ixgbe_setup_dca(adapter);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
	cards_found++;
	return 0;

err_register:
	ixgbe_release_hw_control(adapter);
err_hw_init:
err_sw_init:
	ixgbe_reset_interrupt_capability(adapter);
err_eeprom:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBE_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

#ifdef CONFIG_DCA
	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
		dca_remove_requester(&pdev->dev);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
	}

#endif
	unregister_netdev(netdev);

	ixgbe_reset_interrupt_capability(adapter);

	ixgbe_release_hw_control(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	DPRINTK(PROBE, INFO, "complete\n");
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		ixgbe_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		DPRINTK(PROBE, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	ixgbe_reset(adapter);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (ixgbe_up(adapter)) {
			DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
	.error_detected = ixgbe_io_error_detected,
	.slot_reset = ixgbe_io_slot_reset,
	.resume = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
	.name = ixgbe_driver_name,
	.id_table = ixgbe_pci_tbl,
	.probe = ixgbe_probe,
	.remove = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
	.suspend = ixgbe_suspend,
	.resume = ixgbe_resume,
#endif
	.shutdown = ixgbe_shutdown,
	.err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
	       ixgbe_driver_string, ixgbe_driver_version);

	printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_DCA
	dca_register_notify(&dca_notifier);

#endif
	ret = pci_register_driver(&ixgbe_driver);
	return ret;
}
module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&ixgbe_driver);
}

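/**
 * ixgbe_notify_dca - DCA provider event callback
 * @nb: notifier block registered in ixgbe_init_module()
 * @event: DCA provider event forwarded by dca_register_notify()
 * @p: notifier payload (unused here)
 *
 * Forwards the event to __ixgbe_notify_dca() for each bound device.
 **/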
#ifdef CONFIG_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
			    void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
					 __ixgbe_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_DCA */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */