/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.1.18"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
	"Copyright (c) 1999-2007 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
	[board_82598] = &ixgbe_82598_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT_DUAL_PORT),
	 board_82598 },
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
	 board_82598 },

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3


#ifdef DEBUG
/**
 * ixgbe_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	struct net_device *netdev = adapter->netdev;
	return netdev->name;
}
#endif

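/*
 * ixgbe_set_ivar - program one entry of the interrupt vector allocation
 * registers.  Each 32-bit IVAR register packs four 8-bit allocation
 * entries, so the register index is int_alloc_entry / 4 and the entry's
 * byte lane within that register is int_alloc_entry % 4; the
 * IXGBE_IVAR_ALLOC_VAL bit marks the new entry as valid.
 */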
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
			   u8 msix_vector)
{
	u32 ivar, index;

	msix_vector |= IXGBE_IVAR_ALLOC_VAL;
	index = (int_alloc_entry >> 2) & 0x1F;
	ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
	ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
	ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
					     struct ixgbe_tx_buffer
					     *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		pci_unmap_page(adapter->pdev,
			       tx_buffer_info->dma,
			       tx_buffer_info->length, PCI_DMA_TODEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	/* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
				       struct ixgbe_ring *tx_ring,
				       unsigned int eop,
				       union ixgbe_adv_tx_desc *eop_desc)
{
	/* Detect a transmit hang in hardware, this serializes the
	 * check with the clearing of time_stamp and movement of i */
	adapter->detect_tx_hung = false;
	if (tx_ring->tx_buffer_info[eop].dma &&
	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
	    !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
		/* detected Tx unit hang */
		DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
			"  TDH                  <%x>\n"
			"  TDT                  <%x>\n"
			"  next_to_use          <%x>\n"
			"  next_to_clean        <%x>\n"
			"tx_buffer_info[next_to_clean]\n"
			"  time_stamp           <%lx>\n"
			"  next_to_watch        <%x>\n"
			"  jiffies              <%lx>\n"
			"  next_to_watch.status <%x>\n",
			readl(adapter->hw.hw_addr + tx_ring->head),
			readl(adapter->hw.hw_addr + tx_ring->tail),
			tx_ring->next_to_use,
			tx_ring->next_to_clean,
			tx_ring->tx_buffer_info[eop].time_stamp,
			eop, jiffies, eop_desc->wb.status);
		return true;
	}

	return false;
}

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned int i, eop;
	bool cleaned = false;
	int count = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	while (eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) {
		for (cleaned = false; !cleaned;) {
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);

			tx_ring->stats.bytes += tx_buffer_info->length;
			ixgbe_unmap_and_free_tx_resource(adapter,
							 tx_buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		tx_ring->stats.packets++;

		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

		/* weight of a sort for tx, avoid endless transmit cleanup */
		if (count++ >= tx_ring->work_limit)
			break;
	}

	tx_ring->next_to_clean = i;

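	/* Once enough descriptors have been reclaimed, wake a stopped
	 * transmit queue; tx_lock serializes the wake-up against the
	 * stop-queue check in the transmit path. */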
#define TX_WAKE_THRESHOLD 32
	spin_lock(&tx_ring->tx_lock);

	if (cleaned && netif_carrier_ok(netdev) &&
	    (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD) &&
	    !test_bit(__IXGBE_DOWN, &adapter->state))
		netif_wake_queue(netdev);

	spin_unlock(&tx_ring->tx_lock);

	if (adapter->detect_tx_hung)
		if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
			netif_stop_queue(netdev);

	if (count >= tx_ring->work_limit)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);

	return cleaned;
}

/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @is_vlan: packet has a VLAN tag
 * @tag: VLAN tag from descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
			      struct sk_buff *skb, bool is_vlan,
			      u16 tag)
{
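	/* In the normal NAPI path hand the packet straight to the stack
	 * with netif_receive_skb(); from netpoll context fall back to
	 * netif_rx(), which only queues it.  VLAN-tagged frames take the
	 * VLAN acceleration hooks in either case. */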
	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
		if (adapter->vlgrp && is_vlan)
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
		else
			netif_receive_skb(skb);
	} else {

		if (adapter->vlgrp && is_vlan)
			vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
		else
			netif_rx(skb);
	}
}

static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
				     u32 status_err,
				     struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set */
	if ((status_err & IXGBE_RXD_STAT_IXSM) ||
	    !(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;
	/* TCP/UDP checksum error bit is set */
	if (status_err & (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_rx_error++;
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (IXGBE_RXD_STAT_L4CS | IXGBE_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *rx_ring,
				   int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *rx_buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!rx_buffer_info->page &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			rx_buffer_info->page = alloc_page(GFP_ATOMIC);
			if (!rx_buffer_info->page) {
				adapter->alloc_rx_page_failed++;
				goto no_buffers;
			}
			rx_buffer_info->page_dma =
			    pci_map_page(pdev, rx_buffer_info->page,
					 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
		}

		if (!rx_buffer_info->skb) {
			skb = netdev_alloc_skb(netdev, bufsz);

			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			rx_buffer_info->skb = skb;
			rx_buffer_info->dma = pci_map_single(pdev, skb->data,
							     bufsz,
							     PCI_DMA_FROMDEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr =
			    cpu_to_le64(rx_buffer_info->page_dma);
			rx_desc->read.hdr_addr =
			    cpu_to_le64(rx_buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr =
			    cpu_to_le64(rx_buffer_info->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];
	}
no_buffers:
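	/* Point the tail register at the last descriptor that was
	 * initialized, i.e. one behind next_to_use, wrapping at the
	 * ring boundary. */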
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		/*
		 * Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->tail);
	}
}

static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
			       struct ixgbe_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 upper_len, len, staterr;
	u16 hdr_info, vlan_tag;
	bool is_vlan, cleaned = false;
	int cleaned_count = 0;

	i = rx_ring->next_to_clean;
	upper_len = 0;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];
	is_vlan = (staterr & IXGBE_RXD_STAT_VP);
	vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);

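	/* In packet-split mode the hardware reports the header length in
	 * the descriptor's hdr_info field and places the payload in the
	 * page buffer; otherwise the whole frame lands in the skb's data
	 * buffer. */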
	while (staterr & IXGBE_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info =
			    le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info);
			len =
			    ((hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			     IXGBE_RXDADV_HDRBUFLEN_SHIFT);
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBE_RX_HDR_SIZE)
				len = IXGBE_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else
			len = le16_to_cpu(rx_desc->wb.upper.length);

		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (len && !skb_shinfo(skb)->nr_frags) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 adapter->rx_buf_len + NET_IP_ALIGN,
					 PCI_DMA_FROMDEVICE);
			skb_put(skb, len);
		}

		if (upper_len) {
			pci_unmap_page(pdev, rx_buffer_info->page_dma,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page, 0, upper_len);
			rx_buffer_info->page = NULL;

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_buffer = &rx_ring->rx_buffer_info[i];

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);

		cleaned_count++;
		if (staterr & IXGBE_RXD_STAT_EOP) {
			rx_ring->stats.packets++;
			rx_ring->stats.bytes += skb->len;
		} else {
			rx_buffer_info->skb = next_buffer->skb;
			rx_buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbe_rx_checksum(adapter, staterr, skb);
		skb->protocol = eth_type_trans(skb, netdev);
		ixgbe_receive_skb(adapter, skb, is_vlan, vlan_tag);
		netdev->last_rx = jiffies;

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
		is_vlan = (staterr & IXGBE_RXD_STAT_VP);
		vlan_tag = le16_to_cpu(rx_desc->wb.upper.vlan);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	return cleaned;
}

#define IXGBE_MAX_INTR 10
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
	int i, vector = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i),
			       IXGBE_MSIX_VECTOR(vector));
		writel(EITR_INTS_PER_SEC_TO_REG(adapter->tx_eitr),
		       adapter->hw.hw_addr + adapter->tx_ring[i].itr_register);
		vector++;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i),
			       IXGBE_MSIX_VECTOR(vector));
		writel(EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr),
		       adapter->hw.hw_addr + adapter->rx_ring[i].itr_register);
		vector++;
	}

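	/* The last vector is reserved for link state changes and other
	 * non-queue causes; give it its own interrupt throttle rate. */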
	vector = adapter->num_tx_queues + adapter->num_rx_queues;
	ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX,
		       IXGBE_MSIX_VECTOR(vector));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(vector), 1950);
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (eicr & IXGBE_EICR_LSC) {
		adapter->lsc_int++;
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies);
	}

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
	struct ixgbe_ring *txr = data;
	struct ixgbe_adapter *adapter = txr->adapter;

	ixgbe_clean_tx_irq(adapter, txr);

	return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
	struct ixgbe_ring *rxr = data;
	struct ixgbe_adapter *adapter = rxr->adapter;

	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims_value);
	netif_rx_schedule(adapter->netdev, &adapter->napi);
	return IRQ_HANDLED;
}

static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbe_adapter *adapter = container_of(napi,
					struct ixgbe_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int work_done = 0;
	struct ixgbe_ring *rxr = adapter->rx_ring;

	/* Keep link state information with original netdev */
	if (!netif_carrier_ok(netdev))
		goto quit_polling;

	ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);

	/* If no Tx and not enough Rx work done, exit the polling mode */
	if ((work_done < budget) || !netif_running(netdev)) {
quit_polling:
		netif_rx_complete(netdev, napi);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
					rxr->eims_value);
	}

	return work_done;
}

/**
 * ixgbe_setup_msix - Initialize MSI-X interrupts
 *
 * ixgbe_setup_msix allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbe_setup_msix(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, int_vector = 0, err = 0;
	int max_msix_count;

	/* +1 for the LSC interrupt */
	max_msix_count = adapter->num_rx_queues + adapter->num_tx_queues + 1;
	adapter->msix_entries = kcalloc(max_msix_count,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < max_msix_count; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			      max_msix_count);
	if (err)
		goto out;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		sprintf(adapter->tx_ring[i].name, "%s-tx%d", netdev->name, i);
		err = request_irq(adapter->msix_entries[int_vector].vector,
				  &ixgbe_msix_clean_tx,
				  0,
				  adapter->tx_ring[i].name,
				  &(adapter->tx_ring[i]));
		if (err) {
			DPRINTK(PROBE, ERR,
				"request_irq failed for MSIX interrupt "
				"Error: %d\n", err);
			goto release_irqs;
		}
		adapter->tx_ring[i].eims_value =
		    (1 << IXGBE_MSIX_VECTOR(int_vector));
		adapter->tx_ring[i].itr_register = IXGBE_EITR(int_vector);
		int_vector++;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		if (strlen(netdev->name) < (IFNAMSIZ - 5))
			sprintf(adapter->rx_ring[i].name,
				"%s-rx%d", netdev->name, i);
		else
			memcpy(adapter->rx_ring[i].name,
			       netdev->name, IFNAMSIZ);
		err = request_irq(adapter->msix_entries[int_vector].vector,
				  &ixgbe_msix_clean_rx, 0,
				  adapter->rx_ring[i].name,
				  &(adapter->rx_ring[i]));
		if (err) {
			DPRINTK(PROBE, ERR,
				"request_irq failed for MSIX interrupt "
				"Error: %d\n", err);
			goto release_irqs;
		}

		adapter->rx_ring[i].eims_value =
		    (1 << IXGBE_MSIX_VECTOR(int_vector));
		adapter->rx_ring[i].itr_register = IXGBE_EITR(int_vector);
		int_vector++;
	}

	sprintf(adapter->lsc_name, "%s-lsc", netdev->name);
	err = request_irq(adapter->msix_entries[int_vector].vector,
			  &ixgbe_msix_lsc, 0, adapter->lsc_name, netdev);
	if (err) {
		DPRINTK(PROBE, ERR,
			"request_irq for msix_lsc failed: %d\n", err);
		goto release_irqs;
	}

	/* FIXME: implement netif_napi_remove() instead */
	adapter->napi.poll = ixgbe_clean_rxonly;
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
	return 0;

release_irqs:
	int_vector--;
	for (; int_vector >= adapter->num_tx_queues; int_vector--)
		free_irq(adapter->msix_entries[int_vector].vector,
			 &(adapter->rx_ring[int_vector -
					    adapter->num_tx_queues]));

	for (; int_vector >= 0; int_vector--)
		free_irq(adapter->msix_entries[int_vector].vector,
			 &(adapter->tx_ring[int_vector]));
out:
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
	return err;
}

/**
 * ixgbe_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;

	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	if (!eicr)
		return IRQ_NONE;	/* Not our interrupt */

	if (eicr & IXGBE_EICR_LSC) {
		adapter->lsc_int++;
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies);
	}
	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		/* Disable interrupts and register for poll.  The flush of the
		 * posted write is intentionally left out. */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
		__netif_rx_schedule(netdev, &adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter, u32 *num_rx_queues)
{
	struct net_device *netdev = adapter->netdev;
	int flags, err;
	irq_handler_t handler = ixgbe_intr;

	flags = IRQF_SHARED;

	err = ixgbe_setup_msix(adapter);
	if (!err)
		goto request_done;

	/*
	 * if we can't do MSI-X, fall through and try MSI
	 * No need to reallocate memory since we're decreasing the number of
	 * queues. We just won't use the other ones, also it is freed correctly
	 * on ixgbe_remove.
	 */
	*num_rx_queues = 1;

	/* do MSI */
	err = pci_enable_msi(adapter->pdev);
	if (!err) {
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
		flags &= ~IRQF_SHARED;
		handler = &ixgbe_intr;
	}

	err = request_irq(adapter->pdev->irq, handler, flags,
			  netdev->name, netdev);
	if (err)
		DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);

request_done:
	return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		int i;

		for (i = 0; i < adapter->num_tx_queues; i++)
			free_irq(adapter->msix_entries[i].vector,
				 &(adapter->tx_ring[i]));
		for (i = 0; i < adapter->num_rx_queues; i++)
			free_irq(adapter->msix_entries[i +
						adapter->num_tx_queues].vector,
				 &(adapter->rx_ring[i]));
		i = adapter->num_rx_queues + adapter->num_tx_queues;
		free_irq(adapter->msix_entries[i].vector, netdev);
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
	if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
	}
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgbe_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
{
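	/* With MSI-X, enable auto-clear (EIAC) for the queue interrupt
	 * causes; LSC and the "other" cause are excluded and must be
	 * acknowledged explicitly. */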
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC,
				(IXGBE_EIMS_ENABLE_MASK &
				 ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
			IXGBE_EIMS_ENABLE_MASK);
	IXGBE_WRITE_FLUSH(&adapter->hw);
}

/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 *
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;

	if (adapter->rx_eitr)
		IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
				EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));

	/* for re-triggering the interrupt in non-NAPI mode */
	adapter->rx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
	adapter->tx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));

	ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), i);
}

/**
 * ixgbe_configure_tx - Configure 82598 Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, tdlen;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		tdba = adapter->tx_ring[i].dma;
		tdlen = adapter->tx_ring[i].count *
		    sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
		adapter->tx_ring[i].head = IXGBE_TDH(i);
		adapter->tx_ring[i].tail = IXGBE_TDT(i);
	}

	IXGBE_WRITE_REG(hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT);
}

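/* number of whole pages needed to hold S bytes */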
#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
/**
 * ixgbe_configure_rx - Configure 82598 Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rdlen, rxctrl, rxcsum;
	u32 random[10];
	u32 reta, mrqc;
	int i;
	u32 fctrl, hlreg0;
	u32 srrctl;
	u32 pages;

	/* Decide whether to use packet split mode or not */
	if (netdev->mtu > ETH_DATA_LEN)
		adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
	else
		adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		adapter->rx_buf_len = IXGBE_RX_HDR_SIZE;
	} else {
		if (netdev->mtu <= ETH_DATA_LEN)
			adapter->rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			adapter->rx_buf_len = ALIGN(max_frame, 1024);
	}

	fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
	fctrl |= IXGBE_FCTRL_BAM;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);

	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
	else
		hlreg0 |= IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);

	pages = PAGE_USE_COUNT(adapter->netdev->mtu);

	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(0));
	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		srrctl |= PAGE_SIZE >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBE_RX_HDR_SIZE <<
			    IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (adapter->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |=
			     IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |=
			     adapter->rx_buf_len >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(0), srrctl);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* disable receives while setting up the descriptors */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), (rdba & DMA_32BIT_MASK));
		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
		adapter->rx_ring[i].head = IXGBE_RDH(i);
		adapter->rx_ring[i].tail = IXGBE_RDT(i);
	}

	if (adapter->num_rx_queues > 1) {
		/* Random 40bytes used as random key in RSS hash function */
		get_random_bytes(&random[0], 40);

		switch (adapter->num_rx_queues) {
		case 8:
		case 4:
			/* Bits [3:0] in each byte refers the Rx queue no */
			reta = 0x00010203;
			break;
		case 2:
			reta = 0x00010001;
			break;
		default:
			reta = 0x00000000;
			break;
		}

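		/* Each 32-bit RETA register packs four one-byte redirection
		 * entries, so one write programs the Rx queue choice for
		 * four slices of the hash space. */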
		/* Fill out redirection table */
		for (i = 0; i < 32; i++) {
			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, reta);
			if (adapter->num_rx_queues > 4) {
				i++;
				IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i,
						      0x04050607);
			}
		}

		/* Fill out hash function seeds */
		for (i = 0; i < 10; i++)
			IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, random[i]);

		mrqc = IXGBE_MRQC_RSSEN
		    /* Perform hash on these packet types */
		    | IXGBE_MRQC_RSS_FIELD_IPV4
		    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
		    | IXGBE_MRQC_RSS_FIELD_IPV6
		    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
		    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

		/* Multiqueue and packet checksumming are mutually exclusive. */
		rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
		rxcsum |= IXGBE_RXCSUM_PCSD;
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
	} else {
		/* Enable Receive Checksum Offload for TCP and UDP */
		rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
		if (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
			/* Enable IPv4 payload checksum for UDP fragments
			 * Must be used in conjunction with packet-split. */
			rxcsum |= IXGBE_RXCSUM_IPPCSE;
		} else {
			/* don't need to clear IPPCSE as it defaults to 0 */
		}
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
	}
	/* Enable Receives */
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
}

static void ixgbe_vlan_rx_register(struct net_device *netdev,
				   struct vlan_group *grp)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 ctrl;

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
		ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
	}

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);
}

static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* add VID to filter table */
	ixgbe_set_vfta(&adapter->hw, vid, 0, true);
}

static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_disable(adapter);

	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IXGBE_DOWN, &adapter->state))
		ixgbe_irq_enable(adapter);

	/* remove VID from filter table */
	ixgbe_set_vfta(&adapter->hw, vid, 0, false);
}

static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
	ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

/**
 * ixgbe_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void ixgbe_set_multi(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	u8 *mta_list;
	u32 fctrl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

	if (netdev->flags & IFF_PROMISC) {
		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	} else if (netdev->flags & IFF_ALLMULTI) {
		fctrl |= IXGBE_FCTRL_MPE;
		fctrl &= ~IXGBE_FCTRL_UPE;
	} else {
		fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

	if (netdev->mc_count) {
		mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
		if (!mta_list)
			return;

		/* Shared function expects packed array of only addresses. */
		mc_ptr = netdev->mc_list;

		for (i = 0; i < netdev->mc_count; i++) {
			if (!mc_ptr)
				break;
			memcpy(mta_list + (i * ETH_ALEN), mc_ptr->dmi_addr,
			       ETH_ALEN);
			mc_ptr = mc_ptr->next;
		}

		ixgbe_update_mc_addr_list(hw, mta_list, i, 0);
		kfree(mta_list);
	} else {
		ixgbe_update_mc_addr_list(hw, NULL, 0, 0);
	}

}

static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbe_set_multi(netdev);

	ixgbe_restore_vlan(adapter);

	ixgbe_configure_tx(adapter);
	ixgbe_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
				       (adapter->rx_ring[i].count - 1));
}

static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;
	u32 gpie = 0;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl, rxdctl, mhadd;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;

	if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED |
			      IXGBE_FLAG_MSI_ENABLED)) {
		if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
			gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
				IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
		} else {
			/* MSI only */
			gpie = (IXGBE_GPIE_EIAME |
				IXGBE_GPIE_PBA_SUPPORT);
		}
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie);
		gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
	}

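	/* program the MAC's maximum frame size (the MFS field of MHADD)
	 * so frames up to the current MTU are accepted */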
	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);

	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;

		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i));
		rxdctl |= IXGBE_RXDCTL_ENABLE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl);
	}
	/* enable all receives */
	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		ixgbe_configure_msix(adapter);
	else
		ixgbe_configure_msi_and_legacy(adapter);

	clear_bit(__IXGBE_DOWN, &adapter->state);
	napi_enable(&adapter->napi);
	ixgbe_irq_enable(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	mod_timer(&adapter->watchdog_timer, jiffies);
	return 0;
}

void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		msleep(1);
	ixgbe_down(adapter);
	ixgbe_up(adapter);
	clear_bit(__IXGBE_RESETTING, &adapter->state);
}

int ixgbe_up(struct ixgbe_adapter *adapter)
{
	/* hardware has been reset, we need to reload some things */
	ixgbe_configure(adapter);

	return ixgbe_up_complete(adapter);
}

void ixgbe_reset(struct ixgbe_adapter *adapter)
{
	if (ixgbe_init_hw(&adapter->hw))
		DPRINTK(PROBE, ERR, "Hardware Error\n");

	/* reprogram the RAR[0] in case user changed it. */
	ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);

}

#ifdef CONFIG_PM
static int ixgbe_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 err, num_rx_queues = adapter->num_rx_queues;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
		       "suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = ixgbe_request_irq(adapter, &num_rx_queues);
		if (err)
			return err;
	}

	ixgbe_reset(adapter);

	if (netif_running(netdev))
		ixgbe_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

/**
 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbe_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			pci_unmap_single(pdev, rx_buffer_info->dma,
					 adapter->rx_buf_len,
					 PCI_DMA_FROMDEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			dev_kfree_skb(rx_buffer_info->skb);
			rx_buffer_info->skb = NULL;
		}
		if (!rx_buffer_info->page)
			continue;
		pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		rx_buffer_info->page_dma = 0;

		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
	}

	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, adapter->hw.hw_addr + rx_ring->head);
	writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * ixgbe_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
				struct ixgbe_ring *tx_ring)
{
	struct ixgbe_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	writel(0, adapter->hw.hw_addr + tx_ring->head);
	writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}

void ixgbe_down(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 rxctrl;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBE_DOWN, &adapter->state);

	/* disable receives */
	rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL,
			rxctrl & ~IXGBE_RXCTRL_RXEN);

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */

	/* flush both disables */
	IXGBE_WRITE_FLUSH(&adapter->hw);
	msleep(10);

	napi_disable(&adapter->napi);

	ixgbe_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	ixgbe_reset(adapter);
	ixgbe_clean_all_tx_rings(adapter);
	ixgbe_clean_all_rx_rings(adapter);

}

static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_free_irq(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static void ixgbe_shutdown(struct pci_dev *pdev)
{
	ixgbe_suspend(pdev, PMSG_SUSPEND);
}

/**
 * ixgbe_clean - NAPI Rx polling callback
 * @adapter: board private structure
 **/
static int ixgbe_clean(struct napi_struct *napi, int budget)
{
	struct ixgbe_adapter *adapter = container_of(napi,
					struct ixgbe_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int tx_cleaned = 0, work_done = 0;

	/* In non-MSIX case, there is no multi-Tx/Rx queue */
	tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
	ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done,
			   budget);

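	/* if Tx cleaning did any work, claim the full budget so NAPI keeps
	 * polling instead of re-enabling interrupts early */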
	if (tx_cleaned)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		netif_rx_complete(netdev, napi);
		if (!test_bit(__IXGBE_DOWN, &adapter->state))
			ixgbe_irq_enable(adapter);
	}

	return work_done;
}

/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbe_tx_timeout(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}

static void ixgbe_reset_task(struct work_struct *work)
{
	struct ixgbe_adapter *adapter;
	adapter = container_of(work, struct ixgbe_adapter, reset_task);

	adapter->tx_timeout_count++;

	ixgbe_reinit_locked(adapter);
}

/**
 * ixgbe_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	for (i = 0; i < adapter->num_tx_queues; i++)
		adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].adapter = adapter;
		adapter->rx_ring[i].itr_register = IXGBE_EITR(i);
		adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
	}

	return 0;
}

/**
 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbe_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;

	/* default flow control settings */
	hw->fc.original_type = ixgbe_fc_full;
	hw->fc.type = ixgbe_fc_full;

	hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
	if (hw->mac.ops.reset(hw)) {
		dev_err(&pdev->dev, "HW Init failed\n");
		return -EIO;
	}
	if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true,
					 false)) {
		dev_err(&pdev->dev, "Link Speed setup failed\n");
		return -EIO;
	}

	/* initialize eeprom parameters */
	if (ixgbe_init_eeprom(hw)) {
		dev_err(&pdev->dev, "EEPROM initialization failed\n");
		return -EIO;
	}

	/* Set the default values */
	adapter->num_rx_queues = IXGBE_DEFAULT_RXQ;
	adapter->num_tx_queues = 1;
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	if (ixgbe_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;
}

/**
 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbe_tx_buffer) * txdr->count;
	txdr->tx_buffer_info = vmalloc(size);
	if (!txdr->tx_buffer_info) {
		DPRINTK(PROBE, ERR,
			"Unable to allocate memory for the transmit descriptor ring\n");
		return -ENOMEM;
	}
	memset(txdr->tx_buffer_info, 0, size);

	/* round up to nearest 4K */
	txdr->size = txdr->count * sizeof(union ixgbe_adv_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if (!txdr->desc) {
		vfree(txdr->tx_buffer_info);
		DPRINTK(PROBE, ERR,
			"Memory allocation failed for the tx desc ring\n");
		return -ENOMEM;
	}

	txdr->adapter = adapter;
	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;
	txdr->work_limit = txdr->count;
	spin_lock_init(&txdr->tx_lock);

	return 0;
}

/**
 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
			     struct ixgbe_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
	rxdr->rx_buffer_info = vmalloc(size);
	if (!rxdr->rx_buffer_info) {
		DPRINTK(PROBE, ERR,
			"vmalloc allocation failed for the rx desc ring\n");
		return -ENOMEM;
	}
	memset(rxdr->rx_buffer_info, 0, size);

	desc_len = sizeof(union ixgbe_adv_rx_desc);

	/* Round up to nearest 4K */
	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

	if (!rxdr->desc) {
		DPRINTK(PROBE, ERR,
			"Memory allocation failed for the rx desc ring\n");
		vfree(rxdr->rx_buffer_info);
		return -ENOMEM;
	}

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->adapter = adapter;

	return 0;
}

/**
 * ixgbe_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
}

/**
 * ixgbe_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
				    struct ixgbe_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbe_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
}

/**
 * ixgbe_setup_all_tx_resources - wrapper to allocate Tx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			DPRINTK(PROBE, ERR,
				"Allocation for Tx Queue %u failed\n", i);
			break;
		}
	}

	return err;
}

/**
 * ixgbe_setup_all_rx_resources - wrapper to allocate Rx resources
 * (Descriptors) for all queues
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
1805 *
1806 * Return 0 on success, negative on failure
1807 **/
1808
1809static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
1810{
1811 int i, err = 0;
1812
1813 for (i = 0; i < adapter->num_rx_queues; i++) {
1814 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1815 if (err) {
1816 DPRINTK(PROBE, ERR,
1817 "Allocation for Rx Queue %u failed\n", i);
1818 break;
1819 }
1820 }
1821
1822 return err;
1823}
1824
1825/**
1826 * ixgbe_change_mtu - Change the Maximum Transmission Unit
1827 * @netdev: network interface device structure
1828 * @new_mtu: new value for maximum frame size
1829 *
1830 * Returns 0 on success, negative on failure
1831 **/
1832static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
1833{
1834 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1835 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
1836
1837 if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) ||
1838 (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
1839 return -EINVAL;
1840
1841 netdev->mtu = new_mtu;
1842
1843	if (netif_running(netdev))
1844 ixgbe_reinit_locked(adapter);
1845
1846 return 0;
1847}
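
/*
 * Worked example (illustrative): with the standard MTU of 1500,
 * max_frame above is 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518,
 * the classic maximum Ethernet frame.  A 9000-byte jumbo MTU gives
 * 9018, which is accepted only if it is still no larger than
 * IXGBE_MAX_JUMBO_FRAME_SIZE.
 */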
1848
1849/**
1850 * ixgbe_open - Called when a network interface is made active
1851 * @netdev: network interface device structure
1852 *
1853 * Returns 0 on success, negative value on failure
1854 *
1855 * The open entry point is called when a network interface is made
1856 * active by the system (IFF_UP). At this point all resources needed
1857 * for transmit and receive operations are allocated, the interrupt
1858 * handler is registered with the OS, the watchdog timer is started,
1859 * and the stack is notified that the interface is ready.
1860 **/
1861static int ixgbe_open(struct net_device *netdev)
1862{
1863 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1864 int err;
1865 u32 ctrl_ext;
1866 u32 num_rx_queues = adapter->num_rx_queues;
1867
1868 /* Let firmware know the driver has taken over */
1869 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1870 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
1871 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
1872
1873try_intr_reinit:
1874 /* allocate transmit descriptors */
1875 err = ixgbe_setup_all_tx_resources(adapter);
1876 if (err)
1877 goto err_setup_tx;
1878
1879 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
1880 num_rx_queues = 1;
1881 adapter->num_rx_queues = num_rx_queues;
1882 }
1883
1884 /* allocate receive descriptors */
1885 err = ixgbe_setup_all_rx_resources(adapter);
1886 if (err)
1887 goto err_setup_rx;
1888
1889 ixgbe_configure(adapter);
1890
1891 err = ixgbe_request_irq(adapter, &num_rx_queues);
1892 if (err)
1893 goto err_req_irq;
1894
1895	/* ixgbe_request_irq might have reduced num_rx_queues */
1896 if (num_rx_queues < adapter->num_rx_queues) {
1897 /* We didn't get MSI-X, so we need to release everything,
1898 * set our Rx queue count to num_rx_queues, and redo the
1899 * whole init process.
1900 */
1901 ixgbe_free_irq(adapter);
1902 if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1903 pci_disable_msi(adapter->pdev);
1904 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
1905 }
1906 ixgbe_free_all_rx_resources(adapter);
1907 ixgbe_free_all_tx_resources(adapter);
1908 adapter->num_rx_queues = num_rx_queues;
1909
1910 /* Reset the hardware, and start over. */
1911 ixgbe_reset(adapter);
1912
1913 goto try_intr_reinit;
1914 }
1915
1916 err = ixgbe_up_complete(adapter);
1917 if (err)
1918 goto err_up;
1919
1920 return 0;
1921
1922err_up:
1923 ixgbe_free_irq(adapter);
1924err_req_irq:
1925 ixgbe_free_all_rx_resources(adapter);
1926err_setup_rx:
1927 ixgbe_free_all_tx_resources(adapter);
1928err_setup_tx:
1929 ixgbe_reset(adapter);
1930
1931 return err;
1932}
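
/*
 * Note on the try_intr_reinit loop in ixgbe_open() above: rings are
 * first sized for MSI-X with multiple Rx queues.  If ixgbe_request_irq()
 * grants fewer vectors than requested, everything is torn down -- IRQ,
 * MSI, and both descriptor ring sets -- the Rx queue count is lowered to
 * what was granted, the hardware is reset, and the whole allocation
 * sequence is retried from the label.
 */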
1933
1934/**
1935 * ixgbe_close - Disables a network interface
1936 * @netdev: network interface device structure
1937 *
1938 * Returns 0, this is not allowed to fail
1939 *
1940 * The close entry point is called when an interface is de-activated
1941 * by the OS. The hardware is still under the driver's control, but
1942 * needs to be disabled. A global MAC reset is issued to stop the
1943 * hardware, and all transmit and receive resources are freed.
1944 **/
1945static int ixgbe_close(struct net_device *netdev)
1946{
1947 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1948 u32 ctrl_ext;
1949
1950 ixgbe_down(adapter);
1951 ixgbe_free_irq(adapter);
1952
1953 ixgbe_free_all_tx_resources(adapter);
1954 ixgbe_free_all_rx_resources(adapter);
1955
1956 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
1957 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
1958 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
1959
1960 return 0;
1961}
1962
1963/**
1964 * ixgbe_update_stats - Update the board statistics counters.
1965 * @adapter: board private structure
1966 **/
1967void ixgbe_update_stats(struct ixgbe_adapter *adapter)
1968{
1969 struct ixgbe_hw *hw = &adapter->hw;
1970 u64 good_rx, missed_rx, bprc;
1971
1972 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
1973 good_rx = IXGBE_READ_REG(hw, IXGBE_GPRC);
1974 missed_rx = IXGBE_READ_REG(hw, IXGBE_MPC(0));
1975 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(1));
1976 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(2));
1977 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(3));
1978 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(4));
1979 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(5));
1980 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(6));
1981 missed_rx += IXGBE_READ_REG(hw, IXGBE_MPC(7));
1982 adapter->stats.gprc += (good_rx - missed_rx);
1983
1984 adapter->stats.mpc[0] += missed_rx;
1985 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
1986 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
1987 adapter->stats.bprc += bprc;
1988 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
1989 adapter->stats.mprc -= bprc;
1990 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
1991 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
1992 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
1993 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
1994 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
1995 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
1996 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
1997
1998 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
1999 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
2000 adapter->stats.lxontxc += IXGBE_READ_REG(hw, IXGBE_LXONTXC);
2001 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
2002 adapter->stats.lxofftxc += IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
2003 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
2004 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
2005 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
2006 adapter->stats.rnbc[0] += IXGBE_READ_REG(hw, IXGBE_RNBC(0));
2008 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
2009 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
2010 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
2011 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
2012 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
2013 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
2014 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
2015 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
2016 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
2017 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
2018 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
2019 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
2020
2021 /* Fill out the OS statistics structure */
2022 adapter->net_stats.rx_packets = adapter->stats.gprc;
2023 adapter->net_stats.tx_packets = adapter->stats.gptc;
2024 adapter->net_stats.rx_bytes = adapter->stats.gorc;
2025 adapter->net_stats.tx_bytes = adapter->stats.gotc;
2026 adapter->net_stats.multicast = adapter->stats.mprc;
2027
2028 /* Rx Errors */
2029 adapter->net_stats.rx_errors = adapter->stats.crcerrs +
2030 adapter->stats.rlec;
2031 adapter->net_stats.rx_dropped = 0;
2032 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2033 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2034 adapter->net_stats.rx_missed_errors = adapter->stats.mpc[0];
2036}
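
/*
 * Example of the GPRC adjustment above (illustrative numbers): if the
 * hardware reports GPRC = 1000 and the eight MPC registers sum to 25
 * missed packets, only 975 packets actually reached the host, so
 * stats.gprc advances by 1000 - 25 = 975 while the 25 missed packets
 * are accounted in mpc[0] and reported as rx_missed_errors.
 */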
2037
2038/**
2039 * ixgbe_watchdog - Timer Call-back
2040 * @data: pointer to adapter cast into an unsigned long
2041 **/
2042static void ixgbe_watchdog(unsigned long data)
2043{
2044 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
2045 struct net_device *netdev = adapter->netdev;
2046 bool link_up;
2047 u32 link_speed = 0;
2048
2049	adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
2050
2051 if (link_up) {
2052 if (!netif_carrier_ok(netdev)) {
2053 u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2054 u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS);
2055#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
2056#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
2057 DPRINTK(LINK, INFO, "NIC Link is Up %s, "
2058 "Flow Control: %s\n",
2059 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
2060 "10 Gbps" :
2061 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
2062 "1 Gpbs" : "unknown speed")),
2063 ((FLOW_RX && FLOW_TX) ? "RX/TX" :
2064 (FLOW_RX ? "RX" :
2065 (FLOW_TX ? "TX" : "None"))));
2066
2067 netif_carrier_on(netdev);
2068 netif_wake_queue(netdev);
2069 } else {
2070 /* Force detection of hung controller */
2071 adapter->detect_tx_hung = true;
2072 }
2073 } else {
2074 if (netif_carrier_ok(netdev)) {
2075 DPRINTK(LINK, INFO, "NIC Link is Down\n");
2076 netif_carrier_off(netdev);
2077 netif_stop_queue(netdev);
2078 }
2079 }
2080
2081 ixgbe_update_stats(adapter);
2082
2083 /* Reset the timer */
2084 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2085 mod_timer(&adapter->watchdog_timer,
2086 round_jiffies(jiffies + 2 * HZ));
2087}
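
/*
 * Timer cadence (illustrative): with HZ = 1000 the rearm above asks
 * for jiffies + 2000; round_jiffies() aligns that expiry to a whole
 * second so periodic timers from many subsystems tend to fire
 * together, letting an idle CPU sleep longer between wakeups.
 */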
2088
2089#define IXGBE_MAX_TXD_PWR 14
2090#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
2091
2092/* Tx Descriptors needed, worst case */
2093#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
2094 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
2095#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
2096 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
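
/*
 * Worked example (illustrative): IXGBE_MAX_DATA_PER_TXD is 1 << 14 =
 * 16384 bytes, so TXD_USE_COUNT(16384) = 1 while TXD_USE_COUNT(20000) =
 * (20000 >> 14) + 1 = 2 descriptors (16384 + 3616 bytes).  With 4K
 * pages, DESC_NEEDED works out to 1 + MAX_SKB_FRAGS + 1: the worst-case
 * descriptor count for one skb plus a context descriptor.
 */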
2097
2098static int ixgbe_tso(struct ixgbe_adapter *adapter,
2099 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
2100 u32 tx_flags, u8 *hdr_len)
2101{
2102 struct ixgbe_adv_tx_context_desc *context_desc;
2103 unsigned int i;
2104 int err;
2105 struct ixgbe_tx_buffer *tx_buffer_info;
2106 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2107 u32 mss_l4len_idx = 0, l4len;
2108 *hdr_len = 0;
2109
2110 if (skb_is_gso(skb)) {
2111 if (skb_header_cloned(skb)) {
2112 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2113 if (err)
2114 return err;
2115 }
2116 l4len = tcp_hdrlen(skb);
2117 *hdr_len += l4len;
2118
2119		if (skb->protocol == htons(ETH_P_IP)) {
2120			struct iphdr *iph = ip_hdr(skb);
2121 iph->tot_len = 0;
2122 iph->check = 0;
2123 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2124 iph->daddr, 0,
2125 IPPROTO_TCP,
2126 0);
2127 adapter->hw_tso_ctxt++;
2128 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2129 ipv6_hdr(skb)->payload_len = 0;
2130 tcp_hdr(skb)->check =
2131 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2132 &ipv6_hdr(skb)->daddr,
2133 0, IPPROTO_TCP, 0);
2134 adapter->hw_tso6_ctxt++;
2135 }
2136
2137 i = tx_ring->next_to_use;
2138
2139 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2140 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
2141
2142 /* VLAN MACLEN IPLEN */
2143 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2144 vlan_macip_lens |=
2145 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
2146 vlan_macip_lens |= ((skb_network_offset(skb)) <<
2147 IXGBE_ADVTXD_MACLEN_SHIFT);
2148 *hdr_len += skb_network_offset(skb);
2149 vlan_macip_lens |=
2150 (skb_transport_header(skb) - skb_network_header(skb));
2151 *hdr_len +=
2152 (skb_transport_header(skb) - skb_network_header(skb));
2153 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2154 context_desc->seqnum_seed = 0;
2155
2156 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2157 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
2158 IXGBE_ADVTXD_DTYP_CTXT);
2159
2160		if (skb->protocol == htons(ETH_P_IP))
2161			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2162 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2163 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2164
2165 /* MSS L4LEN IDX */
2166 mss_l4len_idx |=
2167 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
2168 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
2169 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2170
2171 tx_buffer_info->time_stamp = jiffies;
2172 tx_buffer_info->next_to_watch = i;
2173
2174 i++;
2175 if (i == tx_ring->count)
2176 i = 0;
2177 tx_ring->next_to_use = i;
2178
2179 return true;
2180 }
2181 return false;
2182}
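
/*
 * Field-packing example for the TSO context descriptor built above
 * (illustrative, assuming the usual advanced-descriptor layout): for an
 * untagged IPv4/TCP frame with a 14-byte Ethernet header and a 20-byte
 * IP header, vlan_macip_lens = (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20,
 * i.e. MACLEN in the middle bits, IPLEN in the low bits, and the VLAN
 * tag (when present) in the top 16 bits.
 */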
2183
2184static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
2185 struct ixgbe_ring *tx_ring,
2186 struct sk_buff *skb, u32 tx_flags)
2187{
2188 struct ixgbe_adv_tx_context_desc *context_desc;
2189 unsigned int i;
2190 struct ixgbe_tx_buffer *tx_buffer_info;
2191 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2192
2193 if (skb->ip_summed == CHECKSUM_PARTIAL ||
2194 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
2195 i = tx_ring->next_to_use;
2196 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2197 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
2198
2199 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2200 vlan_macip_lens |=
2201 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
2202 vlan_macip_lens |= (skb_network_offset(skb) <<
2203 IXGBE_ADVTXD_MACLEN_SHIFT);
2204 if (skb->ip_summed == CHECKSUM_PARTIAL)
2205 vlan_macip_lens |= (skb_transport_header(skb) -
2206 skb_network_header(skb));
2207
2208 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2209 context_desc->seqnum_seed = 0;
2210
2211 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
2212 IXGBE_ADVTXD_DTYP_CTXT);
2213
2214 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2215			if (skb->protocol == htons(ETH_P_IP))
2216				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2217
2218			if (skb->sk && (skb->sk->sk_protocol == IPPROTO_TCP))
2219 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2220 }
2221
2222 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2223 context_desc->mss_l4len_idx = 0;
2224
2225 tx_buffer_info->time_stamp = jiffies;
2226 tx_buffer_info->next_to_watch = i;
2227 adapter->hw_csum_tx_good++;
2228 i++;
2229 if (i == tx_ring->count)
2230 i = 0;
2231 tx_ring->next_to_use = i;
2232
2233 return true;
2234 }
2235 return false;
2236}
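
/*
 * Example (illustrative): for a CHECKSUM_PARTIAL IPv4/TCP packet the
 * context descriptor above carries type_tucmd_mlhl = IXGBE_TXD_CMD_DEXT
 * | IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_TUCMD_IPV4 |
 * IXGBE_ADVTXD_TUCMD_L4T_TCP; the matching data descriptors then request
 * L4 checksum insertion via the TXSM bit set in ixgbe_tx_queue().
 */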
2237
2238static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
2239 struct ixgbe_ring *tx_ring,
2240 struct sk_buff *skb, unsigned int first)
2241{
2242 struct ixgbe_tx_buffer *tx_buffer_info;
2243 unsigned int len = skb->len;
2244 unsigned int offset = 0, size, count = 0, i;
2245 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2246 unsigned int f;
2247
2248 len -= skb->data_len;
2249
2250 i = tx_ring->next_to_use;
2251
2252 while (len) {
2253 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2254 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
2255
2256 tx_buffer_info->length = size;
2257 tx_buffer_info->dma = pci_map_single(adapter->pdev,
2258 skb->data + offset,
2259 size, PCI_DMA_TODEVICE);
2260 tx_buffer_info->time_stamp = jiffies;
2261 tx_buffer_info->next_to_watch = i;
2262
2263 len -= size;
2264 offset += size;
2265 count++;
2266 i++;
2267 if (i == tx_ring->count)
2268 i = 0;
2269 }
2270
2271 for (f = 0; f < nr_frags; f++) {
2272 struct skb_frag_struct *frag;
2273
2274 frag = &skb_shinfo(skb)->frags[f];
2275 len = frag->size;
2276 offset = frag->page_offset;
2277
2278 while (len) {
2279 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2280 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
2281
2282 tx_buffer_info->length = size;
2283 tx_buffer_info->dma = pci_map_page(adapter->pdev,
2284 frag->page,
2285 offset,
2286 size, PCI_DMA_TODEVICE);
2287 tx_buffer_info->time_stamp = jiffies;
2288 tx_buffer_info->next_to_watch = i;
2289
2290 len -= size;
2291 offset += size;
2292 count++;
2293 i++;
2294 if (i == tx_ring->count)
2295 i = 0;
2296 }
2297 }
2298 if (i == 0)
2299 i = tx_ring->count - 1;
2300 else
2301 i = i - 1;
2302 tx_ring->tx_buffer_info[i].skb = skb;
2303 tx_ring->tx_buffer_info[first].next_to_watch = i;
2304
2305 return count;
2306}
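
/*
 * Mapping example (illustrative): an skb with a 20000-byte linear area
 * and one 4096-byte page fragment is split above into three buffers --
 * 16384 + 3616 bytes mapped from skb->data plus 4096 bytes mapped from
 * the fragment page -- so count = 3, and the last buffer records the
 * skb pointer so the Tx cleanup path can free it once the hardware has
 * consumed the descriptors.
 */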
2307
2308static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
2309 struct ixgbe_ring *tx_ring,
2310 int tx_flags, int count, u32 paylen, u8 hdr_len)
2311{
2312 union ixgbe_adv_tx_desc *tx_desc = NULL;
2313 struct ixgbe_tx_buffer *tx_buffer_info;
2314 u32 olinfo_status = 0, cmd_type_len = 0;
2315 unsigned int i;
2316 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2317
2318 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2319
2320 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2321
2322 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2323 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2324
2325 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
2326 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2327
2328 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
2329 IXGBE_ADVTXD_POPTS_SHIFT;
2330
2331 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2332 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
2333 IXGBE_ADVTXD_POPTS_SHIFT;
2334
2335 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2336 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
2337 IXGBE_ADVTXD_POPTS_SHIFT;
2338
2339 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
2340
2341 i = tx_ring->next_to_use;
2342 while (count--) {
2343 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2344 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
2345 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
2346 tx_desc->read.cmd_type_len =
2347 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
2348 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2349
2350 i++;
2351 if (i == tx_ring->count)
2352 i = 0;
2353 }
2354
2355 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
2356
2357 /*
2358 * Force memory writes to complete before letting h/w
2359 * know there are new descriptors to fetch. (Only
2360 * applicable for weak-ordered memory model archs,
2361 * such as IA-64).
2362 */
2363 wmb();
2364
2365 tx_ring->next_to_use = i;
2366 writel(i, adapter->hw.hw_addr + tx_ring->tail);
2367}
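
/*
 * Example (illustrative): for a TSO send with skb->len = 4150 and
 * hdr_len = 54 (14 Ethernet + 20 IP + 20 TCP), olinfo_status carries
 * (4150 - 54) << IXGBE_ADVTXD_PAYLEN_SHIFT, i.e. the 4096 bytes of TCP
 * payload the hardware will segment, not the on-wire frame length.
 */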
2368
2369static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2370{
2371 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2372 struct ixgbe_ring *tx_ring;
2373 unsigned int len = skb->len;
2374 unsigned int first;
2375 unsigned int tx_flags = 0;
2376 unsigned long flags = 0;
2377 u8 hdr_len;
2378 int tso;
2379 unsigned int mss = 0;
2380 int count = 0;
2381 unsigned int f;
2382 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2383 len -= skb->data_len;
2384
2385 tx_ring = adapter->tx_ring;
2386
2387 if (skb->len <= 0) {
2388 dev_kfree_skb(skb);
2389 return NETDEV_TX_OK;
2390 }
2391 mss = skb_shinfo(skb)->gso_size;
2392
2393 if (mss)
2394 count++;
2395 else if (skb->ip_summed == CHECKSUM_PARTIAL)
2396 count++;
2397
2398 count += TXD_USE_COUNT(len);
2399 for (f = 0; f < nr_frags; f++)
2400 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2401
2402 spin_lock_irqsave(&tx_ring->tx_lock, flags);
2403 if (IXGBE_DESC_UNUSED(tx_ring) < (count + 2)) {
2404 adapter->tx_busy++;
2405 netif_stop_queue(netdev);
2406 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2407 return NETDEV_TX_BUSY;
2408 }
2409 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2410 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
2411 tx_flags |= IXGBE_TX_FLAGS_VLAN;
2412 tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT);
2413 }
2414
2415	if (skb->protocol == htons(ETH_P_IP))
2416		tx_flags |= IXGBE_TX_FLAGS_IPV4;
2417 first = tx_ring->next_to_use;
2418 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
2419 if (tso < 0) {
2420 dev_kfree_skb_any(skb);
2421 return NETDEV_TX_OK;
2422 }
2423
2424 if (tso)
2425 tx_flags |= IXGBE_TX_FLAGS_TSO;
2426 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
2427 (skb->ip_summed == CHECKSUM_PARTIAL))
2428 tx_flags |= IXGBE_TX_FLAGS_CSUM;
2429
2430 ixgbe_tx_queue(adapter, tx_ring, tx_flags,
2431 ixgbe_tx_map(adapter, tx_ring, skb, first),
2432 skb->len, hdr_len);
2433
2434 netdev->trans_start = jiffies;
2435
2436 spin_lock_irqsave(&tx_ring->tx_lock, flags);
2437 /* Make sure there is space in the ring for the next send. */
2438 if (IXGBE_DESC_UNUSED(tx_ring) < DESC_NEEDED)
2439 netif_stop_queue(netdev);
2440 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
2441
2442 return NETDEV_TX_OK;
2443}
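
/*
 * Descriptor accounting example (illustrative): a CHECKSUM_PARTIAL skb
 * with a 1400-byte linear area and two 4096-byte fragments needs
 * count = 1 (context) + 1 + 1 + 1 = 4 descriptors, so the ring must
 * have at least count + 2 = 6 free entries before the frame is
 * accepted; otherwise the queue is stopped and NETDEV_TX_BUSY returned.
 */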
2444
2445/**
2446 * ixgbe_get_stats - Get System Network Statistics
2447 * @netdev: network interface device structure
2448 *
2449 * Returns the address of the device statistics structure.
2450 * The statistics are actually updated from the timer callback.
2451 **/
2452static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
2453{
2454 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2455
2456 /* only return the current stats */
2457 return &adapter->net_stats;
2458}
2459
2460/**
2461 * ixgbe_set_mac - Change the Ethernet Address of the NIC
2462 * @netdev: network interface device structure
2463 * @p: pointer to an address structure
2464 *
2465 * Returns 0 on success, negative on failure
2466 **/
2467static int ixgbe_set_mac(struct net_device *netdev, void *p)
2468{
2469 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2470 struct sockaddr *addr = p;
2471
2472 if (!is_valid_ether_addr(addr->sa_data))
2473 return -EADDRNOTAVAIL;
2474
2475 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2476 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
2477
2478 ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
2479
2480 return 0;
2481}
2482
2483#ifdef CONFIG_NET_POLL_CONTROLLER
2484/*
2485 * Polling 'interrupt' - used by things like netconsole to send skbs
2486 * without having to re-enable interrupts. It's not called while
2487 * the interrupt routine is executing.
2488 */
2489static void ixgbe_netpoll(struct net_device *netdev)
2490{
2491 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2492
2493 disable_irq(adapter->pdev->irq);
2494 adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
2495 ixgbe_intr(adapter->pdev->irq, netdev);
2496 adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
2497 enable_irq(adapter->pdev->irq);
2498}
2499#endif
2500
2501/**
2502 * ixgbe_probe - Device Initialization Routine
2503 * @pdev: PCI device information struct
2504 * @ent: entry in ixgbe_pci_tbl
2505 *
2506 * Returns 0 on success, negative on failure
2507 *
2508 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
2509 * The OS initialization, configuring of the adapter private structure,
2510 * and a hardware reset occur.
2511 **/
2512static int __devinit ixgbe_probe(struct pci_dev *pdev,
2513 const struct pci_device_id *ent)
2514{
2515 struct net_device *netdev;
2516 struct ixgbe_adapter *adapter = NULL;
2517 struct ixgbe_hw *hw;
2518 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
2519 unsigned long mmio_start, mmio_len;
2520 static int cards_found;
2521 int i, err, pci_using_dac;
2522 u16 link_status, link_speed, link_width;
2523 u32 part_num;
2524
2525 err = pci_enable_device(pdev);
2526 if (err)
2527 return err;
2528
2529 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
2530 !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
2531 pci_using_dac = 1;
2532 } else {
2533 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2534 if (err) {
2535 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2536 if (err) {
2537 dev_err(&pdev->dev, "No usable DMA "
2538 "configuration, aborting\n");
2539 goto err_dma;
2540 }
2541 }
2542 pci_using_dac = 0;
2543 }
2544
2545 err = pci_request_regions(pdev, ixgbe_driver_name);
2546 if (err) {
2547 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
2548 goto err_pci_reg;
2549 }
2550
2551 pci_set_master(pdev);
2552
2553 netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
2554 if (!netdev) {
2555 err = -ENOMEM;
2556 goto err_alloc_etherdev;
2557 }
2558
2559	SET_NETDEV_DEV(netdev, &pdev->dev);
2560
2561 pci_set_drvdata(pdev, netdev);
2562 adapter = netdev_priv(netdev);
2563
2564 adapter->netdev = netdev;
2565 adapter->pdev = pdev;
2566 hw = &adapter->hw;
2567 hw->back = adapter;
2568 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
2569
2570 mmio_start = pci_resource_start(pdev, 0);
2571 mmio_len = pci_resource_len(pdev, 0);
2572
2573 hw->hw_addr = ioremap(mmio_start, mmio_len);
2574 if (!hw->hw_addr) {
2575 err = -EIO;
2576 goto err_ioremap;
2577 }
2578
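	/* placeholder scan of BARs 1-5; nothing beyond BAR 0 is mapped here */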
2579 for (i = 1; i <= 5; i++) {
2580 if (pci_resource_len(pdev, i) == 0)
2581 continue;
2582 }
2583
2584 netdev->open = &ixgbe_open;
2585 netdev->stop = &ixgbe_close;
2586 netdev->hard_start_xmit = &ixgbe_xmit_frame;
2587 netdev->get_stats = &ixgbe_get_stats;
2588 netdev->set_multicast_list = &ixgbe_set_multi;
2589 netdev->set_mac_address = &ixgbe_set_mac;
2590 netdev->change_mtu = &ixgbe_change_mtu;
2591 ixgbe_set_ethtool_ops(netdev);
2592 netdev->tx_timeout = &ixgbe_tx_timeout;
2593 netdev->watchdog_timeo = 5 * HZ;
2594 netif_napi_add(netdev, &adapter->napi, ixgbe_clean, 64);
2595 netdev->vlan_rx_register = ixgbe_vlan_rx_register;
2596 netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
2597 netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
2598#ifdef CONFIG_NET_POLL_CONTROLLER
2599 netdev->poll_controller = ixgbe_netpoll;
2600#endif
2601 strcpy(netdev->name, pci_name(pdev));
2602
2603 netdev->mem_start = mmio_start;
2604 netdev->mem_end = mmio_start + mmio_len;
2605
2606 adapter->bd_number = cards_found;
2607
2608 /* PCI config space info */
2609 hw->vendor_id = pdev->vendor;
2610 hw->device_id = pdev->device;
2611 hw->revision_id = pdev->revision;
2612 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2613 hw->subsystem_device_id = pdev->subsystem_device;
2614
2615 /* Setup hw api */
2616 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
2617
2618 err = ii->get_invariants(hw);
2619 if (err)
2620 goto err_hw_init;
2621
2622 /* setup the private structure */
2623 err = ixgbe_sw_init(adapter);
2624 if (err)
2625 goto err_sw_init;
2626
2627 netdev->features = NETIF_F_SG |
2628 NETIF_F_HW_CSUM |
2629 NETIF_F_HW_VLAN_TX |
2630 NETIF_F_HW_VLAN_RX |
2631 NETIF_F_HW_VLAN_FILTER;
2632
2633 netdev->features |= NETIF_F_TSO;
2634
2635 netdev->features |= NETIF_F_TSO6;
2636 if (pci_using_dac)
2637 netdev->features |= NETIF_F_HIGHDMA;
2638
2640 /* make sure the EEPROM is good */
2641 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
2642 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
2643 err = -EIO;
2644 goto err_eeprom;
2645 }
2646
2647 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
2648 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
2649
2650 if (ixgbe_validate_mac_addr(netdev->dev_addr)) {
2651 err = -EIO;
2652 goto err_eeprom;
2653 }
2654
2655 init_timer(&adapter->watchdog_timer);
2656 adapter->watchdog_timer.function = &ixgbe_watchdog;
2657 adapter->watchdog_timer.data = (unsigned long)adapter;
2658
2659 INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
2660
2661 /* initialize default flow control settings */
2662 hw->fc.original_type = ixgbe_fc_full;
2663 hw->fc.type = ixgbe_fc_full;
2664 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
2665 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
2666 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
2667
2668 /* Interrupt Throttle Rate */
2669 adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS);
2670 adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS);
2671
2672 /* print bus type/speed/width info */
2673 pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
2674 link_speed = link_status & IXGBE_PCI_LINK_SPEED;
2675 link_width = link_status & IXGBE_PCI_LINK_WIDTH;
2676 dev_info(&pdev->dev, "(PCI Express:%s:%s) "
2677 "%02x:%02x:%02x:%02x:%02x:%02x\n",
2678 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
2679 (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
2680 "Unknown"),
2681 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
2682 (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
2683 (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
2684 (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
2685 "Unknown"),
2686 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
2687 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
2688 ixgbe_read_part_num(hw, &part_num);
2689 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
2690 hw->mac.type, hw->phy.type,
2691 (part_num >> 8), (part_num & 0xff));
2692
2693 /* reset the hardware with the new settings */
2694 ixgbe_start_hw(hw);
2695
2696 netif_carrier_off(netdev);
2697 netif_stop_queue(netdev);
2698
2699 strcpy(netdev->name, "eth%d");
2700 err = register_netdev(netdev);
2701 if (err)
2702 goto err_register;
2703
2705 dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
2706 cards_found++;
2707 return 0;
2708
2709err_register:
2710err_hw_init:
2711err_sw_init:
2712err_eeprom:
2713 iounmap(hw->hw_addr);
2714err_ioremap:
2715 free_netdev(netdev);
2716err_alloc_etherdev:
2717 pci_release_regions(pdev);
2718err_pci_reg:
2719err_dma:
2720 pci_disable_device(pdev);
2721 return err;
2722}
2723
2724/**
2725 * ixgbe_remove - Device Removal Routine
2726 * @pdev: PCI device information struct
2727 *
2728 * ixgbe_remove is called by the PCI subsystem to alert the driver
2729 * that it should release a PCI device. This could be caused by a
2730 * Hot-Plug event, or because the driver is going to be removed from
2731 * memory.
2732 **/
2733static void __devexit ixgbe_remove(struct pci_dev *pdev)
2734{
2735 struct net_device *netdev = pci_get_drvdata(pdev);
2736 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2737
2738 set_bit(__IXGBE_DOWN, &adapter->state);
2739 del_timer_sync(&adapter->watchdog_timer);
2740
2741 flush_scheduled_work();
2742
2743 unregister_netdev(netdev);
2744
2745 kfree(adapter->tx_ring);
2746 kfree(adapter->rx_ring);
2747
2748 iounmap(adapter->hw.hw_addr);
2749 pci_release_regions(pdev);
2750
2751 free_netdev(netdev);
2752
2753 pci_disable_device(pdev);
2754}
2755
2756/**
2757 * ixgbe_io_error_detected - called when PCI error is detected
2758 * @pdev: Pointer to PCI device
2759 * @state: The current pci connection state
2760 *
2761 * This function is called after a PCI bus error affecting
2762 * this device has been detected.
2763 */
2764static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
2765 pci_channel_state_t state)
2766{
2767 struct net_device *netdev = pci_get_drvdata(pdev);
2768	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2769
2770 netif_device_detach(netdev);
2771
2772 if (netif_running(netdev))
2773 ixgbe_down(adapter);
2774 pci_disable_device(pdev);
2775
2776	/* Request a slot reset. */
2777 return PCI_ERS_RESULT_NEED_RESET;
2778}
2779
2780/**
2781 * ixgbe_io_slot_reset - called after the pci bus has been reset.
2782 * @pdev: Pointer to PCI device
2783 *
2784 * Restart the card from scratch, as if from a cold-boot.
2785 */
2786static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
2787{
2788 struct net_device *netdev = pci_get_drvdata(pdev);
2789	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2790
2791 if (pci_enable_device(pdev)) {
2792 DPRINTK(PROBE, ERR,
2793 "Cannot re-enable PCI device after reset.\n");
2794 return PCI_ERS_RESULT_DISCONNECT;
2795 }
2796 pci_set_master(pdev);
2797
2798 pci_enable_wake(pdev, PCI_D3hot, 0);
2799 pci_enable_wake(pdev, PCI_D3cold, 0);
2800
2801 ixgbe_reset(adapter);
2802
2803 return PCI_ERS_RESULT_RECOVERED;
2804}
2805
2806/**
2807 * ixgbe_io_resume - called when traffic can start flowing again.
2808 * @pdev: Pointer to PCI device
2809 *
2810 * This callback is called when the error recovery driver tells us that
2811 * it's OK to resume normal operation.
2812 */
2813static void ixgbe_io_resume(struct pci_dev *pdev)
2814{
2815 struct net_device *netdev = pci_get_drvdata(pdev);
2816	struct ixgbe_adapter *adapter = netdev_priv(netdev);
2817
2818 if (netif_running(netdev)) {
2819 if (ixgbe_up(adapter)) {
2820 DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
2821 return;
2822 }
2823 }
2824
2825 netif_device_attach(netdev);
2827}
2828
2829static struct pci_error_handlers ixgbe_err_handler = {
2830 .error_detected = ixgbe_io_error_detected,
2831 .slot_reset = ixgbe_io_slot_reset,
2832 .resume = ixgbe_io_resume,
2833};
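
/*
 * Recovery flow (illustrative): on an uncorrectable PCI error the core
 * first calls .error_detected (the interface is detached and brought
 * down and a slot reset is requested), then .slot_reset once the link
 * has been reset (the device is re-enabled and the hardware
 * reinitialized), and finally .resume, which brings the interface back
 * up and reattaches it.
 */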
2834
2835static struct pci_driver ixgbe_driver = {
2836 .name = ixgbe_driver_name,
2837 .id_table = ixgbe_pci_tbl,
2838 .probe = ixgbe_probe,
2839 .remove = __devexit_p(ixgbe_remove),
2840#ifdef CONFIG_PM
2841 .suspend = ixgbe_suspend,
2842 .resume = ixgbe_resume,
2843#endif
2844 .shutdown = ixgbe_shutdown,
2845 .err_handler = &ixgbe_err_handler
2846};
2847
2848/**
2849 * ixgbe_init_module - Driver Registration Routine
2850 *
2851 * ixgbe_init_module is the first routine called when the driver is
2852 * loaded. All it does is register with the PCI subsystem.
2853 **/
2854static int __init ixgbe_init_module(void)
2855{
2856 int ret;
2857 printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
2858 ixgbe_driver_string, ixgbe_driver_version);
2859
2860 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
2861
2862 ret = pci_register_driver(&ixgbe_driver);
2863 return ret;
2864}
2865module_init(ixgbe_init_module);
2866
2867/**
2868 * ixgbe_exit_module - Driver Exit Cleanup Routine
2869 *
2870 * ixgbe_exit_module is called just before the driver is removed
2871 * from memory.
2872 **/
2873static void __exit ixgbe_exit_module(void)
2874{
2875 pci_unregister_driver(&ixgbe_driver);
2876}
2877module_exit(ixgbe_exit_module);
2878
2879/* ixgbe_main.c */