/*******************************************************************************


  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free
  Software Foundation; either version 2 of the License, or (at your option)
  any later version.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgb.h"

/* Change Log
 * 1.0.88 01/05/05
 * - include fix to the condition that determines when to quit NAPI - Robert Olsson
 * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
 * 1.0.84 10/26/04
 * - reset buffer_info->dma in Tx resource cleanup logic
 * 1.0.83 10/12/04
 * - sparse cleanup - shemminger@osdl.org
 * - fix tx resource cleanup logic
 */

char ixgb_driver_name[] = "ixgb";
char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";

#ifndef CONFIG_IXGB_NAPI
#define DRIVERNAPI
#else
#define DRIVERNAPI "-NAPI"
#endif
char ixgb_driver_version[] = "1.0.90-k2"DRIVERNAPI;
char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";

/* ixgb_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgb_pci_tbl[] = {
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
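
/* A note for readers: MODULE_DEVICE_TABLE exports ixgb_pci_tbl in the module
 * image, so userspace hotplug/modprobe can match the vendor/device IDs above
 * and autoload this driver when an 82597EX variant appears on the bus. */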

/* Local Function Prototypes */

int ixgb_up(struct ixgb_adapter *adapter);
void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
void ixgb_reset(struct ixgb_adapter *adapter);
int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
void ixgb_update_stats(struct ixgb_adapter *adapter);

static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
static void ixgb_configure_tx(struct ixgb_adapter *adapter);
static void ixgb_configure_rx(struct ixgb_adapter *adapter);
static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
static void ixgb_set_multi(struct net_device *netdev);
static void ixgb_watchdog(unsigned long data);
static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
#ifdef CONFIG_IXGB_NAPI
static int ixgb_clean(struct net_device *netdev, int *budget);
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
				   int *work_done, int work_to_do);
#else
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
#endif
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
void ixgb_set_ethtool_ops(struct net_device *netdev);
static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct net_device *dev);
static void ixgb_vlan_rx_register(struct net_device *netdev,
				  struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif

/* Exported from other modules */

extern void ixgb_check_options(struct ixgb_adapter *adapter);

static struct pci_driver ixgb_driver = {
	.name     = ixgb_driver_name,
	.id_table = ixgb_pci_tbl,
	.probe    = ixgb_probe,
	.remove   = __devexit_p(ixgb_remove),
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
MODULE_LICENSE("GPL");

/* some defines for controlling descriptor fetches in h/w */
#define RXDCTL_PTHRESH_DEFAULT 128	/* chip considers prefetch below this */
#define RXDCTL_HTHRESH_DEFAULT 16	/* chip will only prefetch if tail is
					   pushed this many descriptors from head */
#define RXDCTL_WTHRESH_DEFAULT 16	/* chip writes back at this many or RXT0 */
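
/* Restating how these defaults interact (see also the comments in
 * ixgb_configure_rx below): descriptor prefetch fires only when at least
 * HTHRESH (16) descriptors are available past the head and the chip's
 * internal cache has dropped below PTHRESH (128); completed descriptors
 * are written back WTHRESH (16) at a time, or when the RXT0 timer fires. */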

/**
 * ixgb_init_module - Driver Registration Routine
 *
 * ixgb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/

static int __init
ixgb_init_module(void)
{
	printk(KERN_INFO "%s - version %s\n",
	       ixgb_driver_string, ixgb_driver_version);

	printk(KERN_INFO "%s\n", ixgb_copyright);

	return pci_module_init(&ixgb_driver);
}

module_init(ixgb_init_module);

/**
 * ixgb_exit_module - Driver Exit Cleanup Routine
 *
 * ixgb_exit_module is called just before the driver is removed
 * from memory.
 **/

static void __exit
ixgb_exit_module(void)
{
	pci_unregister_driver(&ixgb_driver);
}

module_exit(ixgb_exit_module);

/**
 * ixgb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/

static inline void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
	atomic_inc(&adapter->irq_sem);
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	IXGB_WRITE_FLUSH(&adapter->hw);
	synchronize_irq(adapter->pdev->irq);
}

/**
 * ixgb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/

static inline void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
	if(atomic_dec_and_test(&adapter->irq_sem)) {
		IXGB_WRITE_REG(&adapter->hw, IMS,
			       IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
			       IXGB_INT_LSC);
		IXGB_WRITE_FLUSH(&adapter->hw);
	}
}
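
/* irq_sem acts as a disable-nesting count: ixgb_irq_disable() always
 * increments it and masks all sources via IMC, while ixgb_irq_enable()
 * only unmasks once the count drops back to zero, so paired disable/enable
 * calls from different paths (interrupt handler, NAPI poll, up/down) nest
 * safely. */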

int
ixgb_up(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	struct ixgb_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */

	ixgb_set_multi(netdev);

	ixgb_restore_vlan(adapter);

	ixgb_configure_tx(adapter);
	ixgb_setup_rctl(adapter);
	ixgb_configure_rx(adapter);
	ixgb_alloc_rx_buffers(adapter);

#ifdef CONFIG_PCI_MSI
	{
	boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) &
			  IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE;
	adapter->have_msi = TRUE;

	if (!pcix)
		adapter->have_msi = FALSE;
	else if((err = pci_enable_msi(adapter->pdev))) {
		printk(KERN_ERR
		       "Unable to allocate MSI interrupt Error: %d\n", err);
		adapter->have_msi = FALSE;
		/* proceed to try to request regular interrupt */
	}
	}

#endif
	if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
			      SA_SHIRQ | SA_SAMPLE_RANDOM,
			      netdev->name, netdev)))
		return err;

	/* disable interrupts and get the hardware into a known state */
	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);

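	/* A sketch of the register semantics the block below relies on: MFS
	 * stores the maximum frame size shifted up by IXGB_MFS_SHIFT, and any
	 * frame larger than the standard Ethernet maximum
	 * (IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH, i.e. 1518
	 * bytes including FCS) additionally requires the jumbo frame enable
	 * bit (JFE) in CTRL0. */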
	if((hw->max_frame_size != max_frame) ||
	   (hw->max_frame_size !=
	    (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {

		hw->max_frame_size = max_frame;

		IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);

		if(hw->max_frame_size >
		   IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
			uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);

			if(!(ctrl0 & IXGB_CTRL0_JFE)) {
				ctrl0 |= IXGB_CTRL0_JFE;
				IXGB_WRITE_REG(hw, CTRL0, ctrl0);
			}
		}
	}

	mod_timer(&adapter->watchdog_timer, jiffies);
	ixgb_irq_enable(adapter);

#ifdef CONFIG_IXGB_NAPI
	netif_poll_enable(netdev);
#endif
	return 0;
}

void
ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
{
	struct net_device *netdev = adapter->netdev;

	ixgb_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);
#ifdef CONFIG_PCI_MSI
	if(adapter->have_msi == TRUE)
		pci_disable_msi(adapter->pdev);

#endif
	if(kill_watchdog)
		del_timer_sync(&adapter->watchdog_timer);
#ifdef CONFIG_IXGB_NAPI
	netif_poll_disable(netdev);
#endif
	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	ixgb_reset(adapter);
	ixgb_clean_tx_ring(adapter);
	ixgb_clean_rx_ring(adapter);
}

void
ixgb_reset(struct ixgb_adapter *adapter)
{

	ixgb_adapter_stop(&adapter->hw);
	if(!ixgb_init_hw(&adapter->hw))
		IXGB_DBG("ixgb_init_hw failed.\n");
}

/**
 * ixgb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/

static int __devinit
ixgb_probe(struct pci_dev *pdev,
	   const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct ixgb_adapter *adapter;
	static int cards_found = 0;
	unsigned long mmio_start;
	int mmio_len;
	int pci_using_dac;
	int i;
	int err;

	if((err = pci_enable_device(pdev)))
		return err;

	if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
		pci_using_dac = 1;
	} else {
		if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
			IXGB_ERR("No usable DMA configuration, aborting\n");
			return err;
		}
		pci_using_dac = 0;
	}

	if((err = pci_request_regions(pdev, ixgb_driver_name)))
		return err;

	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
	if(!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev->priv;
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;

	mmio_start = pci_resource_start(pdev, BAR_0);
	mmio_len = pci_resource_len(pdev, BAR_0);

	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if(!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	for(i = BAR_1; i <= BAR_5; i++) {
		if(pci_resource_len(pdev, i) == 0)
			continue;
		if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			adapter->hw.io_base = pci_resource_start(pdev, i);
			break;
		}
	}

	netdev->open = &ixgb_open;
	netdev->stop = &ixgb_close;
	netdev->hard_start_xmit = &ixgb_xmit_frame;
	netdev->get_stats = &ixgb_get_stats;
	netdev->set_multicast_list = &ixgb_set_multi;
	netdev->set_mac_address = &ixgb_set_mac;
	netdev->change_mtu = &ixgb_change_mtu;
	ixgb_set_ethtool_ops(netdev);
	netdev->tx_timeout = &ixgb_tx_timeout;
	netdev->watchdog_timeo = HZ;
#ifdef CONFIG_IXGB_NAPI
	netdev->poll = &ixgb_clean;
	netdev->weight = 64;
#endif
	netdev->vlan_rx_register = ixgb_vlan_rx_register;
	netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
	netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ixgb_netpoll;
#endif

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;
	netdev->base_addr = adapter->hw.io_base;

	adapter->bd_number = cards_found;
	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	/* setup the private structure */

	if((err = ixgb_sw_init(adapter)))
		goto err_sw_init;

	netdev->features = NETIF_F_SG |
			   NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;
#ifdef NETIF_F_TSO
	netdev->features |= NETIF_F_TSO;
#endif

	if(pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* make sure the EEPROM is good */

	if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);

	if(!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EIO;
		goto err_eeprom;
	}

	adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &ixgb_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->tx_timeout_task,
		  (void (*)(void *))ixgb_tx_timeout_task, netdev);

	if((err = register_netdev(netdev)))
		goto err_register;

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	printk(KERN_INFO "%s: Intel(R) PRO/10GbE Network Connection\n",
	       netdev->name);
	ixgb_check_options(adapter);
	/* reset the hardware with the new settings */

	ixgb_reset(adapter);

	cards_found++;
	return 0;

err_register:
err_sw_init:
err_eeprom:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
	return err;
}

/**
 * ixgb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/

static void __devexit
ixgb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgb_adapter *adapter = netdev->priv;

	unregister_netdev(netdev);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	free_netdev(netdev);
}

/**
 * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/

static int __devinit
ixgb_sw_init(struct ixgb_adapter *adapter)
{
	struct ixgb_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;

	adapter->rx_buffer_len = IXGB_RXBUFFER_2048;

	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

	if((hw->device_id == IXGB_DEVICE_ID_82597EX)
	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
		hw->mac_type = ixgb_82597;
	else {
		/* should never have loaded on this device */
		printk(KERN_ERR "ixgb: unsupported device id\n");
	}

	/* enable flow control to be programmed */
	hw->fc.send_xon = 1;

	atomic_set(&adapter->irq_sem, 1);
	spin_lock_init(&adapter->tx_lock);

	return 0;
}

/**
 * ixgb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/

static int
ixgb_open(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;
	int err;

	/* allocate transmit descriptors */

	if((err = ixgb_setup_tx_resources(adapter)))
		goto err_setup_tx;

	/* allocate receive descriptors */

	if((err = ixgb_setup_rx_resources(adapter)))
		goto err_setup_rx;

	if((err = ixgb_up(adapter)))
		goto err_up;

	return 0;

err_up:
	ixgb_free_rx_resources(adapter);
err_setup_rx:
	ixgb_free_tx_resources(adapter);
err_setup_tx:
	ixgb_reset(adapter);

	return err;
}

/**
 * ixgb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/

static int
ixgb_close(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;

	ixgb_down(adapter, TRUE);

	ixgb_free_tx_resources(adapter);
	ixgb_free_rx_resources(adapter);

	return 0;
}

/**
 * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/

int
ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * txdr->count;
	txdr->buffer_info = vmalloc(size);
	if(!txdr->buffer_info) {
		return -ENOMEM;
	}
	memset(txdr->buffer_info, 0, size);

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
	IXGB_ROUNDUP(txdr->size, 4096);

	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
	if(!txdr->desc) {
		vfree(txdr->buffer_info);
		return -ENOMEM;
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
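
/* Sizing sketch (assuming the 16-byte legacy descriptor layout - an 8-byte
 * buffer address plus the cmd_type_len, status, popts and vlan fields - and
 * a default ring size of 256 descriptors): txdr->size comes out at exactly
 * 4096, so IXGB_ROUNDUP leaves it unchanged; other ring sizes are padded up
 * to the next 4K multiple before the coherent DMA allocation. */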

/**
 * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/

static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
	uint64_t tdba = adapter->tx_ring.dma;
	uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
	uint32_t tctl;
	struct ixgb_hw *hw = &adapter->hw;

	/* Setup the Base and Length of the Tx Descriptor Ring
	 * tx_ring.dma can be either a 32 or 64 bit value
	 */

	IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));

	IXGB_WRITE_REG(hw, TDLEN, tdlen);

	/* Setup the HW Tx Head and Tail descriptor pointers */

	IXGB_WRITE_REG(hw, TDH, 0);
	IXGB_WRITE_REG(hw, TDT, 0);

	/* don't set up txdctl, it induces performance problems if configured
	 * incorrectly */
	/* Set the Tx Interrupt Delay register */

	IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */

	tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(hw, TCTL, tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->tx_cmd_type =
		IXGB_TX_DESC_TYPE
		| (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}

/**
 * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/

int
ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgb_buffer) * rxdr->count;
	rxdr->buffer_info = vmalloc(size);
	if(!rxdr->buffer_info) {
		return -ENOMEM;
	}
	memset(rxdr->buffer_info, 0, size);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
	IXGB_ROUNDUP(rxdr->size, 4096);

	rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);

	if(!rxdr->desc) {
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;

	return 0;
}

/**
 * ixgb_setup_rctl - configure the receive control register
 * @adapter: board private structure
 **/

static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
	uint32_t rctl;

	rctl = IXGB_READ_REG(&adapter->hw, RCTL);

	rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);

	rctl |=
		IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
		IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	rctl |= IXGB_RCTL_SECRC;

	switch (adapter->rx_buffer_len) {
	case IXGB_RXBUFFER_2048:
	default:
		rctl |= IXGB_RCTL_BSIZE_2048;
		break;
	case IXGB_RXBUFFER_4096:
		rctl |= IXGB_RCTL_BSIZE_4096;
		break;
	case IXGB_RXBUFFER_8192:
		rctl |= IXGB_RCTL_BSIZE_8192;
		break;
	case IXGB_RXBUFFER_16384:
		rctl |= IXGB_RCTL_BSIZE_16384;
		break;
	}

	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}
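
/* Two aspects of the RCTL programming above worth calling out: SECRC makes
 * the hardware strip the 4-byte Ethernet CRC before DMA, so the FCS never
 * reaches the stack, and the BSIZE field must agree with
 * adapter->rx_buffer_len because ixgb_clean_rx_irq() drops any packet that
 * does not fit in a single receive buffer. */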

/**
 * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/

static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
	uint64_t rdba = adapter->rx_ring.dma;
	uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
	struct ixgb_hw *hw = &adapter->hw;
	uint32_t rctl;
	uint32_t rxcsum;
	uint32_t rxdctl;

	/* make sure receives are disabled while setting up the descriptors */

	rctl = IXGB_READ_REG(hw, RCTL);
	IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);

	/* set the Receive Delay Timer Register */

	IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */

	IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));

	IXGB_WRITE_REG(hw, RDLEN, rdlen);

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(hw, RDH, 0);
	IXGB_WRITE_REG(hw, RDT, 0);

	/* set up pre-fetching of receive buffers so we get some before we
	 * run out (default hardware behavior is to run out before fetching
	 * more). This sets up to fetch if HTHRESH rx descriptors are avail
	 * and the descriptors in hw cache are below PTHRESH. This avoids
	 * the hardware behavior of fetching <=512 descriptors in a single
	 * burst that pre-empts all other activity, usually causing fifo
	 * overflows. */
	/* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
	rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
		 RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
		 RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(hw, RXDCTL, rxdctl);

	/* Enable Receive Checksum Offload for TCP and UDP */
	if(adapter->rx_csum == TRUE) {
		rxcsum = IXGB_READ_REG(hw, RXCSUM);
		rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
	}

	/* Enable Receives */

	IXGB_WRITE_REG(hw, RCTL, rctl);
}

/**
 * ixgb_free_tx_resources - Free Tx Resources
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/

void
ixgb_free_tx_resources(struct ixgb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_tx_ring(adapter);

	vfree(adapter->tx_ring.buffer_info);
	adapter->tx_ring.buffer_info = NULL;

	pci_free_consistent(pdev, adapter->tx_ring.size,
			    adapter->tx_ring.desc, adapter->tx_ring.dma);

	adapter->tx_ring.desc = NULL;
}

static inline void
ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
				struct ixgb_buffer *buffer_info)
{
	struct pci_dev *pdev = adapter->pdev;
	if(buffer_info->dma) {
		pci_unmap_page(pdev,
			       buffer_info->dma,
			       buffer_info->length,
			       PCI_DMA_TODEVICE);
		buffer_info->dma = 0;
	}
	if(buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
}

/**
 * ixgb_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for(i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct ixgb_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);
}

/**
 * ixgb_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/

void
ixgb_free_rx_resources(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct pci_dev *pdev = adapter->pdev;

	ixgb_clean_rx_ring(adapter);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * ixgb_clean_rx_ring - Free Rx Buffers
 * @adapter: board private structure
 **/

static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct ixgb_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */

	for(i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if(buffer_info->skb) {

			pci_unmap_single(pdev,
					 buffer_info->dma,
					 buffer_info->length,
					 PCI_DMA_FROMDEVICE);

			dev_kfree_skb(buffer_info->skb);

			buffer_info->skb = NULL;
		}
	}

	size = sizeof(struct ixgb_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
	IXGB_WRITE_REG(&adapter->hw, RDT, 0);
}

/**
 * ixgb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
	struct ixgb_adapter *adapter = netdev->priv;
	struct sockaddr *addr = p;

	if(!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	ixgb_rar_set(&adapter->hw, addr->sa_data, 0);

	return 0;
}

/**
 * ixgb_set_multi - Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_multi entry point is called whenever the multicast address
 * list or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper multicast,
 * promiscuous mode, and all-multi behavior.
 **/

static void
ixgb_set_multi(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;
	struct ixgb_hw *hw = &adapter->hw;
	struct dev_mc_list *mc_ptr;
	uint32_t rctl;
	int i;

	/* Check for Promiscuous and All Multicast modes */

	rctl = IXGB_READ_REG(hw, RCTL);

	if(netdev->flags & IFF_PROMISC) {
		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	} else if(netdev->flags & IFF_ALLMULTI) {
		rctl |= IXGB_RCTL_MPE;
		rctl &= ~IXGB_RCTL_UPE;
	} else {
		rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
	}

	if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
		rctl |= IXGB_RCTL_MPE;
		IXGB_WRITE_REG(hw, RCTL, rctl);
	} else {
		uint8_t mta[netdev->mc_count * IXGB_ETH_LENGTH_OF_ADDRESS];

		IXGB_WRITE_REG(hw, RCTL, rctl);

		for(i = 0, mc_ptr = netdev->mc_list; mc_ptr;
		    i++, mc_ptr = mc_ptr->next)
			memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
			       mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);

		ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
	}
}

/**
 * ixgb_watchdog - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 **/

static void
ixgb_watchdog(unsigned long data)
{
	struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_desc_ring *txdr = &adapter->tx_ring;

	ixgb_check_for_link(&adapter->hw);

	if (ixgb_check_for_bad_link(&adapter->hw)) {
		/* force the reset path */
		netif_stop_queue(netdev);
	}

	if(adapter->hw.link_up) {
		if(!netif_carrier_ok(netdev)) {
			printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n",
			       netdev->name, 10000, "Full Duplex");
			adapter->link_speed = 10000;
			adapter->link_duplex = FULL_DUPLEX;
			netif_carrier_on(netdev);
			netif_wake_queue(netdev);
		}
	} else {
		if(netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			printk(KERN_INFO
			       "ixgb: %s NIC Link is Down\n",
			       netdev->name);
			netif_carrier_off(netdev);
			netif_stop_queue(netdev);

		}
	}

	ixgb_update_stats(adapter);

	if(!netif_carrier_ok(netdev)) {
		if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context). */
			schedule_work(&adapter->tx_timeout_task);
		}
	}

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = TRUE;

	/* generate an interrupt to force clean up of any stragglers */
	IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);

	/* Reset the timer */
	mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}

#define IXGB_TX_FLAGS_CSUM		0x00000001
#define IXGB_TX_FLAGS_VLAN		0x00000002
#define IXGB_TX_FLAGS_TSO		0x00000004

static inline int
ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
#ifdef NETIF_F_TSO
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
	uint16_t ipcse, tucse, mss;
	int err;

	if(likely(skb_shinfo(skb)->tso_size)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
		mss = skb_shinfo(skb)->tso_size;
		skb->nh.iph->tot_len = 0;
		skb->nh.iph->check = 0;
		skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
						      skb->nh.iph->daddr,
						      0, IPPROTO_TCP, 0);
		ipcss = skb->nh.raw - skb->data;
		ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
		ipcse = skb->h.raw - skb->data - 1;
		tucss = skb->h.raw - skb->data;
		tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
		tucse = 0;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);

		context_desc->ipcss = ipcss;
		context_desc->ipcso = ipcso;
		context_desc->ipcse = cpu_to_le16(ipcse);
		context_desc->tucss = tucss;
		context_desc->tucso = tucso;
		context_desc->tucse = cpu_to_le16(tucse);
		context_desc->mss = cpu_to_le16(mss);
		context_desc->hdr_len = hdr_len;
		context_desc->status = 0;
		context_desc->cmd_type_len = cpu_to_le32(
						IXGB_CONTEXT_DESC_TYPE
						| IXGB_CONTEXT_DESC_CMD_TSE
						| IXGB_CONTEXT_DESC_CMD_IP
						| IXGB_CONTEXT_DESC_CMD_TCP
						| IXGB_CONTEXT_DESC_CMD_IDE
						| (skb->len - (hdr_len)));

		if(++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return 1;
	}
#endif

	return 0;
}
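
/* Worked example of the context descriptor offsets above, assuming an
 * untagged Ethernet frame with 20-byte IPv4 and TCP headers: ipcss = 14
 * (start of the IP header), ipcse = 33 (last byte of the IP header),
 * tucss = 34, tucso = 34 + 16 = 50 (the TCP checksum field), and
 * hdr_len = 54, leaving skb->len - 54 bytes of payload to be segmented
 * into mss-sized chunks. */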

static inline boolean_t
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
	struct ixgb_context_desc *context_desc;
	unsigned int i;
	uint8_t css, cso;

	if(likely(skb->ip_summed == CHECKSUM_HW)) {
		css = skb->h.raw - skb->data;
		cso = (skb->h.raw + skb->csum) - skb->data;

		i = adapter->tx_ring.next_to_use;
		context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);

		context_desc->tucss = css;
		context_desc->tucso = cso;
		context_desc->tucse = 0;
		/* zero out any previously existing data in one instruction */
		*(uint32_t *)&(context_desc->ipcss) = 0;
		context_desc->status = 0;
		context_desc->hdr_len = 0;
		context_desc->mss = 0;
		context_desc->cmd_type_len =
			cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
				    | IXGB_TX_DESC_CMD_IDE);

		if(++i == adapter->tx_ring.count) i = 0;
		adapter->tx_ring.next_to_use = i;

		return TRUE;
	}

	return FALSE;
}
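
/* Here css is the offset of the transport header from the start of the
 * frame, and cso adds skb->csum, which for CHECKSUM_HW packets in this
 * kernel generation is the offset of the checksum field within that
 * header; e.g. for TCP over untagged IPv4 this would give css = 34 and
 * cso = 50 (illustrative values, not taken from the driver). */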

#define IXGB_MAX_TXD_PWR	14
#define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)

static inline int
ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
	    unsigned int first)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_buffer *buffer_info;
	int len = skb->len;
	unsigned int offset = 0, size, count = 0, i;

	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	len -= skb->data_len;

	i = tx_ring->next_to_use;

	while(len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
		buffer_info->length = size;
		buffer_info->dma =
			pci_map_single(adapter->pdev,
				       skb->data + offset,
				       size,
				       PCI_DMA_TODEVICE);
		buffer_info->time_stamp = jiffies;

		len -= size;
		offset += size;
		count++;
		if(++i == tx_ring->count) i = 0;
	}

	for(f = 0; f < nr_frags; f++) {
		struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;
		offset = 0;

		while(len) {
			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
			buffer_info->length = size;
			buffer_info->dma =
				pci_map_page(adapter->pdev,
					     frag->page,
					     frag->page_offset + offset,
					     size,
					     PCI_DMA_TODEVICE);
			buffer_info->time_stamp = jiffies;

			len -= size;
			offset += size;
			count++;
			if(++i == tx_ring->count) i = 0;
		}
	}
	i = (i == 0) ? tx_ring->count - 1 : i - 1;
	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;
}

static inline void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id, int tx_flags)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct ixgb_tx_desc *tx_desc = NULL;
	struct ixgb_buffer *buffer_info;
	uint32_t cmd_type_len = adapter->tx_cmd_type;
	uint8_t status = 0;
	uint8_t popts = 0;
	unsigned int i;

	if(tx_flags & IXGB_TX_FLAGS_TSO) {
		cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
		popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
	}

	if(tx_flags & IXGB_TX_FLAGS_CSUM)
		popts |= IXGB_TX_DESC_POPTS_TXSM;

	if(tx_flags & IXGB_TX_FLAGS_VLAN) {
		cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
	}

	i = tx_ring->next_to_use;

	while(count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = IXGB_TX_DESC(*tx_ring, i);
		tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->status = status;
		tx_desc->popts = popts;
		tx_desc->vlan = cpu_to_le16(vlan_id);

		if(++i == tx_ring->count) i = 0;
	}

	tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
					     | IXGB_TX_DESC_CMD_RS);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	IXGB_WRITE_REG(&adapter->hw, TDT, i);
}
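
/* Note that only the final descriptor of the chain gets EOP | RS above, so
 * the hardware reports DD (descriptor done) once per packet rather than once
 * per fragment; ixgb_clean_tx_irq() relies on this when it walks buffers up
 * to next_to_watch. */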

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			  (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
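
/* A quick sanity check of the worst case (a sketch assuming 4 KB pages,
 * where MAX_SKB_FRAGS is 18): one IXGB_MAX_DATA_PER_TXD (16 KB) chunk of
 * linear data needs TXD_USE_COUNT(16384) = 1 descriptor, each page-sized
 * fragment needs 1, and 1 more is reserved for a context descriptor, so
 * DESC_NEEDED evaluates to 1 + 18 + 1 = 20 descriptors. */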

static int
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;
	unsigned int first;
	unsigned int tx_flags = 0;
	unsigned long flags;
	int vlan_id = 0;
	int tso;

	if(skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return 0;
	}

	spin_lock_irqsave(&adapter->tx_lock, flags);
	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
		return 1;
	}
	spin_unlock_irqrestore(&adapter->tx_lock, flags);

	if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
		tx_flags |= IXGB_TX_FLAGS_VLAN;
		vlan_id = vlan_tx_tag_get(skb);
	}

	first = adapter->tx_ring.next_to_use;

	tso = ixgb_tso(adapter, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGB_TX_FLAGS_TSO;
	else if(ixgb_tx_csum(adapter, skb))
		tx_flags |= IXGB_TX_FLAGS_CSUM;

	ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
		      tx_flags);

	netdev->trans_start = jiffies;

	return 0;
}

/**
 * ixgb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/

static void
ixgb_tx_timeout(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->tx_timeout_task);
}

static void
ixgb_tx_timeout_task(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;

	ixgb_down(adapter, TRUE);
	ixgb_up(adapter);
}

/**
 * ixgb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/

static struct net_device_stats *
ixgb_get_stats(struct net_device *netdev)
{
	struct ixgb_adapter *adapter = netdev->priv;

	return &adapter->net_stats;
}

/**
 * ixgb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/

static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgb_adapter *adapter = netdev->priv;
	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
	int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;

	if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
	   || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
		IXGB_ERR("Invalid MTU setting\n");
		return -EINVAL;
	}

	if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
	   || (max_frame <= IXGB_RXBUFFER_2048)) {
		adapter->rx_buffer_len = IXGB_RXBUFFER_2048;

	} else if(max_frame <= IXGB_RXBUFFER_4096) {
		adapter->rx_buffer_len = IXGB_RXBUFFER_4096;

	} else if(max_frame <= IXGB_RXBUFFER_8192) {
		adapter->rx_buffer_len = IXGB_RXBUFFER_8192;

	} else {
		adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
	}

	netdev->mtu = new_mtu;

	if(old_max_frame != max_frame && netif_running(netdev)) {

		ixgb_down(adapter, TRUE);
		ixgb_up(adapter);
	}

	return 0;
}

/**
 * ixgb_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/

void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
	   (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
		u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
		u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
		u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
		u64 bcast = ((u64)bcast_h << 32) | bcast_l;

		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
		/* fix up multicast stats by removing broadcasts */
		multi -= bcast;

		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
		adapter->stats.mprch += (multi >> 32);
		adapter->stats.bprcl += bcast_l;
		adapter->stats.bprch += bcast_h;
	} else {
		adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
		adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
		adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
		adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	}
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
	adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);

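	/* Each hardware statistic above is a 64-bit counter exposed as a
	 * low/high register pair (e.g. GPRCL/GPRCH); both halves are
	 * accumulated, but the net_stats fill below consumes only the low
	 * 32-bit halves. */
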
	/* Fill out the OS statistics structure */

	adapter->net_stats.rx_packets = adapter->stats.gprcl;
	adapter->net_stats.tx_packets = adapter->stats.gptcl;
	adapter->net_stats.rx_bytes = adapter->stats.gorcl;
	adapter->net_stats.tx_bytes = adapter->stats.gotcl;
	adapter->net_stats.multicast = adapter->stats.mprcl;
	adapter->net_stats.collisions = 0;

	/* ignore RLEC as it reports errors for padded (<64bytes) frames
	 * with a length in the type/len field */
	adapter->net_stats.rx_errors =
	    /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
	    adapter->stats.ruc +
	    adapter->stats.roc /*+ adapter->stats.rlec */ +
	    adapter->stats.icbc +
	    adapter->stats.ecbc + adapter->stats.mpc;

	adapter->net_stats.rx_dropped = adapter->stats.mpc;

	/* see above
	 * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
	 */

	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
	adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
	adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
	adapter->net_stats.rx_over_errors = adapter->stats.mpc;

	adapter->net_stats.tx_errors = 0;
	adapter->net_stats.rx_frame_errors = 0;
	adapter->net_stats.tx_aborted_errors = 0;
	adapter->net_stats.tx_carrier_errors = 0;
	adapter->net_stats.tx_fifo_errors = 0;
	adapter->net_stats.tx_heartbeat_errors = 0;
	adapter->net_stats.tx_window_errors = 0;
}

#define IXGB_MAX_INTR 10
/**
 * ixgb_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 * @pt_regs: CPU registers structure
 **/

static irqreturn_t
ixgb_intr(int irq, void *data, struct pt_regs *regs)
{
	struct net_device *netdev = data;
	struct ixgb_adapter *adapter = netdev->priv;
	struct ixgb_hw *hw = &adapter->hw;
	uint32_t icr = IXGB_READ_REG(hw, ICR);
#ifndef CONFIG_IXGB_NAPI
	unsigned int i;
#endif

	if(unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

	if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
		mod_timer(&adapter->watchdog_timer, jiffies);
	}

#ifdef CONFIG_IXGB_NAPI
	if(netif_rx_schedule_prep(netdev)) {

		/* Disable interrupts and register for poll. The flush
		   of the posted write is intentionally left out.
		*/

		atomic_inc(&adapter->irq_sem);
		IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
		__netif_rx_schedule(netdev);
	}
#else
	/* yes, that is actually a & and it is meant to make sure that
	 * every pass through this for loop checks both receive and
	 * transmit queues for completed descriptors, intended to
	 * avoid starvation issues and assist tx/rx fairness. */
	for(i = 0; i < IXGB_MAX_INTR; i++)
		if(!ixgb_clean_rx_irq(adapter) &
		   !ixgb_clean_tx_irq(adapter))
			break;
#endif
	return IRQ_HANDLED;
}

#ifdef CONFIG_IXGB_NAPI
/**
 * ixgb_clean - NAPI Rx polling callback
 * @netdev: network interface device structure
 * @budget: pointer to the remaining packet quota for this poll pass
 **/

static int
ixgb_clean(struct net_device *netdev, int *budget)
{
	struct ixgb_adapter *adapter = netdev->priv;
	int work_to_do = min(*budget, netdev->quota);
	int tx_cleaned;
	int work_done = 0;

	tx_cleaned = ixgb_clean_tx_irq(adapter);
	ixgb_clean_rx_irq(adapter, &work_done, work_to_do);

	*budget -= work_done;
	netdev->quota -= work_done;

	/* if no Tx and not enough Rx work done, exit the polling mode */
	if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
		netif_rx_complete(netdev);
		ixgb_irq_enable(adapter);
		return 0;
	}

	return 1;
}
#endif

/**
 * ixgb_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 **/

static boolean_t
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
	struct net_device *netdev = adapter->netdev;
	struct ixgb_tx_desc *tx_desc, *eop_desc;
	struct ixgb_buffer *buffer_info;
	unsigned int i, eop;
	boolean_t cleaned = FALSE;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = IXGB_TX_DESC(*tx_ring, eop);

	while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {

		for(cleaned = FALSE; !cleaned; ) {
			tx_desc = IXGB_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];

			if (tx_desc->popts
			    & (IXGB_TX_DESC_POPTS_TXSM |
			       IXGB_TX_DESC_POPTS_IXSM))
				adapter->hw_csum_tx_good++;

			ixgb_unmap_and_free_tx_resource(adapter, buffer_info);

			*(uint32_t *)&(tx_desc->status) = 0;

			cleaned = (i == eop);
			if(++i == tx_ring->count) i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = IXGB_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	spin_lock(&adapter->tx_lock);
	if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
	   (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {

		netif_wake_queue(netdev);
	}
	spin_unlock(&adapter->tx_lock);

	if(adapter->detect_tx_hung) {
		/* detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		adapter->detect_tx_hung = FALSE;
		if(tx_ring->buffer_info[i].dma &&
		   time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
		   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
			IXGB_STATUS_TXOFF))
			netif_stop_queue(netdev);
	}

	return cleaned;
}

/**
 * ixgb_rx_checksum - Receive Checksum Offload for 82597.
 * @adapter: board private structure
 * @rx_desc: receive descriptor
 * @sk_buff: socket buffer with received data
 **/

static inline void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
		 struct ixgb_rx_desc *rx_desc,
		 struct sk_buff *skb)
{
	/* Ignore Checksum bit is set OR
	 * TCP Checksum has not been calculated
	 */
	if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
	   (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	/* At this point we know the hardware did the TCP checksum */
	/* now look at the TCP checksum error bit */
	if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
		/* let the stack verify checksum errors */
		skb->ip_summed = CHECKSUM_NONE;
		adapter->hw_csum_rx_error++;
	} else {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		adapter->hw_csum_rx_good++;
	}
}

/**
 * ixgb_clean_rx_irq - Send received data up the network stack
1810 * @adapter: board private structure
1811 **/
1812
1813static boolean_t
1814#ifdef CONFIG_IXGB_NAPI
1815ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1816#else
1817ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1818#endif
1819{
1820 struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1821 struct net_device *netdev = adapter->netdev;
1822 struct pci_dev *pdev = adapter->pdev;
1823 struct ixgb_rx_desc *rx_desc, *next_rxd;
1824 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1825 struct sk_buff *skb, *next_skb;
1826 uint32_t length;
1827 unsigned int i, j;
1828 boolean_t cleaned = FALSE;
1829
1830 i = rx_ring->next_to_clean;
1831 rx_desc = IXGB_RX_DESC(*rx_ring, i);
1832 buffer_info = &rx_ring->buffer_info[i];
1833
1834 while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1835
1836#ifdef CONFIG_IXGB_NAPI
1837 if(*work_done >= work_to_do)
1838 break;
1839
1840 (*work_done)++;
1841#endif
1842 skb = buffer_info->skb;
1843 prefetch(skb->data);
1844
		if(++i == rx_ring->count) i = 0;
		next_rxd = IXGB_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		if((j = i + 1) == rx_ring->count) j = 0;
		next2_buffer = &rx_ring->buffer_info[j];
		prefetch(next2_buffer);

		next_buffer = &rx_ring->buffer_info[i];
		next_skb = next_buffer->skb;
		prefetch(next_skb);

		cleaned = TRUE;

		pci_unmap_single(pdev,
				 buffer_info->dma,
				 buffer_info->length,
				 PCI_DMA_FROMDEVICE);

		length = le16_to_cpu(rx_desc->length);

		if(unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) {

			/* All receives must fit into a single buffer */

			IXGB_DBG("Receive packet consumed multiple buffers "
				 "length<%x>\n", length);

			dev_kfree_skb_irq(skb);
			rx_desc->status = 0;
			buffer_info->skb = NULL;

			rx_desc = next_rxd;
			buffer_info = next_buffer;
			continue;
		}

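		/* the hardware flagged this frame with a receive error
		 * (CE, SE, P, or RXE bits); drop it without passing it
		 * up the stack */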
		if (unlikely(rx_desc->errors &
			     (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE |
			      IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) {

			dev_kfree_skb_irq(skb);
			rx_desc->status = 0;
			buffer_info->skb = NULL;

			rx_desc = next_rxd;
			buffer_info = next_buffer;
			continue;
		}

		/* Good Receive */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		ixgb_rx_checksum(adapter, rx_desc, skb);

		skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_IXGB_NAPI
		if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
						 le16_to_cpu(rx_desc->special) &
						 IXGB_RX_DESC_SPECIAL_VLAN_MASK);
		} else {
			netif_receive_skb(skb);
		}
#else /* CONFIG_IXGB_NAPI */
		if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
			vlan_hwaccel_rx(skb, adapter->vlgrp,
					le16_to_cpu(rx_desc->special) &
					IXGB_RX_DESC_SPECIAL_VLAN_MASK);
		} else {
			netif_rx(skb);
		}
#endif /* CONFIG_IXGB_NAPI */
		netdev->last_rx = jiffies;

		rx_desc->status = 0;
		buffer_info->skb = NULL;

		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}

	rx_ring->next_to_clean = i;

	ixgb_alloc_rx_buffers(adapter);

	return cleaned;
}

/**
 * ixgb_alloc_rx_buffers - Replace used receive buffers
 * @adapter: address of board private structure
 **/

static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
{
	struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct ixgb_rx_desc *rx_desc;
	struct ixgb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int num_group_tail_writes;
	long cleancount;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];
	cleancount = IXGB_DESC_UNUSED(rx_ring);

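	/* the receive tail register (RDT) is written only once per
	 * IXGB_RX_BUFFER_WRITE descriptors below, batching the
	 * relatively expensive MMIO writes */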
	num_group_tail_writes = IXGB_RX_BUFFER_WRITE;

	/* leave three descriptors unused */
	while(--cleancount > 2) {
		rx_desc = IXGB_RX_DESC(*rx_ring, i);

		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);

		if(unlikely(!skb)) {
			/* Better luck next round */
			break;
		}

		/* Make buffer alignment 2 beyond a 16 byte boundary;
		 * this will result in a 16 byte aligned IP header after
		 * the 14 byte MAC header is removed
		 */
		skb_reserve(skb, NET_IP_ALIGN);

		skb->dev = netdev;

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
		buffer_info->dma =
			pci_map_single(pdev,
				       skb->data,
				       adapter->rx_buffer_len,
				       PCI_DMA_FROMDEVICE);

		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
		/* guarantee the DD bit is not set before the h/w sees the
		 * descriptor; this is the rest of the workaround for h/w
		 * double writeback. */
		rx_desc->status = 0;

		if((i & ~(num_group_tail_writes - 1)) == i) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64). */
			wmb();

			IXGB_WRITE_REG(&adapter->hw, RDT, i);
		}

		if(++i == rx_ring->count) i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}

/**
 * ixgb_vlan_rx_register - enable or disable VLAN tagging/stripping
 * @netdev: network interface device structure
 * @grp: vlan_group to enable, or NULL to disable tagging/stripping
 **/

static void
ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct ixgb_adapter *adapter = netdev->priv;
	uint32_t ctrl, rctl;

	ixgb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if(grp) {
		/* enable VLAN tag insert/strip */
		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
		ctrl |= IXGB_CTRL0_VME;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

		/* enable VLAN receive filtering */
		rctl = IXGB_READ_REG(&adapter->hw, RCTL);
		rctl |= IXGB_RCTL_VFE;
		rctl &= ~IXGB_RCTL_CFIEN;
		IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
		ctrl &= ~IXGB_CTRL0_VME;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);

		/* disable VLAN receive filtering */
		rctl = IXGB_READ_REG(&adapter->hw, RCTL);
		rctl &= ~IXGB_RCTL_VFE;
		IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
	}

	ixgb_irq_enable(adapter);
}

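/**
 * ixgb_vlan_rx_add_vid - add a VLAN ID to the hardware filter table
 * @netdev: network interface device structure
 * @vid: VLAN ID to accept on receive
 **/
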
static void
ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
	struct ixgb_adapter *adapter = netdev->priv;
	uint32_t vfta, index;

	/* add VID to filter table */

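	/* the VFTA is an array of 128 32-bit registers: bits [11:5]
	 * of the VID select the register (index) and bits [4:0]
	 * select the bit within it */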
	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
}

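/**
 * ixgb_vlan_rx_kill_vid - remove a VLAN ID from the hardware filter table
 * @netdev: network interface device structure
 * @vid: VLAN ID to stop accepting on receive
 **/
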
static void
ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
{
	struct ixgb_adapter *adapter = netdev->priv;
	uint32_t vfta, index;

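	/* disable interrupts while clearing the group entry so the
	 * receive path cannot look up this VID mid-update */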
	ixgb_irq_disable(adapter);

	if(adapter->vlgrp)
		adapter->vlgrp->vlan_devices[vid] = NULL;

	ixgb_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	ixgb_write_vfta(&adapter->hw, index, vfta);
}

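/**
 * ixgb_restore_vlan - reprogram VLAN settings from cached driver state
 * @adapter: board private structure
 *
 * Re-registers the current vlan_group and re-adds each active VID to the
 * hardware filter table, e.g. after a reset has cleared the registers.
 **/
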
static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
	ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if(adapter->vlgrp) {
		uint16_t vid;
		for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if(!adapter->vlgrp->vlan_devices[vid])
				continue;
			ixgb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void ixgb_netpoll(struct net_device *dev)
{
	struct ixgb_adapter *adapter = dev->priv;

	disable_irq(adapter->pdev->irq);
	ixgb_intr(adapter->pdev->irq, dev, NULL);
	enable_irq(adapter->pdev->irq);
}
#endif

/* ixgb_main.c */