blob: 145bf47f0a8dada5d9d257d6e5921fa72588952d [file] [log] [blame]
Amit S. Kale3d396eb2006-10-21 15:33:03 -04001/*
2 * Copyright (C) 2003 - 2006 NetXen, Inc.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called LICENSE.
22 *
23 * Contact Information:
24 * info@netxen.com
25 * NetXen,
26 * 3965 Freedom Circle, Fourth floor,
27 * Santa Clara, CA 95054
28 *
29 *
30 * Main source file for NetXen NIC Driver on Linux
31 *
32 */
33
34#include "netxen_nic_hw.h"
35
36#include "netxen_nic.h"
37#define DEFINE_GLOBAL_RECV_CRB
38#include "netxen_nic_phan_reg.h"
39#include "netxen_nic_ioctl.h"
40
Jeff Garzik1494a812006-11-07 05:12:16 -050041#include <linux/dma-mapping.h>
42#include <linux/vmalloc.h>
43
Amit S. Kale3d396eb2006-10-21 15:33:03 -040044MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
45MODULE_LICENSE("GPL");
46MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
47
48char netxen_nic_driver_name[] = "netxen";
49static char netxen_nic_driver_string[] = "NetXen Network Driver version "
50 NETXEN_NIC_LINUX_VERSIONID "-" NETXEN_NIC_BUILD_NO;
51
52#define NETXEN_NETDEV_WEIGHT 120
53#define NETXEN_ADAPTER_UP_MAGIC 777
54
/* Local functions to NetXen NIC driver */

/* PCI probe/remove pair registered in netxen_driver below. */
static int __devinit netxen_nic_probe(struct pci_dev *pdev,
				      const struct pci_device_id *ent);
static void __devexit netxen_nic_remove(struct pci_dev *pdev);
/* net_device open/stop/xmit hooks shared by all ports of one adapter. */
static int netxen_nic_open(struct net_device *netdev);
static int netxen_nic_close(struct net_device *netdev);
static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
/* TX watchdog: timeout hook defers the reset to tx_timeout_task. */
static void netxen_tx_timeout(struct net_device *netdev);
static void netxen_tx_timeout_task(struct net_device *netdev);
static void netxen_watchdog(unsigned long);
static int netxen_handle_int(struct netxen_adapter *, struct net_device *);
static int netxen_nic_ioctl(struct net_device *netdev,
			    struct ifreq *ifr, int cmd);
/* Pre-NAPI-struct polling entry point (netdev->poll). */
static int netxen_nic_poll(struct net_device *dev, int *budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev);
#endif
static irqreturn_t netxen_intr(int irq, void *data);
Amit S. Kale3d396eb2006-10-21 15:33:03 -040073
74/* PCI Device ID Table */
75static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
76 {PCI_DEVICE(0x4040, 0x0001)},
77 {PCI_DEVICE(0x4040, 0x0002)},
78 {PCI_DEVICE(0x4040, 0x0003)},
79 {PCI_DEVICE(0x4040, 0x0004)},
80 {PCI_DEVICE(0x4040, 0x0005)},
81 {0,}
82};
83
84MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
85
86/*
87 * netxen_nic_probe()
88 *
89 * The Linux system will invoke this after identifying the vendor ID and
90 * device Id in the pci_tbl supported by this module.
91 *
92 * A quad port card has one operational PCI config space, (function 0),
93 * which is used to access all four ports.
94 *
95 * This routine will initialize the adapter, and setup the global parameters
96 * along with the port's specific structure.
97 */
98static int __devinit
99netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
100{
101 struct net_device *netdev = NULL;
102 struct netxen_adapter *adapter = NULL;
103 struct netxen_port *port = NULL;
104 u8 __iomem *mem_ptr = NULL;
105 unsigned long mem_base, mem_len;
106 int pci_using_dac, i, err;
107 int ring;
108 struct netxen_recv_context *recv_ctx = NULL;
109 struct netxen_rcv_desc_ctx *rcv_desc = NULL;
110 struct netxen_cmd_buffer *cmd_buf_arr = NULL;
111 u64 mac_addr[FLASH_NUM_PORTS + 1];
112 int valid_mac;
113
114 if ((err = pci_enable_device(pdev)))
115 return err;
116 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
117 err = -ENODEV;
118 goto err_out_disable_pdev;
119 }
120
121 if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
122 goto err_out_disable_pdev;
123
124 pci_set_master(pdev);
125 if ((pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) &&
126 (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) == 0))
127 pci_using_dac = 1;
128 else {
129 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
130 (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)))
131 goto err_out_free_res;
132
133 pci_using_dac = 0;
134 }
135
136 /* remap phys address */
137 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
138 mem_len = pci_resource_len(pdev, 0);
139
140 /* 128 Meg of memory */
141 mem_ptr = ioremap(mem_base, NETXEN_PCI_MAPSIZE_BYTES);
142 if (mem_ptr == 0UL) {
143 printk(KERN_ERR "%s: Cannot ioremap adapter memory aborting."
144 ":%p\n", netxen_nic_driver_name, mem_ptr);
145 err = -EIO;
146 goto err_out_free_res;
147 }
148
149/*
150 * Allocate a adapter structure which will manage all the initialization
151 * as well as the common resources for all ports...
152 * all the ports will have pointer to this adapter as well as Adapter
153 * will have pointers of all the ports structures.
154 */
155
156 /* One adapter structure for all 4 ports.... */
157 adapter = kzalloc(sizeof(struct netxen_adapter), GFP_KERNEL);
158 if (adapter == NULL) {
159 printk(KERN_ERR "%s: Could not allocate adapter memory:%d\n",
160 netxen_nic_driver_name,
161 (int)sizeof(struct netxen_adapter));
162 err = -ENOMEM;
163 goto err_out_iounmap;
164 }
165
166 adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS;
167 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS;
168 adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;
169
170 pci_set_drvdata(pdev, adapter);
171
172 cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
173 if (cmd_buf_arr == NULL) {
174 err = -ENOMEM;
175 goto err_out_free_adapter;
176 }
177 memset(cmd_buf_arr, 0, TX_RINGSIZE);
178
179 for (i = 0; i < MAX_RCV_CTX; ++i) {
180 recv_ctx = &adapter->recv_ctx[i];
181 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
182 rcv_desc = &recv_ctx->rcv_desc[ring];
183 switch (RCV_DESC_TYPE(ring)) {
184 case RCV_DESC_NORMAL:
185 rcv_desc->max_rx_desc_count =
186 adapter->max_rx_desc_count;
187 rcv_desc->flags = RCV_DESC_NORMAL;
188 rcv_desc->dma_size = RX_DMA_MAP_LEN;
189 rcv_desc->skb_size = MAX_RX_BUFFER_LENGTH;
190 break;
191
192 case RCV_DESC_JUMBO:
193 rcv_desc->max_rx_desc_count =
194 adapter->max_jumbo_rx_desc_count;
195 rcv_desc->flags = RCV_DESC_JUMBO;
196 rcv_desc->dma_size = RX_JUMBO_DMA_MAP_LEN;
197 rcv_desc->skb_size = MAX_RX_JUMBO_BUFFER_LENGTH;
198 break;
199
200 }
201 rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *)
202 vmalloc(RCV_BUFFSIZE);
203
204 if (rcv_desc->rx_buf_arr == NULL) {
205 err = -ENOMEM;
206 goto err_out_free_rx_buffer;
207 }
208 memset(rcv_desc->rx_buf_arr, 0, RCV_BUFFSIZE);
209 }
210
211 }
212
213 adapter->ops = kzalloc(sizeof(struct netxen_drvops), GFP_KERNEL);
214 if (adapter->ops == NULL) {
215 printk(KERN_ERR
216 "%s: Could not allocate memory for adapter->ops:%d\n",
217 netxen_nic_driver_name,
218 (int)sizeof(struct netxen_adapter));
219 err = -ENOMEM;
220 goto err_out_free_rx_buffer;
221 }
222
223 adapter->cmd_buf_arr = cmd_buf_arr;
224 adapter->ahw.pci_base = mem_ptr;
225 spin_lock_init(&adapter->tx_lock);
226 spin_lock_init(&adapter->lock);
227 /* initialize the buffers in adapter */
228 netxen_initialize_adapter_sw(adapter);
229 /*
230 * Set the CRB window to invalid. If any register in window 0 is
231 * accessed it should set the window to 0 and then reset it to 1.
232 */
233 adapter->curr_window = 255;
234 /*
235 * Adapter in our case is quad port so initialize it before
236 * initializing the ports
237 */
238 netxen_initialize_adapter_hw(adapter); /* initialize the adapter */
239
240 netxen_initialize_adapter_ops(adapter);
241
242 init_timer(&adapter->watchdog_timer);
243 adapter->ahw.xg_linkup = 0;
244 adapter->watchdog_timer.function = &netxen_watchdog;
245 adapter->watchdog_timer.data = (unsigned long)adapter;
246 INIT_WORK(&adapter->watchdog_task,
247 (void (*)(void *))netxen_watchdog_task, adapter);
248 adapter->ahw.pdev = pdev;
249 adapter->proc_cmd_buf_counter = 0;
250 pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id);
251
252 if (pci_enable_msi(pdev)) {
253 adapter->flags &= ~NETXEN_NIC_MSI_ENABLED;
254 printk(KERN_WARNING "%s: unable to allocate MSI interrupt"
255 " error\n", netxen_nic_driver_name);
256 } else
257 adapter->flags |= NETXEN_NIC_MSI_ENABLED;
258
259 if (netxen_is_flash_supported(adapter) == 0 &&
260 netxen_get_flash_mac_addr(adapter, mac_addr) == 0)
261 valid_mac = 1;
262 else
263 valid_mac = 0;
264
265 /* initialize the all the ports */
266
267 for (i = 0; i < adapter->ahw.max_ports; i++) {
268 netdev = alloc_etherdev(sizeof(struct netxen_port));
269 if (!netdev) {
270 printk(KERN_ERR "%s: could not allocate netdev for port"
271 " %d\n", netxen_nic_driver_name, i + 1);
272 goto err_out_free_dev;
273 }
274
275 SET_MODULE_OWNER(netdev);
276
277 port = netdev_priv(netdev);
278 port->netdev = netdev;
279 port->pdev = pdev;
280 port->adapter = adapter;
281 port->portnum = i; /* Gigabit port number from 0-3 */
282
283 netdev->open = netxen_nic_open;
284 netdev->stop = netxen_nic_close;
285 netdev->hard_start_xmit = netxen_nic_xmit_frame;
286 netdev->get_stats = netxen_nic_get_stats;
287 netdev->set_multicast_list = netxen_nic_set_multi;
288 netdev->set_mac_address = netxen_nic_set_mac;
289 netdev->change_mtu = netxen_nic_change_mtu;
290 netdev->do_ioctl = netxen_nic_ioctl;
291 netdev->tx_timeout = netxen_tx_timeout;
292 netdev->watchdog_timeo = HZ;
293
294 SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
295 netdev->poll = netxen_nic_poll;
296 netdev->weight = NETXEN_NETDEV_WEIGHT;
297#ifdef CONFIG_NET_POLL_CONTROLLER
298 netdev->poll_controller = netxen_nic_poll_controller;
299#endif
300 /* ScatterGather support */
301 netdev->features = NETIF_F_SG;
302 netdev->features |= NETIF_F_IP_CSUM;
303 netdev->features |= NETIF_F_TSO;
304
305 if (pci_using_dac)
306 netdev->features |= NETIF_F_HIGHDMA;
307
308 if (valid_mac) {
309 unsigned char *p = (unsigned char *)&mac_addr[i];
310 netdev->dev_addr[0] = *(p + 5);
311 netdev->dev_addr[1] = *(p + 4);
312 netdev->dev_addr[2] = *(p + 3);
313 netdev->dev_addr[3] = *(p + 2);
314 netdev->dev_addr[4] = *(p + 1);
315 netdev->dev_addr[5] = *(p + 0);
316
317 memcpy(netdev->perm_addr, netdev->dev_addr,
318 netdev->addr_len);
319 if (!is_valid_ether_addr(netdev->perm_addr)) {
320 printk(KERN_ERR "%s: Bad MAC address "
321 "%02x:%02x:%02x:%02x:%02x:%02x.\n",
322 netxen_nic_driver_name,
323 netdev->dev_addr[0],
324 netdev->dev_addr[1],
325 netdev->dev_addr[2],
326 netdev->dev_addr[3],
327 netdev->dev_addr[4],
328 netdev->dev_addr[5]);
329 } else {
330 if (adapter->ops->macaddr_set)
331 adapter->ops->macaddr_set(port,
332 netdev->
333 dev_addr);
334 }
335 }
336 INIT_WORK(&adapter->tx_timeout_task,
337 (void (*)(void *))netxen_tx_timeout_task, netdev);
338 netif_carrier_off(netdev);
339 netif_stop_queue(netdev);
340
341 if ((err = register_netdev(netdev))) {
342 printk(KERN_ERR "%s: register_netdev failed port #%d"
343 " aborting\n", netxen_nic_driver_name, i + 1);
344 err = -EIO;
345 free_netdev(netdev);
346 goto err_out_free_dev;
347 }
348 adapter->port_count++;
349 adapter->active_ports = 0;
350 adapter->port[i] = port;
351 }
352
353 /*
354 * Initialize all the CRB registers here.
355 */
356 /* Window = 1 */
357 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET));
358 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET));
359 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO));
360
361 netxen_phantom_init(adapter);
362 /*
363 * delay a while to ensure that the Pegs are up & running.
364 * Otherwise, we might see some flaky behaviour.
365 */
366 udelay(100);
367
368 switch (adapter->ahw.board_type) {
369 case NETXEN_NIC_GBE:
370 printk("%s: QUAD GbE board initialized\n",
371 netxen_nic_driver_name);
372 break;
373
374 case NETXEN_NIC_XGBE:
375 printk("%s: XGbE board initialized\n", netxen_nic_driver_name);
376 break;
377 }
378
379 adapter->driver_mismatch = 0;
380
381 return 0;
382
383 err_out_free_dev:
384 if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
385 pci_disable_msi(pdev);
386 for (i = 0; i < adapter->port_count; i++) {
387 port = adapter->port[i];
388 if ((port) && (port->netdev)) {
389 unregister_netdev(port->netdev);
390 free_netdev(port->netdev);
391 }
392 }
393 kfree(adapter->ops);
394
395 err_out_free_rx_buffer:
396 for (i = 0; i < MAX_RCV_CTX; ++i) {
397 recv_ctx = &adapter->recv_ctx[i];
398 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
399 rcv_desc = &recv_ctx->rcv_desc[ring];
400 if (rcv_desc->rx_buf_arr != NULL) {
401 vfree(rcv_desc->rx_buf_arr);
402 rcv_desc->rx_buf_arr = NULL;
403 }
404 }
405 }
406
407 vfree(cmd_buf_arr);
408
409 kfree(adapter->port);
410
411 err_out_free_adapter:
412 pci_set_drvdata(pdev, NULL);
413 kfree(adapter);
414
415 err_out_iounmap:
416 iounmap(mem_ptr);
417 err_out_free_res:
418 pci_release_regions(pdev);
419 err_out_disable_pdev:
420 pci_disable_device(pdev);
421 return err;
422}
423
/*
 * netxen_nic_remove - PCI remove callback.
 * Stops all ports, resets the hardware to its power-on state, tears
 * down the netdevs, then releases PCI/MSI resources and frees the RX
 * buffer bookkeeping and the adapter itself.
 */
static void __devexit netxen_nic_remove(struct pci_dev *pdev)
{
	struct netxen_adapter *adapter;
	struct netxen_port *port;
	struct netxen_rx_buffer *buffer;
	struct netxen_recv_context *recv_ctx;
	struct netxen_rcv_desc_ctx *rcv_desc;
	int i;
	int ctxid, ring;

	adapter = pci_get_drvdata(pdev);
	if (adapter == NULL)
		return;

	netxen_nic_stop_all_ports(adapter);
	/* leave the hw in the same state as reboot */
	netxen_pinit_from_rom(adapter, 0);
	udelay(500);
	netxen_load_firmware(adapter);

	if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
		netxen_nic_disable_int(adapter);

	udelay(500);		/* Delay for a while to drain the DMA engines */
	for (i = 0; i < adapter->port_count; i++) {
		port = adapter->port[i];
		if ((port) && (port->netdev)) {
			unregister_netdev(port->netdev);
			free_netdev(port->netdev);
		}
	}

	if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
		pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);
	if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
		netxen_free_hw_resources(adapter);

	iounmap(adapter->ahw.pci_base);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* NOTE(review): the RX DMA buffers below are unmapped AFTER
	 * pci_disable_device()/pci_release_regions(); confirm this
	 * ordering is safe — unmapping is usually done before the
	 * device is disabled. */
	for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
		recv_ctx = &adapter->recv_ctx[ctxid];
		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
			rcv_desc = &recv_ctx->rcv_desc[ring];
			for (i = 0; i < rcv_desc->max_rx_desc_count; ++i) {
				buffer = &(rcv_desc->rx_buf_arr[i]);
				/* Only posted buffers hold a mapping/skb. */
				if (buffer->state == NETXEN_BUFFER_FREE)
					continue;
				pci_unmap_single(pdev, buffer->dma,
						 rcv_desc->dma_size,
						 PCI_DMA_FROMDEVICE);
				if (buffer->skb != NULL)
					dev_kfree_skb_any(buffer->skb);
			}
			vfree(rcv_desc->rx_buf_arr);
		}
	}

	vfree(adapter->cmd_buf_arr);
	kfree(adapter->ops);
	kfree(adapter);
}
489
490/*
491 * Called when a network interface is made active
492 * @returns 0 on success, negative value on failure
493 */
static int netxen_nic_open(struct net_device *netdev)
{
	struct netxen_port *port = netdev_priv(netdev);
	struct netxen_adapter *adapter = port->adapter;
	struct netxen_rcv_desc_ctx *rcv_desc;
	int err = 0;
	int ctx, ring;

	/* One-time adapter bring-up, done by whichever port opens first:
	 * firmware init, HW resources, NIU/port init, RX buffer posting. */
	if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
		err = netxen_init_firmware(adapter);
		if (err != 0) {
			printk(KERN_ERR "Failed to init firmware\n");
			return -EIO;
		}
		netxen_nic_flash_print(adapter);

		/* setup all the resources for the Phantom... */
		/* this include the descriptors for rcv, tx, and status */
		netxen_nic_clear_stats(adapter);
		err = netxen_nic_hw_resources(adapter);
		if (err) {
			printk(KERN_ERR "Error in setting hw resources:%d\n",
			       err);
			return err;
		}
		if (adapter->ops->init_port
		    && adapter->ops->init_port(adapter, port->portnum) != 0) {
			printk(KERN_ERR "%s: Failed to initialize port %d\n",
			       netxen_nic_driver_name, port->portnum);
			netxen_free_hw_resources(adapter);
			return -EIO;
		}
		if (adapter->ops->init_niu)
			adapter->ops->init_niu(adapter);
		/* Hand initial RX buffers to the hardware for every ring
		 * of every receive context. */
		for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
			for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
				rcv_desc =
				    &adapter->recv_ctx[ctx].rcv_desc[ring];
				netxen_post_rx_buffers(adapter, ctx, ring);
			}
		}
		adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
	}
	adapter->active_ports++;
	/* The IRQ is shared across ports; request it only when the first
	 * port comes up, and start the watchdog alongside it. */
	if (adapter->active_ports == 1) {
		err = request_irq(adapter->ahw.pdev->irq, &netxen_intr,
				  SA_SHIRQ | SA_SAMPLE_RANDOM, netdev->name,
				  adapter);
		if (err) {
			printk(KERN_ERR "request_irq failed with: %d\n", err);
			adapter->active_ports--;
			return err;
		}
		adapter->irq = adapter->ahw.pdev->irq;
		if (!adapter->driver_mismatch)
			mod_timer(&adapter->watchdog_timer, jiffies);

		netxen_nic_enable_int(adapter);
	}

	/* Done here again so that even if phantom sw overwrote it,
	 * we set it */
	if (adapter->ops->macaddr_set)
		adapter->ops->macaddr_set(port, netdev->dev_addr);
	netxen_nic_set_link_parameters(port);

	netxen_nic_set_multi(netdev);
	if (!adapter->driver_mismatch)
		netif_start_queue(netdev);

	return 0;
}
566
567/*
568 * netxen_nic_close - Disables a network interface entry point
569 */
570static int netxen_nic_close(struct net_device *netdev)
571{
572 struct netxen_port *port = netdev_priv(netdev);
573 struct netxen_adapter *adapter = port->adapter;
574 int i, j;
575 struct netxen_cmd_buffer *cmd_buff;
576 struct netxen_skb_frag *buffrag;
577
578 netif_carrier_off(netdev);
579 netif_stop_queue(netdev);
580
581 /* disable phy_ints */
582 if (adapter->ops->disable_phy_interrupts)
583 adapter->ops->disable_phy_interrupts(adapter, port->portnum);
584
585 adapter->active_ports--;
586
587 if (!adapter->active_ports) {
588 netxen_nic_disable_int(adapter);
589 if (adapter->irq)
590 free_irq(adapter->irq, adapter);
591 cmd_buff = adapter->cmd_buf_arr;
592 for (i = 0; i < adapter->max_tx_desc_count; i++) {
593 buffrag = cmd_buff->frag_array;
594 if (buffrag->dma) {
595 pci_unmap_single(port->pdev, buffrag->dma,
596 buffrag->length,
597 PCI_DMA_TODEVICE);
598 buffrag->dma = (u64) NULL;
599 }
600 for (j = 0; j < cmd_buff->frag_count; j++) {
601 buffrag++;
602 if (buffrag->dma) {
603 pci_unmap_page(port->pdev,
604 buffrag->dma,
605 buffrag->length,
606 PCI_DMA_TODEVICE);
607 buffrag->dma = (u64) NULL;
608 }
609 }
610 /* Free the skb we received in netxen_nic_xmit_frame */
611 if (cmd_buff->skb) {
612 dev_kfree_skb_any(cmd_buff->skb);
613 cmd_buff->skb = NULL;
614 }
615 cmd_buff++;
616 }
617 del_timer_sync(&adapter->watchdog_timer);
618 }
619
620 return 0;
621}
622
/*
 * netxen_nic_xmit_frame - hard_start_xmit hook.
 * Reserves space in the shared command (TX) ring under tx_lock, maps
 * the skb head and fragments for DMA, fills cmd_desc_type0 descriptors
 * (4 buffers per descriptor), copies headers inline for LSO, and
 * finally publishes the producer index to the card via a CRB register.
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY when no descriptors are free.
 */
static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct netxen_port *port = netdev_priv(netdev);
	struct netxen_adapter *adapter = port->adapter;
	struct netxen_hardware_context *hw = &adapter->ahw;
	/* Length of the linear (non-paged) part of the skb. */
	unsigned int first_seg_len = skb->len - skb->data_len;
	struct netxen_skb_frag *buffrag;
	unsigned int i;

	u32 producer = 0;
	u32 saved_producer = 0;
	struct cmd_desc_type0 *hwdesc;
	int k;
	struct netxen_cmd_buffer *pbuf = NULL;
	unsigned int tries = 0;
	static int dropped_packet = 0;
	int frag_count;
	u32 local_producer = 0;
	u32 max_tx_desc_count = 0;
	u32 last_cmd_consumer = 0;
	int no_of_desc;

	port->stats.xmitcalled++;
	/* +1 for the linear segment. */
	frag_count = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		port->stats.badskblen++;
		return NETDEV_TX_OK;
	}

	/* More fragments than the descriptor format can express: drop. */
	if (frag_count > MAX_BUFFERS_PER_CMD) {
		printk("%s: %s netxen_nic_xmit_frame: frag_count (%d)"
		       "too large, can handle only %d frags\n",
		       netxen_nic_driver_name, netdev->name,
		       frag_count, MAX_BUFFERS_PER_CMD);
		port->stats.txdropped++;
		if ((++dropped_packet & 0xff) == 0xff)
			printk("%s: %s droppped packets = %d\n",
			       netxen_nic_driver_name, netdev->name,
			       dropped_packet);

		return NETDEV_TX_OK;
	}

	/*
	 * Everything is set up. Now, we just need to transmit it out.
	 * Note that we have to copy the contents of buffer over to
	 * right place. Later on, this can be optimized out by de-coupling the
	 * producer index from the buffer index.
	 */
      retry_getting_window:
	spin_lock_bh(&adapter->tx_lock);
	/* Cap the number of concurrent producers; spin/yield until a
	 * slot opens up. */
	if (adapter->total_threads == MAX_XMIT_PRODUCERS) {
		spin_unlock_bh(&adapter->tx_lock);
		/*
		 * Yield CPU
		 */
		if (!in_atomic())
			schedule();
		else {
			for (i = 0; i < 20; i++)
				cpu_relax();	/*This a nop instr on i386 */
		}
		goto retry_getting_window;
	}
	local_producer = adapter->cmd_producer;
	/* There 4 fragments per descriptor */
	no_of_desc = (frag_count + 3) >> 2;
	/* LSO needs extra descriptor(s) to carry the inlined headers.
	 * NOTE(review): assumes TCP over IPv4 (skb->nh.iph / skb->h.th)
	 * whenever gso_size > 0 — confirm callers guarantee this. */
	if (skb_shinfo(skb)->gso_size > 0) {
		no_of_desc++;
		if (((skb->nh.iph)->ihl * sizeof(u32)) +
		    ((skb->h.th)->doff * sizeof(u32)) +
		    sizeof(struct ethhdr) >
		    (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN)) {
			no_of_desc++;
		}
	}
	k = adapter->cmd_producer;
	max_tx_desc_count = adapter->max_tx_desc_count;
	last_cmd_consumer = adapter->last_cmd_consumer;
	/* Ring-full check (producer must not catch the consumer). */
	if ((k + no_of_desc) >=
	    ((last_cmd_consumer <= k) ? last_cmd_consumer + max_tx_desc_count :
	     last_cmd_consumer)) {
		spin_unlock_bh(&adapter->tx_lock);
		/* First time: try to reclaim completed descriptors, then
		 * retry; second time: stop the queue and report busy. */
		if (tries == 0) {
			local_bh_disable();
			netxen_process_cmd_ring((unsigned long)adapter);
			local_bh_enable();
			++tries;
			goto retry_getting_window;
		} else {
			port->stats.nocmddescriptor++;
			DPRINTK(ERR, "No command descriptors available,"
				" producer = %d, consumer = %d count=%llu,"
				" dropping packet\n", producer,
				adapter->last_cmd_consumer,
				port->stats.nocmddescriptor);

			spin_lock_bh(&adapter->tx_lock);
			netif_stop_queue(netdev);
			port->flags |= NETXEN_NETDEV_STATUS;
			spin_unlock_bh(&adapter->tx_lock);
			return NETDEV_TX_BUSY;
		}
	}
	/* Reserve our descriptor window and drop the lock; the window is
	 * exclusively ours until we publish it below. */
	k = get_index_range(k, max_tx_desc_count, no_of_desc);
	adapter->cmd_producer = k;
	adapter->total_threads++;
	adapter->num_threads++;

	spin_unlock_bh(&adapter->tx_lock);
	/* Copy the descriptors into the hardware */
	producer = local_producer;
	saved_producer = producer;
	hwdesc = &hw->cmd_desc_head[producer];
	memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
	/* Take skb->data itself */
	pbuf = &adapter->cmd_buf_arr[producer];
	if (skb_shinfo(skb)->gso_size > 0) {
		pbuf->mss = skb_shinfo(skb)->gso_size;
		hwdesc->mss = skb_shinfo(skb)->gso_size;
	} else {
		pbuf->mss = 0;
		hwdesc->mss = 0;
	}
	pbuf->no_of_descriptors = no_of_desc;
	pbuf->total_length = skb->len;
	pbuf->skb = skb;
	pbuf->cmd = TX_ETHER_PKT;
	pbuf->frag_count = frag_count;
	pbuf->port = port->portnum;
	buffrag = &pbuf->frag_array[0];
	/* Map the linear part of the skb for DMA (buffer 1). */
	buffrag->dma = pci_map_single(port->pdev, skb->data, first_seg_len,
				      PCI_DMA_TODEVICE);
	buffrag->length = first_seg_len;
	CMD_DESC_TOTAL_LENGTH_WRT(hwdesc, skb->len);
	hwdesc->num_of_buffers = frag_count;
	hwdesc->opcode = TX_ETHER_PKT;

	CMD_DESC_PORT_WRT(hwdesc, port->portnum);
	hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
	hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);

	/* Map each page fragment; k cycles 0..3 through the 4 buffer
	 * slots of the current descriptor. */
	for (i = 1, k = 1; i < frag_count; i++, k++) {
		struct skb_frag_struct *frag;
		int len, temp_len;
		unsigned long offset;
		dma_addr_t temp_dma;

		/* move to next desc. if there is a need */
		if ((i & 0x3) == 0) {
			k = 0;
			producer = get_next_index(producer,
						  adapter->max_tx_desc_count);
			hwdesc = &hw->cmd_desc_head[producer];
			memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
		}
		frag = &skb_shinfo(skb)->frags[i - 1];
		len = frag->size;
		offset = frag->page_offset;

		temp_len = len;
		temp_dma = pci_map_page(port->pdev, frag->page, offset,
					len, PCI_DMA_TODEVICE);

		buffrag++;
		buffrag->dma = temp_dma;
		buffrag->length = temp_len;

		DPRINTK(INFO, "for loop. i=%d k=%d\n", i, k);
		switch (k) {
		case 0:
			hwdesc->buffer1_length = cpu_to_le16(temp_len);
			hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
			break;
		case 1:
			hwdesc->buffer2_length = cpu_to_le16(temp_len);
			hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
			break;
		case 2:
			hwdesc->buffer3_length = cpu_to_le16(temp_len);
			hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
			break;
		case 3:
			/* NOTE(review): buffer4_length lacks the
			 * cpu_to_le16() used for buffers 1-3 — confirm
			 * whether this is intentional. */
			hwdesc->buffer4_length = temp_len;
			hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
			break;
		}
		frag++;
	}
	producer = get_next_index(producer, adapter->max_tx_desc_count);

	/* might change opcode to TX_TCP_LSO */
	netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb);

	/* For LSO, we need to copy the MAC/IP/TCP headers into
	 * the descriptor ring
	 */
	if (hw->cmd_desc_head[saved_producer].opcode == TX_TCP_LSO) {
		int hdr_len, first_hdr_len, more_hdr;
		hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length;
		if (hdr_len > (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN)) {
			first_hdr_len =
			    sizeof(struct cmd_desc_type0) - NET_IP_ALIGN;
			more_hdr = 1;
		} else {
			first_hdr_len = hdr_len;
			more_hdr = 0;
		}
		/* copy the MAC/IP/TCP headers to the cmd descriptor list */
		hwdesc = &hw->cmd_desc_head[producer];

		/* copy the first 64 bytes */
		memcpy(((void *)hwdesc) + NET_IP_ALIGN,
		       (void *)(skb->data), first_hdr_len);
		producer = get_next_index(producer, max_tx_desc_count);

		if (more_hdr) {
			hwdesc = &hw->cmd_desc_head[producer];
			/* copy the next 64 bytes - should be enough except
			 * for pathological case
			 */
			memcpy((void *)hwdesc, (void *)(skb->data) +
			       first_hdr_len, hdr_len - first_hdr_len);
			producer = get_next_index(producer, max_tx_desc_count);
		}
	}
	spin_lock_bh(&adapter->tx_lock);
	port->stats.txbytes +=
	    CMD_DESC_TOTAL_LENGTH(&hw->cmd_desc_head[saved_producer]);
	/* Code to update the adapter considering how many producer threads
	   are currently working */
	if ((--adapter->num_threads) == 0) {
		/* This is the last thread */
		u32 crb_producer = adapter->cmd_producer;
		writel(crb_producer,
		       NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET));
		wmb();
		adapter->total_threads = 0;
	} else {
		/* Other producers still active: only advance the CRB
		 * producer if the card has caught up to our window. */
		u32 crb_producer = 0;
		crb_producer =
		    readl(NETXEN_CRB_NORMALIZE
			  (adapter, CRB_CMD_PRODUCER_OFFSET));
		if (crb_producer == local_producer) {
			crb_producer = get_index_range(crb_producer,
						       max_tx_desc_count,
						       no_of_desc);
			writel(crb_producer,
			       NETXEN_CRB_NORMALIZE(adapter,
						    CRB_CMD_PRODUCER_OFFSET));
			wmb();
		}
	}

	port->stats.xmitfinished++;
	spin_unlock_bh(&adapter->tx_lock);

	netdev->trans_start = jiffies;

	DPRINTK(INFO, "wrote CMD producer %x to phantom\n", producer);

	DPRINTK(INFO, "Done. Send\n");
	return NETDEV_TX_OK;
}
889
890static void netxen_watchdog(unsigned long v)
891{
892 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
893 schedule_work(&adapter->watchdog_task);
894}
895
896static void netxen_tx_timeout(struct net_device *netdev)
897{
898 struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
899 struct netxen_adapter *adapter = port->adapter;
900
901 schedule_work(&adapter->tx_timeout_task);
902}
903
/*
 * netxen_tx_timeout_task - process-context worker for TX timeouts.
 * Resets the port by closing and reopening the interface, then
 * restarts the TX queue.
 */
static void netxen_tx_timeout_task(struct net_device *netdev)
{
	struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
	unsigned long flags;

	printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
	       netxen_nic_driver_name, netdev->name);

	/* NOTE(review): netxen_nic_open()/close() perform sleeping
	 * operations (request_irq, GFP_KERNEL allocations,
	 * del_timer_sync) yet are invoked here under a spinlock with
	 * IRQs disabled — looks like a sleep-in-atomic; confirm and
	 * rework the locking if so. */
	spin_lock_irqsave(&port->adapter->lock, flags);
	netxen_nic_close(netdev);
	netxen_nic_open(netdev);
	spin_unlock_irqrestore(&port->adapter->lock, flags);
	netdev->trans_start = jiffies;
	netif_wake_queue(netdev);
}
919
/*
 * netxen_handle_int - service one interrupt for a port's net_device.
 * Masks further interrupts (with an extra register handshake on the
 * legacy INTx path), then schedules the poll routine if there is RX or
 * TX work pending.  Returns 1 if polling was scheduled, 0 otherwise
 * (in which case interrupts are re-enabled here).
 */
static int
netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev)
{
	u32 ret = 0;

	DPRINTK(INFO, "Entered handle ISR\n");

	adapter->stats.ints++;

	/* Legacy (non-MSI) interrupts must be explicitly deasserted;
	 * retry the ack/read handshake up to 32 times. */
	if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
		int count = 0;
		u32 mask;
		netxen_nic_disable_int(adapter);
		/* Window = 0 or 1 */
		do {
			writel(0xffffffff, (void __iomem *)
			       (adapter->ahw.pci_base + ISR_INT_TARGET_STATUS));
			mask = readl((void __iomem *)
				     (adapter->ahw.pci_base + ISR_INT_VECTOR));
		} while (((mask & 0x80) != 0) && (++count < 32));
		/* Bit 7 still set: the interrupt line did not deassert. */
		if ((mask & 0x80) != 0)
			printk("Could not disable interrupt completely\n");

	}
	adapter->stats.hostints++;

	if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) {
		if (netif_rx_schedule_prep(netdev)) {
			/*
			 * Interrupts are already disabled.
			 */
			__netif_rx_schedule(netdev);
		} else {
			/* Already polling; warn (rate-limited) since we
			 * should not be getting interrupts meanwhile. */
			static unsigned int intcount = 0;
			if ((++intcount & 0xfff) == 0xfff)
				printk(KERN_ERR
				       "%s: %s interrupt %d while in poll\n",
				       netxen_nic_driver_name, netdev->name,
				       intcount);
		}
		ret = 1;
	}

	if (ret == 0) {
		netxen_nic_enable_int(adapter);
	}

	return ret;
}
969
970/*
971 * netxen_intr - Interrupt Handler
972 * @irq: interrupt number
973 * data points to adapter stucture (which may be handling more than 1 port
974 */
Jeff Garzik1494a812006-11-07 05:12:16 -0500975irqreturn_t netxen_intr(int irq, void *data)
Amit S. Kale3d396eb2006-10-21 15:33:03 -0400976{
977 struct netxen_adapter *adapter;
978 struct netxen_port *port;
979 struct net_device *netdev;
980 int i;
981
982 if (unlikely(!irq)) {
983 return IRQ_NONE; /* Not our interrupt */
984 }
985
986 adapter = (struct netxen_adapter *)data;
987 for (i = 0; i < adapter->ahw.max_ports; i++) {
988 port = adapter->port[i];
989 netdev = port->netdev;
990
991 /* process our status queue (for all 4 ports) */
992 netxen_handle_int(adapter, netdev);
993 }
994
995 return IRQ_HANDLED;
996}
997
/*
 * netxen_nic_poll - netdev->poll routine (pre-NAPI-struct API).
 * Processes up to min(*budget, quota) RX completions split evenly
 * across the receive contexts, reclaims TX descriptors, and re-enables
 * interrupts when all work is done.  Returns 0 when done, 1 to be
 * polled again.
 */
static int netxen_nic_poll(struct net_device *netdev, int *budget)
{
	struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
	struct netxen_adapter *adapter = port->adapter;
	int work_to_do = min(*budget, netdev->quota);
	int done = 1;
	int ctx;
	int this_work_done;

	DPRINTK(INFO, "polling for %d descriptors\n", *budget);
	port->stats.polled++;

	adapter->work_done = 0;
	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
		/*
		 * Fairness issue. This will give undue weight to the
		 * receive context 0.
		 */

		/*
		 * To avoid starvation, we give each of our receivers,
		 * a fraction of the quota. Sometimes, it might happen that we
		 * have enough quota to process every packet, but since all the
		 * packets are on one context, it gets only half of the quota,
		 * and ends up not processing it.
		 */
		this_work_done = netxen_process_rcv_ring(adapter, ctx,
							 work_to_do /
							 MAX_RCV_CTX);
		adapter->work_done += this_work_done;
	}

	/* Account the work against both the device quota and the
	 * caller's budget, as the polling API requires. */
	netdev->quota -= adapter->work_done;
	*budget -= adapter->work_done;

	/* Budget exhausted with RX still pending: ask to be re-polled. */
	if (adapter->work_done >= work_to_do
	    && netxen_nic_rx_has_work(adapter) != 0)
		done = 0;

	/* Reclaim completed TX descriptors while we are here. */
	netxen_process_cmd_ring((unsigned long)adapter);

	DPRINTK(INFO, "new work_done: %d work_to_do: %d\n",
		adapter->work_done, work_to_do);
	if (done) {
		netif_rx_complete(netdev);
		netxen_nic_enable_int(adapter);
	}

	return (done ? 0 : 1);
}
1048
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netxen_nic_poll_controller - netpoll/netconsole entry point.
 * Temporarily masks the adapter's IRQ line and invokes the interrupt
 * handler directly so packets can be processed without interrupts.
 */
static void netxen_nic_poll_controller(struct net_device *netdev)
{
	struct netxen_port *port = netdev_priv(netdev);
	struct netxen_adapter *adapter = port->adapter;

	disable_irq(adapter->irq);
	netxen_intr(adapter->irq, adapter);
	enable_irq(adapter->irq);
}
#endif
1059/*
1060 * netxen_nic_ioctl () We provide the tcl/phanmon support through these
1061 * ioctls.
1062 */
1063static int
1064netxen_nic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1065{
1066 int err = 0;
1067 struct netxen_port *port = netdev_priv(netdev);
1068 struct netxen_adapter *adapter = port->adapter;
1069
1070 DPRINTK(INFO, "doing ioctl for %s\n", netdev->name);
1071 switch (cmd) {
1072 case NETXEN_NIC_CMD:
1073 err = netxen_nic_do_ioctl(adapter, (void *)ifr->ifr_data, port);
1074 break;
1075
1076 case NETXEN_NIC_NAME:
1077 DPRINTK(INFO, "ioctl cmd for NetXen\n");
1078 if (ifr->ifr_data) {
1079 put_user(port->portnum, (u16 __user *) ifr->ifr_data);
1080 }
1081 break;
1082
1083 default:
1084 DPRINTK(INFO, "ioctl cmd %x not supported\n", cmd);
1085 err = -EOPNOTSUPP;
1086 break;
1087 }
1088
1089 return err;
1090}
1091
/* PCI driver descriptor tying the device table to probe/remove. */
static struct pci_driver netxen_driver = {
	.name = netxen_nic_driver_name,
	.id_table = netxen_pci_tbl,
	.probe = netxen_nic_probe,
	.remove = __devexit_p(netxen_nic_remove)
};
1098
1099/* Driver Registration on NetXen card */
1100
1101static int __init netxen_init_module(void)
1102{
1103 printk(KERN_INFO "%s \n", netxen_nic_driver_string);
1104
1105 return pci_module_init(&netxen_driver);
1106}
1107
1108module_init(netxen_init_module);
1109
1110static void __exit netxen_exit_module(void)
1111{
1112 /*
1113 * Wait for some time to allow the dma to drain, if any.
1114 */
1115 mdelay(5);
1116 pci_unregister_driver(&netxen_driver);
1117}
1118
1119module_exit(netxen_exit_module);