1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/vmalloc.h>
26#include <linux/interrupt.h>
27
28#include "qlcnic.h"
29
30#include <linux/dma-mapping.h>
31#include <linux/if_vlan.h>
32#include <net/ip.h>
33#include <linux/ipv6.h>
34#include <linux/inetdevice.h>
35#include <linux/sysfs.h>
36
37MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
38MODULE_LICENSE("GPL");
39MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
40MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
41
42char qlcnic_driver_name[] = "qlcnic";
43static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
44 QLCNIC_LINUX_VERSIONID;
45
46static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
47
48/* Default to restricted 1G auto-neg mode */
49static int wol_port_mode = 5;
50
51static int use_msi = 1;
52module_param(use_msi, int, 0644);
53MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
54
55static int use_msi_x = 1;
56module_param(use_msi_x, int, 0644);
57MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
58
59static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
60module_param(auto_fw_reset, int, 0644);
61MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
62
63static int __devinit qlcnic_probe(struct pci_dev *pdev,
64 const struct pci_device_id *ent);
65static void __devexit qlcnic_remove(struct pci_dev *pdev);
66static int qlcnic_open(struct net_device *netdev);
67static int qlcnic_close(struct net_device *netdev);
68static netdev_tx_t qlcnic_xmit_frame(struct sk_buff *,
69 struct net_device *);
70static void qlcnic_tx_timeout(struct net_device *netdev);
71static void qlcnic_tx_timeout_task(struct work_struct *work);
72static void qlcnic_attach_work(struct work_struct *work);
73static void qlcnic_fwinit_work(struct work_struct *work);
74static void qlcnic_fw_poll_work(struct work_struct *work);
75static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
76 work_func_t func, int delay);
77static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
78static int qlcnic_poll(struct napi_struct *napi, int budget);
79#ifdef CONFIG_NET_POLL_CONTROLLER
80static void qlcnic_poll_controller(struct net_device *netdev);
81#endif
82
83static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
84static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
85static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
86static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
87
88static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
89static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
90
91static irqreturn_t qlcnic_intr(int irq, void *data);
92static irqreturn_t qlcnic_msi_intr(int irq, void *data);
93static irqreturn_t qlcnic_msix_intr(int irq, void *data);
94
95static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
96static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
97
98/* PCI Device ID Table */
99#define ENTRY(device) \
100 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
101 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
102
103#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
104
105static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
106	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
107 {0,}
108};
109
110MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
111
112
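/*
 * Ring the Tx doorbell: write the new producer index to the ring's CRB
 * register, and stop the queue early when few descriptors remain so the
 * stack stops submitting packets until completions free up space.
 */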
113void
114qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
115 struct qlcnic_host_tx_ring *tx_ring)
116{
117 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
118
119 if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
120 netif_stop_queue(adapter->netdev);
121 smp_mb();
122 }
123}
124
125static const u32 msi_tgt_status[8] = {
126 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
127 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
128 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
129 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
130};
131
132static const
133struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
134
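/*
 * Per-ring interrupt mask helpers: writing 0 masks the status ring,
 * 1 unmasks it.  The legacy INTx path additionally rewrites the global
 * target mask register; the MSI/MSI-X paths skip that write.
 */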
135static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
136{
137 writel(0, sds_ring->crb_intr_mask);
138}
139
140static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
141{
142 struct qlcnic_adapter *adapter = sds_ring->adapter;
143
144 writel(0x1, sds_ring->crb_intr_mask);
145
146 if (!QLCNIC_IS_MSI_FAMILY(adapter))
147 writel(0xfbff, adapter->tgt_mask_reg);
148}
149
150static int
151qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
152{
153 int size = sizeof(struct qlcnic_host_sds_ring) * count;
154
155 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
156
157 return (recv_ctx->sds_rings == NULL);
158}
159
160static void
161qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
162{
163 if (recv_ctx->sds_rings != NULL)
164 kfree(recv_ctx->sds_rings);
165
166 recv_ctx->sds_rings = NULL;
167}
168
169static int
170qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
171{
172 int ring;
173 struct qlcnic_host_sds_ring *sds_ring;
174 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
175
176 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
177 return -ENOMEM;
178
179 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
180 sds_ring = &recv_ctx->sds_rings[ring];
181 netif_napi_add(netdev, &sds_ring->napi,
182 qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
183 }
184
185 return 0;
186}
187
188static void
189qlcnic_napi_del(struct qlcnic_adapter *adapter)
190{
191 int ring;
192 struct qlcnic_host_sds_ring *sds_ring;
193 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
194
195 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
196 sds_ring = &recv_ctx->sds_rings[ring];
197 netif_napi_del(&sds_ring->napi);
198 }
199
200 qlcnic_free_sds_rings(&adapter->recv_ctx);
201}
202
203static void
204qlcnic_napi_enable(struct qlcnic_adapter *adapter)
205{
206 int ring;
207 struct qlcnic_host_sds_ring *sds_ring;
208 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
209
210 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
211 sds_ring = &recv_ctx->sds_rings[ring];
212 napi_enable(&sds_ring->napi);
213 qlcnic_enable_int(sds_ring);
214 }
215}
216
217static void
218qlcnic_napi_disable(struct qlcnic_adapter *adapter)
219{
220 int ring;
221 struct qlcnic_host_sds_ring *sds_ring;
222 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
223
224 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
225 sds_ring = &recv_ctx->sds_rings[ring];
226 qlcnic_disable_int(sds_ring);
227 napi_synchronize(&sds_ring->napi);
228 napi_disable(&sds_ring->napi);
229 }
230}
231
232static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
233{
234 memset(&adapter->stats, 0, sizeof(adapter->stats));
235 return;
236}
237
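/*
 * DMA masks: start with a conservative 39-bit mask; after the firmware
 * handshake, qlcnic_update_dma_mask() widens it to (32 + CRB_DMA_SHIFT)
 * bits when the firmware reports a larger addressable range.
 */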
238static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
239{
240 struct pci_dev *pdev = adapter->pdev;
241 u64 mask, cmask;
242
243 adapter->pci_using_dac = 0;
244
245 mask = DMA_BIT_MASK(39);
246 cmask = mask;
247
248 if (pci_set_dma_mask(pdev, mask) == 0 &&
249 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
250 adapter->pci_using_dac = 1;
251 return 0;
252 }
253
254 return -EIO;
255}
256
257/* Update addressable range if firmware supports it */
258static int
259qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
260{
261 int change, shift, err;
262 u64 mask, old_mask, old_cmask;
263 struct pci_dev *pdev = adapter->pdev;
264
265 change = 0;
266
267 shift = QLCRD32(adapter, CRB_DMA_SHIFT);
268 if (shift > 32)
269 return 0;
270
271 if (shift > 9)
272 change = 1;
273
274 if (change) {
275 old_mask = pdev->dma_mask;
276 old_cmask = pdev->dev.coherent_dma_mask;
277
278 mask = DMA_BIT_MASK(32+shift);
279
280 err = pci_set_dma_mask(pdev, mask);
281 if (err)
282 goto err_out;
283
284 err = pci_set_consistent_dma_mask(pdev, mask);
285 if (err)
286 goto err_out;
287 dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
288 }
289
290 return 0;
291
292err_out:
293 pci_set_dma_mask(pdev, old_mask);
294 pci_set_consistent_dma_mask(pdev, old_cmask);
295 return err;
296}
297
298static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
299{
300 u32 val, data;
301
302 val = adapter->ahw.board_type;
303 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
304 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
305 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
306 data = QLCNIC_PORT_MODE_802_3_AP;
307 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
308 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
309 data = QLCNIC_PORT_MODE_XG;
310 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
311 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
312 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
313 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
314 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
315 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
316 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
317 } else {
318 data = QLCNIC_PORT_MODE_AUTO_NEG;
319 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
320 }
321
322 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
323 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
324 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
325 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
326 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
327 }
328 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
329 }
330}
331
332static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
333{
334 u32 control;
335 int pos;
336
337 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
338 if (pos) {
339 pci_read_config_dword(pdev, pos, &control);
340 if (enable)
341 control |= PCI_MSIX_FLAGS_ENABLE;
342 else
343 control = 0;
344 pci_write_config_dword(pdev, pos, control);
345 }
346}
347
348static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
349{
350 int i;
351
352 for (i = 0; i < count; i++)
353 adapter->msix_entries[i].entry = i;
354}
355
356static int
357qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
358{
359 int i;
360 unsigned char *p;
361 u64 mac_addr;
362 struct net_device *netdev = adapter->netdev;
363 struct pci_dev *pdev = adapter->pdev;
364
365 if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
366 return -EIO;
367
368 p = (unsigned char *)&mac_addr;
369 for (i = 0; i < 6; i++)
370 netdev->dev_addr[i] = *(p + 5 - i);
371
372 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
373 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
374
375 /* set station address */
376
377 if (!is_valid_ether_addr(netdev->perm_addr))
378 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
379 netdev->dev_addr);
380
381 return 0;
382}
383
384static int qlcnic_set_mac(struct net_device *netdev, void *p)
385{
386 struct qlcnic_adapter *adapter = netdev_priv(netdev);
387 struct sockaddr *addr = p;
388
389 if (!is_valid_ether_addr(addr->sa_data))
390 return -EINVAL;
391
392 if (netif_running(netdev)) {
393 netif_device_detach(netdev);
394 qlcnic_napi_disable(adapter);
395 }
396
397 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
398 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
399 qlcnic_set_multi(adapter->netdev);
400
401 if (netif_running(netdev)) {
402 netif_device_attach(netdev);
403 qlcnic_napi_enable(adapter);
404 }
405 return 0;
406}
407
408static const struct net_device_ops qlcnic_netdev_ops = {
409 .ndo_open = qlcnic_open,
410 .ndo_stop = qlcnic_close,
411 .ndo_start_xmit = qlcnic_xmit_frame,
412 .ndo_get_stats = qlcnic_get_stats,
413 .ndo_validate_addr = eth_validate_addr,
414 .ndo_set_multicast_list = qlcnic_set_multi,
415 .ndo_set_mac_address = qlcnic_set_mac,
416 .ndo_change_mtu = qlcnic_change_mtu,
417 .ndo_tx_timeout = qlcnic_tx_timeout,
418#ifdef CONFIG_NET_POLL_CONTROLLER
419 .ndo_poll_controller = qlcnic_poll_controller,
420#endif
421};
422
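/*
 * Interrupt setup: prefer MSI-X (multiple SDS rings when RSS is
 * supported), fall back to single-vector MSI, and finally to legacy
 * INTx using the per-function target status/mask registers.
 */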
423static void
424qlcnic_setup_intr(struct qlcnic_adapter *adapter)
425{
426 const struct qlcnic_legacy_intr_set *legacy_intrp;
427 struct pci_dev *pdev = adapter->pdev;
428 int err, num_msix;
429
430 if (adapter->rss_supported) {
431 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
432 MSIX_ENTRIES_PER_ADAPTER : 2;
433 } else
434 num_msix = 1;
435
436 adapter->max_sds_rings = 1;
437
438 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
439
440 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
441
442 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
443 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
444 legacy_intrp->tgt_status_reg);
445 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
446 legacy_intrp->tgt_mask_reg);
447 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
448
449 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
450 ISR_INT_STATE_REG);
451
452 qlcnic_set_msix_bit(pdev, 0);
453
454 if (adapter->msix_supported) {
455
456 qlcnic_init_msix_entries(adapter, num_msix);
457 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
458 if (err == 0) {
459 adapter->flags |= QLCNIC_MSIX_ENABLED;
460 qlcnic_set_msix_bit(pdev, 1);
461
462 if (adapter->rss_supported)
463 adapter->max_sds_rings = num_msix;
464
465 dev_info(&pdev->dev, "using msi-x interrupts\n");
466 return;
467 }
468
469 if (err > 0)
470 pci_disable_msix(pdev);
471
472 /* fall through for msi */
473 }
474
475 if (use_msi && !pci_enable_msi(pdev)) {
476 adapter->flags |= QLCNIC_MSI_ENABLED;
477 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
478 msi_tgt_status[adapter->ahw.pci_func]);
479 dev_info(&pdev->dev, "using msi interrupts\n");
480 adapter->msix_entries[0].vector = pdev->irq;
481 return;
482 }
483
484 dev_info(&pdev->dev, "using legacy interrupts\n");
485 adapter->msix_entries[0].vector = pdev->irq;
486}
487
488static void
489qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
490{
491 if (adapter->flags & QLCNIC_MSIX_ENABLED)
492 pci_disable_msix(adapter->pdev);
493 if (adapter->flags & QLCNIC_MSI_ENABLED)
494 pci_disable_msi(adapter->pdev);
495}
496
497static void
498qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
499{
500 if (adapter->ahw.pci_base0 != NULL)
501 iounmap(adapter->ahw.pci_base0);
502}
503
504static int
505qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
506{
507 void __iomem *mem_ptr0 = NULL;
508 resource_size_t mem_base;
509 unsigned long mem_len, pci_len0 = 0;
510
511 struct pci_dev *pdev = adapter->pdev;
512 int pci_func = adapter->ahw.pci_func;
513
514 /*
515 * Set the CRB window to invalid. If any register in window 0 is
516 * accessed it should set the window to 0 and then reset it to 1.
517 */
518 adapter->ahw.crb_win = -1;
519 adapter->ahw.ocm_win = -1;
520
521 /* remap phys address */
522 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
523 mem_len = pci_resource_len(pdev, 0);
524
525 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
526
527 mem_ptr0 = pci_ioremap_bar(pdev, 0);
528 if (mem_ptr0 == NULL) {
529 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
530 return -EIO;
531 }
532 pci_len0 = mem_len;
533 } else {
534 return -EIO;
535 }
536
537 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
538
539 adapter->ahw.pci_base0 = mem_ptr0;
540 adapter->ahw.pci_len0 = pci_len0;
541
542 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
543 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
544
545 return 0;
546}
547
548static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
549{
550 struct pci_dev *pdev = adapter->pdev;
551 int i, found = 0;
552
553 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
554 if (qlcnic_boards[i].vendor == pdev->vendor &&
555 qlcnic_boards[i].device == pdev->device &&
556 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
557 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
558 strcpy(name, qlcnic_boards[i].short_name);
559 found = 1;
560 break;
561 }
562
563 }
564
565 if (!found)
566		strcpy(name, "Unknown");
567}
568
569static void
570qlcnic_check_options(struct qlcnic_adapter *adapter)
571{
572 u32 fw_major, fw_minor, fw_build;
573 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
574 char serial_num[32];
575 int i, offset, val;
576 int *ptr32;
577 struct pci_dev *pdev = adapter->pdev;
578
579 adapter->driver_mismatch = 0;
580
581 ptr32 = (int *)&serial_num;
582 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
583 for (i = 0; i < 8; i++) {
584 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
585 dev_err(&pdev->dev, "error reading board info\n");
586 adapter->driver_mismatch = 1;
587 return;
588 }
589 ptr32[i] = cpu_to_le32(val);
590 offset += sizeof(u32);
591 }
592
593 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
594 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
595 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
596
597 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
598
599 if (adapter->portnum == 0) {
600 get_brd_name(adapter, brd_name);
601
602 pr_info("%s: %s Board Chip rev 0x%x\n",
603 module_name(THIS_MODULE),
604 brd_name, adapter->ahw.revision_id);
605 }
606
607 if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
608 adapter->driver_mismatch = 1;
609 dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
610 fw_major, fw_minor, fw_build);
611 return;
612 }
613
614 i = QLCRD32(adapter, QLCNIC_SRE_MISC);
615 adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
616
617 dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
618 fw_major, fw_minor, fw_build,
619 adapter->ahw.cut_through ? "cut-through" : "legacy");
620
621 if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
622 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
623
624 adapter->flags &= ~QLCNIC_LRO_ENABLED;
625
626 if (adapter->ahw.port_type == QLCNIC_XGBE) {
627 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
628 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
629 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
630 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
631 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
632 }
633
634 adapter->msix_supported = !!use_msi_x;
635 adapter->rss_supported = !!use_msi_x;
636
637 adapter->num_txd = MAX_CMD_DESCRIPTORS;
638
639 adapter->num_lro_rxd = 0;
640 adapter->max_rds_rings = 2;
641}
642
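/*
 * Firmware bring-up.  Only one PCI function actually resets and loads
 * the firmware (arbitrated by qlcnic_can_start_firmware()); the other
 * functions skip the load and go straight to the handshake in
 * qlcnic_phantom_init().
 */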
643static int
644qlcnic_start_firmware(struct qlcnic_adapter *adapter)
645{
646 int val, err, first_boot;
647
648 err = qlcnic_set_dma_mask(adapter);
649 if (err)
650 return err;
651
652 if (!qlcnic_can_start_firmware(adapter))
653 goto wait_init;
654
655 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
656 if (first_boot == 0x55555555)
657 /* This is the first boot after power up */
658 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
659
660 qlcnic_request_firmware(adapter);
661
662 err = qlcnic_need_fw_reset(adapter);
663 if (err < 0)
664 goto err_out;
665 if (err == 0)
666 goto wait_init;
667
668 if (first_boot != 0x55555555) {
669 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
670 qlcnic_pinit_from_rom(adapter);
671 msleep(1);
672 }
673
674 QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
675 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
676 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
677
678 qlcnic_set_port_mode(adapter);
679
680 err = qlcnic_load_firmware(adapter);
681 if (err)
682 goto err_out;
683
684 qlcnic_release_firmware(adapter);
685
686 val = (_QLCNIC_LINUX_MAJOR << 16)
687 | ((_QLCNIC_LINUX_MINOR << 8))
688 | (_QLCNIC_LINUX_SUBVERSION);
689 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
690
691wait_init:
692 /* Handshake with the card before we register the devices. */
693 err = qlcnic_phantom_init(adapter);
694 if (err)
695 goto err_out;
696
697 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
698
699 qlcnic_update_dma_mask(adapter);
700
701 qlcnic_check_options(adapter);
702
703 adapter->need_fw_reset = 0;
704
705 /* fall through and release firmware */
706
707err_out:
708 qlcnic_release_firmware(adapter);
709 return err;
710}
711
712static int
713qlcnic_request_irq(struct qlcnic_adapter *adapter)
714{
715 irq_handler_t handler;
716 struct qlcnic_host_sds_ring *sds_ring;
717 int err, ring;
718
719 unsigned long flags = 0;
720 struct net_device *netdev = adapter->netdev;
721 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
722
723 if (adapter->flags & QLCNIC_MSIX_ENABLED)
724 handler = qlcnic_msix_intr;
725 else if (adapter->flags & QLCNIC_MSI_ENABLED)
726 handler = qlcnic_msi_intr;
727 else {
728 flags |= IRQF_SHARED;
729 handler = qlcnic_intr;
730 }
731 adapter->irq = netdev->irq;
732
733 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
734 sds_ring = &recv_ctx->sds_rings[ring];
735 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
736 err = request_irq(sds_ring->irq, handler,
737 flags, sds_ring->name, sds_ring);
738 if (err)
739 return err;
740 }
741
742 return 0;
743}
744
745static void
746qlcnic_free_irq(struct qlcnic_adapter *adapter)
747{
748 int ring;
749 struct qlcnic_host_sds_ring *sds_ring;
750
751 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
752
753 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
754 sds_ring = &recv_ctx->sds_rings[ring];
755 free_irq(sds_ring->irq, sds_ring);
756 }
757}
758
759static void
760qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
761{
762 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
763 adapter->coal.normal.data.rx_time_us =
764 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
765 adapter->coal.normal.data.rx_packets =
766 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
767 adapter->coal.normal.data.tx_time_us =
768 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
769 adapter->coal.normal.data.tx_packets =
770 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
771}
772
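/*
 * __qlcnic_up - bring the data path up: program multicast, MTU, RSS,
 * interrupt coalescing and LRO, then enable NAPI and request a link
 * event from the firmware.
 */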
773static int
774__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
775{
776 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
777 return -EIO;
778
779 qlcnic_set_multi(netdev);
780 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
781
782 adapter->ahw.linkup = 0;
783
784 if (adapter->max_sds_rings > 1)
785 qlcnic_config_rss(adapter, 1);
786
787 qlcnic_config_intr_coalesce(adapter);
788
789 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
790 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
791
792 qlcnic_napi_enable(adapter);
793
794 qlcnic_linkevent_request(adapter, 1);
795
796 set_bit(__QLCNIC_DEV_UP, &adapter->state);
797 return 0;
798}
799
800/* Used during resume and by the firmware recovery code. */
801
802static int
803qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
804{
805 int err = 0;
806
807 rtnl_lock();
808 if (netif_running(netdev))
809 err = __qlcnic_up(adapter, netdev);
810 rtnl_unlock();
811
812 return err;
813}
814
815static void
816__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
817{
818 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
819 return;
820
821 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
822 return;
823
824 smp_mb();
825 spin_lock(&adapter->tx_clean_lock);
826 netif_carrier_off(netdev);
827 netif_tx_disable(netdev);
828
829 qlcnic_free_mac_list(adapter);
830
831 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
832
833 qlcnic_napi_disable(adapter);
834
835 qlcnic_release_tx_buffers(adapter);
836 spin_unlock(&adapter->tx_clean_lock);
837}
838
839/* Usage: During suspend and firmware recovery module */
840
841static void
842qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
843{
844 rtnl_lock();
845 if (netif_running(netdev))
846 __qlcnic_down(adapter, netdev);
847 rtnl_unlock();
848
849}
850
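/*
 * qlcnic_attach - allocate everything needed to pass traffic: firmware
 * context, NAPI instances, software/hardware ring resources, Rx buffers
 * and IRQs.  Undone by qlcnic_detach().
 */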
851static int
852qlcnic_attach(struct qlcnic_adapter *adapter)
853{
854 struct net_device *netdev = adapter->netdev;
855 struct pci_dev *pdev = adapter->pdev;
856 int err, ring;
857 struct qlcnic_host_rds_ring *rds_ring;
858
859 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
860 return 0;
861
862 err = qlcnic_init_firmware(adapter);
863 if (err)
864 return err;
865
866 err = qlcnic_napi_add(adapter, netdev);
867 if (err)
868 return err;
869
870 err = qlcnic_alloc_sw_resources(adapter);
871 if (err) {
872 dev_err(&pdev->dev, "Error in setting sw resources\n");
873 return err;
874 }
875
876 err = qlcnic_alloc_hw_resources(adapter);
877 if (err) {
878 dev_err(&pdev->dev, "Error in setting hw resources\n");
879 goto err_out_free_sw;
880 }
881
882
883 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
884 rds_ring = &adapter->recv_ctx.rds_rings[ring];
885 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
886 }
887
888 err = qlcnic_request_irq(adapter);
889 if (err) {
890 dev_err(&pdev->dev, "failed to setup interrupt\n");
891 goto err_out_free_rxbuf;
892 }
893
894 qlcnic_init_coalesce_defaults(adapter);
895
896 qlcnic_create_sysfs_entries(adapter);
897
898 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
899 return 0;
900
901err_out_free_rxbuf:
902 qlcnic_release_rx_buffers(adapter);
903 qlcnic_free_hw_resources(adapter);
904err_out_free_sw:
905 qlcnic_free_sw_resources(adapter);
906 return err;
907}
908
909static void
910qlcnic_detach(struct qlcnic_adapter *adapter)
911{
912 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
913 return;
914
915 qlcnic_remove_sysfs_entries(adapter);
916
917 qlcnic_free_hw_resources(adapter);
918 qlcnic_release_rx_buffers(adapter);
919 qlcnic_free_irq(adapter);
920 qlcnic_napi_del(adapter);
921 qlcnic_free_sw_resources(adapter);
922
923 adapter->is_up = 0;
924}
925
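/*
 * Tear the data path down and attach it again against the current
 * firmware context (used, for example, by the Tx timeout handler).
 */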
926int
927qlcnic_reset_context(struct qlcnic_adapter *adapter)
928{
929 int err = 0;
930 struct net_device *netdev = adapter->netdev;
931
932 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
933 return -EBUSY;
934
935 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
936
937 netif_device_detach(netdev);
938
939 if (netif_running(netdev))
940 __qlcnic_down(adapter, netdev);
941
942 qlcnic_detach(adapter);
943
944 if (netif_running(netdev)) {
945 err = qlcnic_attach(adapter);
946 if (!err)
947 err = __qlcnic_up(adapter, netdev);
948
949 if (err)
950 goto done;
951 }
952
953 netif_device_attach(netdev);
954 }
955
956done:
957 clear_bit(__QLCNIC_RESETTING, &adapter->state);
958 return err;
959}
960
961static int
962qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
963 struct net_device *netdev)
964{
965 int err;
966 struct pci_dev *pdev = adapter->pdev;
967
968 adapter->rx_csum = 1;
969 adapter->mc_enabled = 0;
970 adapter->max_mc_count = 38;
971
972 netdev->netdev_ops = &qlcnic_netdev_ops;
973 netdev->watchdog_timeo = 2*HZ;
974
975 qlcnic_change_mtu(netdev, netdev->mtu);
976
977 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
978
979 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
980 netdev->features |= (NETIF_F_GRO);
981 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
982
983 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
984 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
985
986 if (adapter->pci_using_dac) {
987 netdev->features |= NETIF_F_HIGHDMA;
988 netdev->vlan_features |= NETIF_F_HIGHDMA;
989 }
990
991 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
992 netdev->features |= (NETIF_F_HW_VLAN_TX);
993
994 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
995 netdev->features |= NETIF_F_LRO;
996
997 netdev->irq = adapter->msix_entries[0].vector;
998
999 INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
1000
1001 if (qlcnic_read_mac_addr(adapter))
1002 dev_warn(&pdev->dev, "failed to read mac addr\n");
1003
1004 netif_carrier_off(netdev);
1005 netif_stop_queue(netdev);
1006
1007 err = register_netdev(netdev);
1008 if (err) {
1009 dev_err(&pdev->dev, "failed to register net device\n");
1010 return err;
1011 }
1012
1013 return 0;
1014}
1015
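/*
 * PCI probe: map BAR0, start the firmware, set up interrupts, register
 * the net_device and kick off the periodic firmware health poll
 * (qlcnic_fw_poll_work).
 */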
1016static int __devinit
1017qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1018{
1019 struct net_device *netdev = NULL;
1020 struct qlcnic_adapter *adapter = NULL;
1021 int err;
1022 int pci_func_id = PCI_FUNC(pdev->devfn);
1023 uint8_t revision_id;
1024
1025 err = pci_enable_device(pdev);
1026 if (err)
1027 return err;
1028
1029 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1030 err = -ENODEV;
1031 goto err_out_disable_pdev;
1032 }
1033
1034 err = pci_request_regions(pdev, qlcnic_driver_name);
1035 if (err)
1036 goto err_out_disable_pdev;
1037
1038 pci_set_master(pdev);
1039
1040 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1041 if (!netdev) {
1042 dev_err(&pdev->dev, "failed to allocate net_device\n");
1043 err = -ENOMEM;
1044 goto err_out_free_res;
1045 }
1046
1047 SET_NETDEV_DEV(netdev, &pdev->dev);
1048
1049 adapter = netdev_priv(netdev);
1050 adapter->netdev = netdev;
1051 adapter->pdev = pdev;
1052 adapter->ahw.pci_func = pci_func_id;
1053
1054 revision_id = pdev->revision;
1055 adapter->ahw.revision_id = revision_id;
1056
1057 rwlock_init(&adapter->ahw.crb_lock);
1058 mutex_init(&adapter->ahw.mem_lock);
1059
1060 spin_lock_init(&adapter->tx_clean_lock);
1061 INIT_LIST_HEAD(&adapter->mac_list);
1062
1063 err = qlcnic_setup_pci_map(adapter);
1064 if (err)
1065 goto err_out_free_netdev;
1066
1067 /* This will be reset for mezz cards */
1068 adapter->portnum = pci_func_id;
1069
1070 err = qlcnic_get_board_info(adapter);
1071 if (err) {
1072 dev_err(&pdev->dev, "Error getting board config info.\n");
1073 goto err_out_iounmap;
1074 }
1075
1076
1077 err = qlcnic_start_firmware(adapter);
1078 if (err)
1079 goto err_out_decr_ref;
1080
1081	/* No virtual-physical port mapping is read from the firmware yet;
1082	 * default the physical port to the PCI function number.
1083	 */
1084 adapter->physical_port = adapter->portnum;
1085
1086 qlcnic_clear_stats(adapter);
1087
1088 qlcnic_setup_intr(adapter);
1089
1090 err = qlcnic_setup_netdev(adapter, netdev);
1091 if (err)
1092 goto err_out_disable_msi;
1093
1094 pci_set_drvdata(pdev, adapter);
1095
1096 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1097
1098 switch (adapter->ahw.port_type) {
1099 case QLCNIC_GBE:
1100 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1101 adapter->netdev->name);
1102 break;
1103 case QLCNIC_XGBE:
1104 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1105 adapter->netdev->name);
1106 break;
1107 }
1108
1109 qlcnic_create_diag_entries(adapter);
1110
1111 return 0;
1112
1113err_out_disable_msi:
1114 qlcnic_teardown_intr(adapter);
1115
1116err_out_decr_ref:
1117 qlcnic_clr_all_drv_state(adapter);
1118
1119err_out_iounmap:
1120 qlcnic_cleanup_pci_map(adapter);
1121
1122err_out_free_netdev:
1123 free_netdev(netdev);
1124
1125err_out_free_res:
1126 pci_release_regions(pdev);
1127
1128err_out_disable_pdev:
1129 pci_set_drvdata(pdev, NULL);
1130 pci_disable_device(pdev);
1131 return err;
1132}
1133
1134static void __devexit qlcnic_remove(struct pci_dev *pdev)
1135{
1136 struct qlcnic_adapter *adapter;
1137 struct net_device *netdev;
1138
1139 adapter = pci_get_drvdata(pdev);
1140 if (adapter == NULL)
1141 return;
1142
1143 netdev = adapter->netdev;
1144
1145 qlcnic_cancel_fw_work(adapter);
1146
1147 unregister_netdev(netdev);
1148
1149 cancel_work_sync(&adapter->tx_timeout_task);
1150
1151 qlcnic_detach(adapter);
1152
1153 qlcnic_clr_all_drv_state(adapter);
1154
1155 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1156
1157 qlcnic_teardown_intr(adapter);
1158
1159 qlcnic_remove_diag_entries(adapter);
1160
1161 qlcnic_cleanup_pci_map(adapter);
1162
1163 qlcnic_release_firmware(adapter);
1164
1165 pci_release_regions(pdev);
1166 pci_disable_device(pdev);
1167 pci_set_drvdata(pdev, NULL);
1168
1169 free_netdev(netdev);
1170}

1171static int __qlcnic_shutdown(struct pci_dev *pdev)
1172{
1173 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1174 struct net_device *netdev = adapter->netdev;
1175 int retval;
1176
1177 netif_device_detach(netdev);
1178
1179 qlcnic_cancel_fw_work(adapter);
1180
1181 if (netif_running(netdev))
1182 qlcnic_down(adapter, netdev);
1183
1184 cancel_work_sync(&adapter->tx_timeout_task);
1185
1186 qlcnic_detach(adapter);
1187
1188 qlcnic_clr_all_drv_state(adapter);
1189
1190 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1191
1192 retval = pci_save_state(pdev);
1193 if (retval)
1194 return retval;
1195
1196 if (qlcnic_wol_supported(adapter)) {
1197 pci_enable_wake(pdev, PCI_D3cold, 1);
1198 pci_enable_wake(pdev, PCI_D3hot, 1);
1199 }
1200
1201 return 0;
1202}
1203
1204static void qlcnic_shutdown(struct pci_dev *pdev)
1205{
1206 if (__qlcnic_shutdown(pdev))
1207 return;
1208
1209 pci_disable_device(pdev);
1210}
1211
1212#ifdef CONFIG_PM
1213static int
1214qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1215{
1216 int retval;
1217
1218 retval = __qlcnic_shutdown(pdev);
1219 if (retval)
1220 return retval;
1221
1222 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1223 return 0;
1224}
1225
1226static int
1227qlcnic_resume(struct pci_dev *pdev)
1228{
1229 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1230 struct net_device *netdev = adapter->netdev;
1231 int err;
1232
1233 err = pci_enable_device(pdev);
1234 if (err)
1235 return err;
1236
1237 pci_set_power_state(pdev, PCI_D0);
1238 pci_set_master(pdev);
1239 pci_restore_state(pdev);
1240
1241 adapter->ahw.crb_win = -1;
1242 adapter->ahw.ocm_win = -1;
1243
1244 err = qlcnic_start_firmware(adapter);
1245 if (err) {
1246 dev_err(&pdev->dev, "failed to start firmware\n");
1247 return err;
1248 }
1249
1250 if (netif_running(netdev)) {
1251 err = qlcnic_attach(adapter);
1252 if (err)
1253 goto err_out;
1254
1255 err = qlcnic_up(adapter, netdev);
1256 if (err)
1257 goto err_out_detach;
1258
1259
1260 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1261 }
1262
1263 netif_device_attach(netdev);
1264 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1265 return 0;
1266
1267err_out_detach:
1268 qlcnic_detach(adapter);
1269err_out:
1270 qlcnic_clr_all_drv_state(adapter);
1271 return err;
1272}
1273#endif
1274
1275static int qlcnic_open(struct net_device *netdev)
1276{
1277 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1278 int err;
1279
1280 if (adapter->driver_mismatch)
1281 return -EIO;
1282
1283 err = qlcnic_attach(adapter);
1284 if (err)
1285 return err;
1286
1287 err = __qlcnic_up(adapter, netdev);
1288 if (err)
1289 goto err_out;
1290
1291 netif_start_queue(netdev);
1292
1293 return 0;
1294
1295err_out:
1296 qlcnic_detach(adapter);
1297 return err;
1298}
1299
1300/*
1301 * qlcnic_close - disable the network interface (.ndo_stop entry point)
1302 */
1303static int qlcnic_close(struct net_device *netdev)
1304{
1305 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1306
1307 __qlcnic_down(adapter, netdev);
1308 return 0;
1309}
1310
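/*
 * Fill in the checksum/LSO fields of the first Tx descriptor.  For LSO
 * the MAC/IP/TCP headers (plus a constructed VLAN header for out-of-band
 * tags) are copied into the descriptor ring as a template for the
 * firmware.
 */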
1311static void
1312qlcnic_tso_check(struct net_device *netdev,
1313 struct qlcnic_host_tx_ring *tx_ring,
1314 struct cmd_desc_type0 *first_desc,
1315 struct sk_buff *skb)
1316{
1317 u8 opcode = TX_ETHER_PKT;
1318 __be16 protocol = skb->protocol;
1319 u16 flags = 0, vid = 0;
1320 u32 producer;
1321 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1322 struct cmd_desc_type0 *hwdesc;
1323 struct vlan_ethhdr *vh;
1324
1325 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1326
1327 vh = (struct vlan_ethhdr *)skb->data;
1328 protocol = vh->h_vlan_encapsulated_proto;
1329 flags = FLAGS_VLAN_TAGGED;
1330
1331 } else if (vlan_tx_tag_present(skb)) {
1332
1333 flags = FLAGS_VLAN_OOB;
1334 vid = vlan_tx_tag_get(skb);
1335 qlcnic_set_tx_vlan_tci(first_desc, vid);
1336 vlan_oob = 1;
1337 }
1338
1339 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1340 skb_shinfo(skb)->gso_size > 0) {
1341
1342 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1343
1344 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1345 first_desc->total_hdr_length = hdr_len;
1346 if (vlan_oob) {
1347 first_desc->total_hdr_length += VLAN_HLEN;
1348 first_desc->tcp_hdr_offset = VLAN_HLEN;
1349 first_desc->ip_hdr_offset = VLAN_HLEN;
1350 /* Only in case of TSO on vlan device */
1351 flags |= FLAGS_VLAN_TAGGED;
1352 }
1353
1354 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1355 TX_TCP_LSO6 : TX_TCP_LSO;
1356 tso = 1;
1357
1358 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1359 u8 l4proto;
1360
1361 if (protocol == cpu_to_be16(ETH_P_IP)) {
1362 l4proto = ip_hdr(skb)->protocol;
1363
1364 if (l4proto == IPPROTO_TCP)
1365 opcode = TX_TCP_PKT;
1366 else if (l4proto == IPPROTO_UDP)
1367 opcode = TX_UDP_PKT;
1368 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1369 l4proto = ipv6_hdr(skb)->nexthdr;
1370
1371 if (l4proto == IPPROTO_TCP)
1372 opcode = TX_TCPV6_PKT;
1373 else if (l4proto == IPPROTO_UDP)
1374 opcode = TX_UDPV6_PKT;
1375 }
1376 }
1377
1378 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1379 first_desc->ip_hdr_offset += skb_network_offset(skb);
1380 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1381
1382 if (!tso)
1383 return;
1384
1385 /* For LSO, we need to copy the MAC/IP/TCP headers into
1386 * the descriptor ring
1387 */
1388 producer = tx_ring->producer;
1389 copied = 0;
1390 offset = 2;
1391
1392 if (vlan_oob) {
1393 /* Create a TSO vlan header template for firmware */
1394
1395 hwdesc = &tx_ring->desc_head[producer];
1396 tx_ring->cmd_buf_arr[producer].skb = NULL;
1397
1398 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1399 hdr_len + VLAN_HLEN);
1400
1401 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1402 skb_copy_from_linear_data(skb, vh, 12);
1403 vh->h_vlan_proto = htons(ETH_P_8021Q);
1404 vh->h_vlan_TCI = htons(vid);
1405 skb_copy_from_linear_data_offset(skb, 12,
1406 (char *)vh + 16, copy_len - 16);
1407
1408 copied = copy_len - VLAN_HLEN;
1409 offset = 0;
1410
1411 producer = get_next_index(producer, tx_ring->num_desc);
1412 }
1413
1414 while (copied < hdr_len) {
1415
1416 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1417 (hdr_len - copied));
1418
1419 hwdesc = &tx_ring->desc_head[producer];
1420 tx_ring->cmd_buf_arr[producer].skb = NULL;
1421
1422 skb_copy_from_linear_data_offset(skb, copied,
1423 (char *)hwdesc + offset, copy_len);
1424
1425 copied += copy_len;
1426 offset = 0;
1427
1428 producer = get_next_index(producer, tx_ring->num_desc);
1429 }
1430
1431 tx_ring->producer = producer;
1432 barrier();
1433}
1434
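/* DMA-map the skb head and every page fragment; unwind the mappings on failure. */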
1435static int
1436qlcnic_map_tx_skb(struct pci_dev *pdev,
1437 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1438{
1439 struct qlcnic_skb_frag *nf;
1440 struct skb_frag_struct *frag;
1441 int i, nr_frags;
1442 dma_addr_t map;
1443
1444 nr_frags = skb_shinfo(skb)->nr_frags;
1445 nf = &pbuf->frag_array[0];
1446
1447 map = pci_map_single(pdev, skb->data,
1448 skb_headlen(skb), PCI_DMA_TODEVICE);
1449 if (pci_dma_mapping_error(pdev, map))
1450 goto out_err;
1451
1452 nf->dma = map;
1453 nf->length = skb_headlen(skb);
1454
1455 for (i = 0; i < nr_frags; i++) {
1456 frag = &skb_shinfo(skb)->frags[i];
1457 nf = &pbuf->frag_array[i+1];
1458
1459 map = pci_map_page(pdev, frag->page, frag->page_offset,
1460 frag->size, PCI_DMA_TODEVICE);
1461 if (pci_dma_mapping_error(pdev, map))
1462 goto unwind;
1463
1464 nf->dma = map;
1465 nf->length = frag->size;
1466 }
1467
1468 return 0;
1469
1470unwind:
1471 while (--i >= 0) {
1472 nf = &pbuf->frag_array[i+1];
1473 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1474 }
1475
1476 nf = &pbuf->frag_array[0];
1477 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1478
1479out_err:
1480 return -ENOMEM;
1481}
1482
1483static inline void
1484qlcnic_clear_cmddesc(u64 *desc)
1485{
1486 desc[0] = 0ULL;
1487 desc[2] = 0ULL;
1488}
1489
1490static netdev_tx_t
1491qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1492{
1493 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1494 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1495 struct qlcnic_cmd_buffer *pbuf;
1496 struct qlcnic_skb_frag *buffrag;
1497 struct cmd_desc_type0 *hwdesc, *first_desc;
1498 struct pci_dev *pdev;
1499 int i, k;
1500
1501 u32 producer;
1502 int frag_count, no_of_desc;
1503 u32 num_txd = tx_ring->num_desc;
1504
1505 frag_count = skb_shinfo(skb)->nr_frags + 1;
1506
1507 /* 4 fragments per cmd des */
1508	/* 4 fragments per cmd desc */
1509
1510 if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
1511 netif_stop_queue(netdev);
1512 return NETDEV_TX_BUSY;
1513 }
1514
1515 producer = tx_ring->producer;
1516 pbuf = &tx_ring->cmd_buf_arr[producer];
1517
1518 pdev = adapter->pdev;
1519
1520 if (qlcnic_map_tx_skb(pdev, skb, pbuf))
1521 goto drop_packet;
1522
1523 pbuf->skb = skb;
1524 pbuf->frag_count = frag_count;
1525
1526 first_desc = hwdesc = &tx_ring->desc_head[producer];
1527 qlcnic_clear_cmddesc((u64 *)hwdesc);
1528
1529 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1530 qlcnic_set_tx_port(first_desc, adapter->portnum);
1531
1532 for (i = 0; i < frag_count; i++) {
1533
1534 k = i % 4;
1535
1536 if ((k == 0) && (i > 0)) {
1537 /* move to next desc.*/
1538 producer = get_next_index(producer, num_txd);
1539 hwdesc = &tx_ring->desc_head[producer];
1540 qlcnic_clear_cmddesc((u64 *)hwdesc);
1541 tx_ring->cmd_buf_arr[producer].skb = NULL;
1542 }
1543
1544 buffrag = &pbuf->frag_array[i];
1545
1546 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1547 switch (k) {
1548 case 0:
1549 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1550 break;
1551 case 1:
1552 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1553 break;
1554 case 2:
1555 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1556 break;
1557 case 3:
1558 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1559 break;
1560 }
1561 }
1562
1563 tx_ring->producer = get_next_index(producer, num_txd);
1564
1565 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1566
1567 qlcnic_update_cmd_producer(adapter, tx_ring);
1568
1569 adapter->stats.txbytes += skb->len;
1570 adapter->stats.xmitcalled++;
1571
1572 return NETDEV_TX_OK;
1573
1574drop_packet:
1575 adapter->stats.txdropped++;
1576 dev_kfree_skb_any(skb);
1577 return NETDEV_TX_OK;
1578}
1579
1580static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1581{
1582 struct net_device *netdev = adapter->netdev;
1583 u32 temp, temp_state, temp_val;
1584 int rv = 0;
1585
1586 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1587
1588 temp_state = qlcnic_get_temp_state(temp);
1589 temp_val = qlcnic_get_temp_val(temp);
1590
1591 if (temp_state == QLCNIC_TEMP_PANIC) {
1592 dev_err(&netdev->dev,
1593 "Device temperature %d degrees C exceeds"
1594 " maximum allowed. Hardware has been shut down.\n",
1595 temp_val);
1596 rv = 1;
1597 } else if (temp_state == QLCNIC_TEMP_WARN) {
1598 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1599 dev_err(&netdev->dev,
1600 "Device temperature %d degrees C "
1601 "exceeds operating range."
1602 " Immediate action needed.\n",
1603 temp_val);
1604 }
1605 } else {
1606 if (adapter->temp == QLCNIC_TEMP_WARN) {
1607 dev_info(&netdev->dev,
1608 "Device temperature is now %d degrees C"
1609 " in normal range.\n", temp_val);
1610 }
1611 }
1612 adapter->temp = temp_state;
1613 return rv;
1614}
1615
1616void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1617{
1618 struct net_device *netdev = adapter->netdev;
1619
1620 if (adapter->ahw.linkup && !linkup) {
1621 dev_info(&netdev->dev, "NIC Link is down\n");
1622 adapter->ahw.linkup = 0;
1623 if (netif_running(netdev)) {
1624 netif_carrier_off(netdev);
1625 netif_stop_queue(netdev);
1626 }
1627 } else if (!adapter->ahw.linkup && linkup) {
1628 dev_info(&netdev->dev, "NIC Link is up\n");
1629 adapter->ahw.linkup = 1;
1630 if (netif_running(netdev)) {
1631 netif_carrier_on(netdev);
1632 netif_wake_queue(netdev);
1633 }
1634 }
1635}
1636
1637static void qlcnic_tx_timeout(struct net_device *netdev)
1638{
1639 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1640
1641 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1642 return;
1643
1644 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1645 schedule_work(&adapter->tx_timeout_task);
1646}
1647
1648static void qlcnic_tx_timeout_task(struct work_struct *work)
1649{
1650 struct qlcnic_adapter *adapter =
1651 container_of(work, struct qlcnic_adapter, tx_timeout_task);
1652
1653 if (!netif_running(adapter->netdev))
1654 return;
1655
1656 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1657 return;
1658
1659 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1660 goto request_reset;
1661
1662 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1663 if (!qlcnic_reset_context(adapter)) {
1664 adapter->netdev->trans_start = jiffies;
1665 return;
1666
1667 /* context reset failed, fall through for fw reset */
1668 }
1669
1670request_reset:
1671 adapter->need_fw_reset = 1;
1672 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1673}
1674
1675static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1676{
1677 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1678 struct net_device_stats *stats = &netdev->stats;
1679
1680 memset(stats, 0, sizeof(*stats));
1681
1682 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1683 stats->tx_packets = adapter->stats.xmitfinished;
1684 stats->rx_bytes = adapter->stats.rxbytes;
1685 stats->tx_bytes = adapter->stats.txbytes;
1686 stats->rx_dropped = adapter->stats.rxdropped;
1687 stats->tx_dropped = adapter->stats.txdropped;
1688
1689 return stats;
1690}
1691
1692static irqreturn_t qlcnic_intr(int irq, void *data)
1693{
1694 struct qlcnic_host_sds_ring *sds_ring = data;
1695 struct qlcnic_adapter *adapter = sds_ring->adapter;
1696 u32 status;
1697
1698 status = readl(adapter->isr_int_vec);
1699
1700 if (!(status & adapter->int_vec_bit))
1701 return IRQ_NONE;
1702
1703 /* check interrupt state machine, to be sure */
1704 status = readl(adapter->crb_int_state_reg);
1705 if (!ISR_LEGACY_INT_TRIGGERED(status))
1706 return IRQ_NONE;
1707
1708 writel(0xffffffff, adapter->tgt_status_reg);
1709 /* read twice to ensure write is flushed */
1710 readl(adapter->isr_int_vec);
1711 readl(adapter->isr_int_vec);
1712
1713 napi_schedule(&sds_ring->napi);
1714
1715 return IRQ_HANDLED;
1716}
1717
1718static irqreturn_t qlcnic_msi_intr(int irq, void *data)
1719{
1720 struct qlcnic_host_sds_ring *sds_ring = data;
1721 struct qlcnic_adapter *adapter = sds_ring->adapter;
1722
1723 /* clear interrupt */
1724 writel(0xffffffff, adapter->tgt_status_reg);
1725
1726 napi_schedule(&sds_ring->napi);
1727 return IRQ_HANDLED;
1728}
1729
1730static irqreturn_t qlcnic_msix_intr(int irq, void *data)
1731{
1732 struct qlcnic_host_sds_ring *sds_ring = data;
1733
1734 napi_schedule(&sds_ring->napi);
1735 return IRQ_HANDLED;
1736}
1737
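/*
 * Tx completion: walk the command ring up to the hardware consumer
 * index, unmap and free completed skbs, and wake the queue if it was
 * stopped while the ring was full.
 */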
1738static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
1739{
1740 u32 sw_consumer, hw_consumer;
1741 int count = 0, i;
1742 struct qlcnic_cmd_buffer *buffer;
1743 struct pci_dev *pdev = adapter->pdev;
1744 struct net_device *netdev = adapter->netdev;
1745 struct qlcnic_skb_frag *frag;
1746 int done;
1747 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1748
1749 if (!spin_trylock(&adapter->tx_clean_lock))
1750 return 1;
1751
1752 sw_consumer = tx_ring->sw_consumer;
1753 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1754
1755 while (sw_consumer != hw_consumer) {
1756 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
1757 if (buffer->skb) {
1758 frag = &buffer->frag_array[0];
1759 pci_unmap_single(pdev, frag->dma, frag->length,
1760 PCI_DMA_TODEVICE);
1761 frag->dma = 0ULL;
1762 for (i = 1; i < buffer->frag_count; i++) {
1763 frag++;
1764 pci_unmap_page(pdev, frag->dma, frag->length,
1765 PCI_DMA_TODEVICE);
1766 frag->dma = 0ULL;
1767 }
1768
1769 adapter->stats.xmitfinished++;
1770 dev_kfree_skb_any(buffer->skb);
1771 buffer->skb = NULL;
1772 }
1773
1774 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
1775 if (++count >= MAX_STATUS_HANDLE)
1776 break;
1777 }
1778
1779 if (count && netif_running(netdev)) {
1780 tx_ring->sw_consumer = sw_consumer;
1781
1782 smp_mb();
1783
1784 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
1785 __netif_tx_lock(tx_ring->txq, smp_processor_id());
1786 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
1787 netif_wake_queue(netdev);
1788 adapter->tx_timeo_cnt = 0;
1789 }
1790 __netif_tx_unlock(tx_ring->txq);
1791 }
1792 }
1793 /*
1794 * If everything is freed up to consumer then check if the ring is full
1795 * If the ring is full then check if more needs to be freed and
1796 * schedule the call back again.
1797 *
1798 * This happens when there are 2 CPUs. One could be freeing and the
1799 * other filling it. If the ring is full when we get out of here and
1800 * the card has already interrupted the host then the host can miss the
1801 * interrupt.
1802 *
1803 * There is still a possible race condition and the host could miss an
1804 * interrupt. The card has to take care of this.
1805 */
1806 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1807 done = (sw_consumer == hw_consumer);
1808 spin_unlock(&adapter->tx_clean_lock);
1809
1810 return done;
1811}
1812
1813static int qlcnic_poll(struct napi_struct *napi, int budget)
1814{
1815 struct qlcnic_host_sds_ring *sds_ring =
1816 container_of(napi, struct qlcnic_host_sds_ring, napi);
1817
1818 struct qlcnic_adapter *adapter = sds_ring->adapter;
1819
1820 int tx_complete;
1821 int work_done;
1822
1823 tx_complete = qlcnic_process_cmd_ring(adapter);
1824
1825 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
1826
1827 if ((work_done < budget) && tx_complete) {
1828 napi_complete(&sds_ring->napi);
1829 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1830 qlcnic_enable_int(sds_ring);
1831 }
1832
1833 return work_done;
1834}
1835
1836#ifdef CONFIG_NET_POLL_CONTROLLER
1837static void qlcnic_poll_controller(struct net_device *netdev)
1838{
1839 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1840 disable_irq(adapter->irq);
1841 qlcnic_intr(adapter->irq, adapter);
1842 enable_irq(adapter->irq);
1843}
1844#endif
1845
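/*
 * Each PCI function owns a four-bit nibble of QLCNIC_CRB_DRV_STATE:
 * bit 0 of the nibble marks NEED_RESET, bit 1 marks NEED_QUISCENT.
 * All updates are done under qlcnic_api_lock().
 */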
1846static void
1847qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
1848{
1849 u32 val;
1850
1851 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
1852 state != QLCNIC_DEV_NEED_QUISCENT);
1853
1854 if (qlcnic_api_lock(adapter))
1855		return;
1856
1857 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1858
1859 if (state == QLCNIC_DEV_NEED_RESET)
1860 val |= ((u32)0x1 << (adapter->portnum * 4));
1861 else if (state == QLCNIC_DEV_NEED_QUISCENT)
1862 val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
1863
1864 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1865
1866 qlcnic_api_unlock(adapter);
1867}
1868
1869static int
1870qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
1871{
1872 u32 val;
1873
1874 if (qlcnic_api_lock(adapter))
1875 return -EBUSY;
1876
1877 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1878 val &= ~((u32)0x3 << (adapter->portnum * 4));
1879 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1880
1881 qlcnic_api_unlock(adapter);
1882
1883 return 0;
1884}
1885
1886static void
1887qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
1888{
1889 u32 val;
1890
1891 if (qlcnic_api_lock(adapter))
1892 goto err;
1893
1894 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1895 val &= ~((u32)0x1 << (adapter->portnum * 4));
1896 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
1897
1898 if (!(val & 0x11111111))
1899 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
1900
1901 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1902 val &= ~((u32)0x3 << (adapter->portnum * 4));
1903 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1904
1905 qlcnic_api_unlock(adapter);
1906err:
1907 adapter->fw_fail_cnt = 0;
1908 clear_bit(__QLCNIC_START_FW, &adapter->state);
1909 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1910}
1911
1912static int
1913qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
1914{
1915 int act, state;
1916
1917 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1918 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1919
1920 if (((state & 0x11111111) == (act & 0x11111111)) ||
1921 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
1922 return 0;
1923 else
1924 return 1;
1925}
1926
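/*
 * Decide whether this function should load the firmware.  Each function
 * registers itself in QLCNIC_CRB_DEV_REF_COUNT; the one that finds the
 * device COLD performs the load, one that finds it READY returns at
 * once, and the rest wait (up to ~20s) for the state to reach READY.
 */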
1927static int
1928qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
1929{
1930 u32 val, prev_state;
1931 int cnt = 0;
1932 int portnum = adapter->portnum;
1933
1934 if (qlcnic_api_lock(adapter))
1935 return -1;
1936
1937 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1938 if (!(val & ((int)0x1 << (portnum * 4)))) {
1939 val |= ((u32)0x1 << (portnum * 4));
1940 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
1941 } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
1942 goto start_fw;
1943 }
1944
1945 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
1946
1947 switch (prev_state) {
1948 case QLCNIC_DEV_COLD:
1949start_fw:
1950 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
1951 qlcnic_api_unlock(adapter);
1952 return 1;
1953
1954 case QLCNIC_DEV_READY:
1955 qlcnic_api_unlock(adapter);
1956 return 0;
1957
1958 case QLCNIC_DEV_NEED_RESET:
1959 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1960 val |= ((u32)0x1 << (portnum * 4));
1961 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1962 break;
1963
1964 case QLCNIC_DEV_NEED_QUISCENT:
1965 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1966 val |= ((u32)0x1 << ((portnum * 4) + 1));
1967 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1968 break;
1969
1970 case QLCNIC_DEV_FAILED:
1971 qlcnic_api_unlock(adapter);
1972 return -1;
1973 }
1974
1975 qlcnic_api_unlock(adapter);
1976 msleep(1000);
1977 while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
1978 ++cnt < 20)
1979 msleep(1000);
1980
1981 if (cnt >= 20)
1982 return -1;
1983
1984 if (qlcnic_api_lock(adapter))
1985 return -1;
1986
1987 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1988 val &= ~((u32)0x3 << (portnum * 4));
1989 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1990
1991 qlcnic_api_unlock(adapter);
1992
1993 return 0;
1994}
1995
1996static void
1997qlcnic_fwinit_work(struct work_struct *work)
1998{
1999 struct qlcnic_adapter *adapter = container_of(work,
2000 struct qlcnic_adapter, fw_work.work);
2001 int dev_state;
2002
2003 if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
2004 goto err_ret;
2005
2006 if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
2007
2008 if (qlcnic_check_drv_state(adapter)) {
2009 qlcnic_schedule_work(adapter,
2010 qlcnic_fwinit_work, FW_POLL_DELAY);
2011 return;
2012 }
2013
2014 if (!qlcnic_start_firmware(adapter)) {
2015 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2016 return;
2017 }
2018
2019 goto err_ret;
2020 }
2021
2022 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2023 switch (dev_state) {
2024 case QLCNIC_DEV_READY:
2025 if (!qlcnic_start_firmware(adapter)) {
2026 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2027 return;
2028 }
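		/* fall through if qlcnic_start_firmware() failed */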
2029 case QLCNIC_DEV_FAILED:
2030 break;
2031
2032 default:
2033 qlcnic_schedule_work(adapter,
2034 qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
2035 return;
2036 }
2037
2038err_ret:
2039 qlcnic_clr_all_drv_state(adapter);
2040}
2041
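/*
 * First stage of firmware recovery: stop traffic, detach the data path,
 * record this function's state in QLCNIC_CRB_DRV_STATE and hand off to
 * qlcnic_fwinit_work() unless the failure was fatal.
 */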
2042static void
2043qlcnic_detach_work(struct work_struct *work)
2044{
2045 struct qlcnic_adapter *adapter = container_of(work,
2046 struct qlcnic_adapter, fw_work.work);
2047 struct net_device *netdev = adapter->netdev;
2048 u32 status;
2049
2050 netif_device_detach(netdev);
2051
2052 qlcnic_down(adapter, netdev);
2053
2054	rtnl_lock();
2055	qlcnic_detach(adapter);
2056	rtnl_unlock();
2057
2058 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2059
2060 if (status & QLCNIC_RCODE_FATAL_ERROR)
2061 goto err_ret;
2062
2063 if (adapter->temp == QLCNIC_TEMP_PANIC)
2064 goto err_ret;
2065
2066 qlcnic_set_drv_state(adapter, adapter->dev_state);
2067
2068 adapter->fw_wait_cnt = 0;
2069
2070 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2071
2072 return;
2073
2074err_ret:
2075 qlcnic_clr_all_drv_state(adapter);
2076
2077}
2078
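/*
 * Ask for a firmware reset: under qlcnic_api_lock(), move DEV_STATE to
 * NEED_RESET (unless a reset or initialization is already in progress)
 * and mark this function as the one that will reload the firmware by
 * setting __QLCNIC_START_FW.
 */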
2079static void
2080qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2081{
2082 u32 state;
2083
2084 if (qlcnic_api_lock(adapter))
2085 return;
2086
2087 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2088
2089 if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
2090 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2091 set_bit(__QLCNIC_START_FW, &adapter->state);
2092 }
2093
2094 qlcnic_api_unlock(adapter);
2095}
2096
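/*
 * All firmware-recovery steps share the single adapter->fw_work delayed
 * work item: qlcnic_schedule_work() re-initializes it with the handler for
 * the next step, and qlcnic_cancel_fw_work() takes __QLCNIC_RESETTING
 * before cancelling so that no further step can be queued behind it.
 */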
2097static void
2098qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2099 work_func_t func, int delay)
2100{
2101 INIT_DELAYED_WORK(&adapter->fw_work, func);
2102 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2103}
2104
2105static void
2106qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2107{
2108 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2109 msleep(10);
2110
2111 cancel_delayed_work_sync(&adapter->fw_work);
2112}
2113
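/*
 * Final stage of recovery: re-allocate hardware resources and bring the
 * interface back up if it was running, restore the IPv4 address
 * configuration, and re-attach the netdev.  Once this function's DRV_STATE
 * bits are cleared the periodic firmware poll is restarted.
 */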
2114static void
2115qlcnic_attach_work(struct work_struct *work)
2116{
2117 struct qlcnic_adapter *adapter = container_of(work,
2118 struct qlcnic_adapter, fw_work.work);
2119 struct net_device *netdev = adapter->netdev;
2120 int err;
2121
2122 if (netif_running(netdev)) {
2123 err = qlcnic_attach(adapter);
2124 if (err)
2125 goto done;
2126
2127 err = qlcnic_up(adapter, netdev);
2128 if (err) {
2129 qlcnic_detach(adapter);
2130 goto done;
2131 }
2132
2133 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2134 }
2135
2136 netif_device_attach(netdev);
2137
2138done:
2139 adapter->fw_fail_cnt = 0;
2140 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2141
2142 if (!qlcnic_clr_drv_state(adapter))
2143 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2144 FW_POLL_DELAY);
2145}
2146
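/*
 * Periodic health check, called from qlcnic_fw_poll_work().  It monitors
 * the board temperature, pending reset/quiescent requests in DEV_STATE,
 * and the firmware heartbeat counter.  If the heartbeat stops for
 * FW_FAIL_THRESH polls a reset is requested and, when auto_fw_reset is
 * enabled, qlcnic_detach_work() is scheduled.  Returns 1 when recovery has
 * been started (the poll work then stops rescheduling itself), 0 otherwise.
 */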
2147static int
2148qlcnic_check_health(struct qlcnic_adapter *adapter)
2149{
2150 u32 state = 0, heartbit;
2151 struct net_device *netdev = adapter->netdev;
2152
2153 if (qlcnic_check_temp(adapter))
2154 goto detach;
2155
2156 if (adapter->need_fw_reset) {
2157 qlcnic_dev_request_reset(adapter);
2158 goto detach;
2159 }
2160
2161 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2162 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
2163 adapter->need_fw_reset = 1;
2164
2165 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2166 if (heartbit != adapter->heartbit) {
2167 adapter->heartbit = heartbit;
2168 adapter->fw_fail_cnt = 0;
2169 if (adapter->need_fw_reset)
2170 goto detach;
2171 return 0;
2172 }
2173
2174 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2175 return 0;
2176
2177 qlcnic_dev_request_reset(adapter);
2178
2179 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2180
2181 dev_info(&netdev->dev, "firmware hang detected\n");
2182
2183detach:
2184 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2185 QLCNIC_DEV_NEED_RESET;
2186
2187 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2188 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2189 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2190
2191 return 1;
2192}
2193
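/*
 * Periodic poll (every FW_POLL_DELAY jiffies): skip the health check while
 * a reset is in progress, stop polling once recovery has been kicked off,
 * otherwise reschedule.
 */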
2194static void
2195qlcnic_fw_poll_work(struct work_struct *work)
2196{
2197 struct qlcnic_adapter *adapter = container_of(work,
2198 struct qlcnic_adapter, fw_work.work);
2199
2200 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2201 goto reschedule;
2202
2203
2204 if (qlcnic_check_health(adapter))
2205 return;
2206
2207reschedule:
2208 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2209}
2210
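/*
 * sysfs attribute "bridged_mode": writing 0/1 enables or disables bridged
 * mode through qlcnic_config_bridged_mode().  The write is accepted only
 * when the firmware advertises QLCNIC_FW_CAPABILITY_BDG and the adapter is
 * fully up (QLCNIC_ADAPTER_UP_MAGIC).
 */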
2211static ssize_t
2212qlcnic_store_bridged_mode(struct device *dev,
2213 struct device_attribute *attr, const char *buf, size_t len)
2214{
2215 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2216 unsigned long new;
2217 int ret = -EINVAL;
2218
2219 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2220 goto err_out;
2221
2222 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2223 goto err_out;
2224
2225 if (strict_strtoul(buf, 2, &new))
2226 goto err_out;
2227
2228 if (!qlcnic_config_bridged_mode(adapter, !!new))
2229 ret = len;
2230
2231err_out:
2232 return ret;
2233}
2234
2235static ssize_t
2236qlcnic_show_bridged_mode(struct device *dev,
2237 struct device_attribute *attr, char *buf)
2238{
2239 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2240 int bridged_mode = 0;
2241
2242 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2243 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2244
2245 return sprintf(buf, "%d\n", bridged_mode);
2246}
2247
2248static struct device_attribute dev_attr_bridged_mode = {
2249 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
2250 .show = qlcnic_show_bridged_mode,
2251 .store = qlcnic_store_bridged_mode,
2252};
2253
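/*
 * sysfs attribute "diag_mode": writing 0/1 toggles QLCNIC_DIAG_ENABLED.
 * The flag gates the raw register/memory access provided by the "crb" and
 * "mem" binary attributes below.
 */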
2254static ssize_t
2255qlcnic_store_diag_mode(struct device *dev,
2256 struct device_attribute *attr, const char *buf, size_t len)
2257{
2258 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2259 unsigned long new;
2260
2261 if (strict_strtoul(buf, 2, &new))
2262 return -EINVAL;
2263
2264 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2265 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2266
2267 return len;
2268}
2269
2270static ssize_t
2271qlcnic_show_diag_mode(struct device *dev,
2272 struct device_attribute *attr, char *buf)
2273{
2274 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2275
2276 return sprintf(buf, "%d\n",
2277 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
2278}
2279
2280static struct device_attribute dev_attr_diag_mode = {
2281 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2282 .show = qlcnic_show_diag_mode,
2283 .store = qlcnic_store_diag_mode,
2284};
2285
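/*
 * Binary sysfs files "crb" and "mem" give diagnostic tools raw access to
 * the adapter.  "crb" accepts single 4-byte, word-aligned accesses within
 * CRB space; "mem" accepts single 8-byte, qword-aligned accesses routed
 * through qlcnic_pci_mem_{read,write}_2M().  Both are refused unless
 * diag_mode is enabled.
 */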
2286static int
2287qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2288 loff_t offset, size_t size)
2289{
2290 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2291 return -EIO;
2292
2293 if ((size != 4) || (offset & 0x3))
2294 return -EINVAL;
2295
2296 if (offset < QLCNIC_PCI_CRBSPACE)
2297 return -EINVAL;
2298
2299 return 0;
2300}
2301
2302static ssize_t
2303qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
2304 char *buf, loff_t offset, size_t size)
2305{
2306 struct device *dev = container_of(kobj, struct device, kobj);
2307 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2308 u32 data;
2309 int ret;
2310
2311 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2312 if (ret != 0)
2313 return ret;
2314
2315 data = QLCRD32(adapter, offset);
2316 memcpy(buf, &data, size);
2317 return size;
2318}
2319
2320static ssize_t
2321qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
2322 char *buf, loff_t offset, size_t size)
2323{
2324 struct device *dev = container_of(kobj, struct device, kobj);
2325 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2326 u32 data;
2327 int ret;
2328
2329 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2330 if (ret != 0)
2331 return ret;
2332
2333 memcpy(&data, buf, size);
2334 QLCWR32(adapter, offset, data);
2335 return size;
2336}
2337
2338static int
2339qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2340 loff_t offset, size_t size)
2341{
2342 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2343 return -EIO;
2344
2345 if ((size != 8) || (offset & 0x7))
2346 return -EIO;
2347
2348 return 0;
2349}
2350
2351static ssize_t
2352qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2353 char *buf, loff_t offset, size_t size)
2354{
2355 struct device *dev = container_of(kobj, struct device, kobj);
2356 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2357 u64 data;
2358 int ret;
2359
2360 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2361 if (ret != 0)
2362 return ret;
2363
2364 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
2365 return -EIO;
2366
2367 memcpy(buf, &data, size);
2368
2369 return size;
2370}
2371
2372static ssize_t
2373qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
2374 char *buf, loff_t offset, size_t size)
2375{
2376 struct device *dev = container_of(kobj, struct device, kobj);
2377 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2378 u64 data;
2379 int ret;
2380
2381 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2382 if (ret != 0)
2383 return ret;
2384
2385 memcpy(&data, buf, size);
2386
2387 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
2388 return -EIO;
2389
2390 return size;
2391}
2392
2393
2394static struct bin_attribute bin_attr_crb = {
2395 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
2396 .size = 0,
2397 .read = qlcnic_sysfs_read_crb,
2398 .write = qlcnic_sysfs_write_crb,
2399};
2400
2401static struct bin_attribute bin_attr_mem = {
2402 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
2403 .size = 0,
2404 .read = qlcnic_sysfs_read_mem,
2405 .write = qlcnic_sysfs_write_mem,
2406};
2407
2408static void
2409qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
2410{
2411 struct device *dev = &adapter->pdev->dev;
2412
2413 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2414 if (device_create_file(dev, &dev_attr_bridged_mode))
2415 dev_warn(dev,
2416 "failed to create bridged_mode sysfs entry\n");
2417}
2418
2419static void
2420qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
2421{
2422 struct device *dev = &adapter->pdev->dev;
2423
2424 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2425 device_remove_file(dev, &dev_attr_bridged_mode);
2426}
2427
2428static void
2429qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
2430{
2431 struct device *dev = &adapter->pdev->dev;
2432
2433 if (device_create_file(dev, &dev_attr_diag_mode))
2434 dev_info(dev, "failed to create diag_mode sysfs entry\n");
2435 if (device_create_bin_file(dev, &bin_attr_crb))
2436 dev_info(dev, "failed to create crb sysfs entry\n");
2437 if (device_create_bin_file(dev, &bin_attr_mem))
2438 dev_info(dev, "failed to create mem sysfs entry\n");
2439}
2440
2441
2442static void
2443qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
2444{
2445 struct device *dev = &adapter->pdev->dev;
2446
2447 device_remove_file(dev, &dev_attr_diag_mode);
2448 device_remove_bin_file(dev, &bin_attr_crb);
2449 device_remove_bin_file(dev, &bin_attr_mem);
2450}
2451
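/*
 * With CONFIG_INET, netdevice and inetaddr notifiers track IPv4 address
 * changes on qlcnic interfaces (including VLAN devices stacked on them)
 * and notify the adapter of each address via qlcnic_config_ipaddr().
 * Adapters running in cut-through mode do not support this and are skipped.
 */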
2452#ifdef CONFIG_INET
2453
2454#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
2455
2456static int
2457qlcnic_destip_supported(struct qlcnic_adapter *adapter)
2458{
2459 if (adapter->ahw.cut_through)
2460 return 0;
2461
2462 return 1;
2463}
2464
2465static void
2466qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2467{
2468 struct in_device *indev;
2469 struct qlcnic_adapter *adapter = netdev_priv(dev);
2470
2471 if (!qlcnic_destip_supported(adapter))
2472 return;
2473
2474 indev = in_dev_get(dev);
2475 if (!indev)
2476 return;
2477
2478 for_ifa(indev) {
2479 switch (event) {
2480 case NETDEV_UP:
2481 qlcnic_config_ipaddr(adapter,
2482 ifa->ifa_address, QLCNIC_IP_UP);
2483 break;
2484 case NETDEV_DOWN:
2485 qlcnic_config_ipaddr(adapter,
2486 ifa->ifa_address, QLCNIC_IP_DOWN);
2487 break;
2488 default:
2489 break;
2490 }
2491 } endfor_ifa(indev);
2492
2493 in_dev_put(indev);
2494 return;
2495}
2496
2497static int qlcnic_netdev_event(struct notifier_block *this,
2498 unsigned long event, void *ptr)
2499{
2500 struct qlcnic_adapter *adapter;
2501 struct net_device *dev = (struct net_device *)ptr;
2502
2503recheck:
2504 if (dev == NULL)
2505 goto done;
2506
2507 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2508 dev = vlan_dev_real_dev(dev);
2509 goto recheck;
2510 }
2511
2512 if (!is_qlcnic_netdev(dev))
2513 goto done;
2514
2515 adapter = netdev_priv(dev);
2516
2517 if (!adapter)
2518 goto done;
2519
2520 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2521 goto done;
2522
2523 qlcnic_config_indev_addr(dev, event);
2524done:
2525 return NOTIFY_DONE;
2526}
2527
2528static int
2529qlcnic_inetaddr_event(struct notifier_block *this,
2530 unsigned long event, void *ptr)
2531{
2532 struct qlcnic_adapter *adapter;
2533 struct net_device *dev;
2534
2535 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2536
2537 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
2538
2539recheck:
2540 if (dev == NULL || !netif_running(dev))
2541 goto done;
2542
2543 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2544 dev = vlan_dev_real_dev(dev);
2545 goto recheck;
2546 }
2547
2548 if (!is_qlcnic_netdev(dev))
2549 goto done;
2550
2551 adapter = netdev_priv(dev);
2552
2553 if (!adapter || !qlcnic_destip_supported(adapter))
2554 goto done;
2555
2556 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2557 goto done;
2558
2559 switch (event) {
2560 case NETDEV_UP:
2561 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
2562 break;
2563 case NETDEV_DOWN:
2564 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
2565 break;
2566 default:
2567 break;
2568 }
2569
2570done:
2571 return NOTIFY_DONE;
2572}
2573
2574static struct notifier_block qlcnic_netdev_cb = {
2575 .notifier_call = qlcnic_netdev_event,
2576};
2577
2578static struct notifier_block qlcnic_inetaddr_cb = {
2579 .notifier_call = qlcnic_inetaddr_event,
2580};
2581#else
2582static void
2583qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2584{ }
2585#endif
2586
2587static struct pci_driver qlcnic_driver = {
2588 .name = qlcnic_driver_name,
2589 .id_table = qlcnic_pci_tbl,
2590 .probe = qlcnic_probe,
2591 .remove = __devexit_p(qlcnic_remove),
2592#ifdef CONFIG_PM
2593 .suspend = qlcnic_suspend,
2594 .resume = qlcnic_resume,
2595#endif
2596 .shutdown = qlcnic_shutdown
2597};
2598
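/*
 * Module init/exit: register the address notifiers (when CONFIG_INET is
 * set) and the PCI driver, and unregister them in the opposite order on
 * unload.
 */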
2599static int __init qlcnic_init_module(void)
2600{
2601
2602 printk(KERN_INFO "%s\n", qlcnic_driver_string);
2603
2604#ifdef CONFIG_INET
2605 register_netdevice_notifier(&qlcnic_netdev_cb);
2606 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
2607#endif
2608
2609
2610 return pci_register_driver(&qlcnic_driver);
2611}
2612
2613module_init(qlcnic_init_module);
2614
2615static void __exit qlcnic_exit_module(void)
2616{
2617
2618 pci_unregister_driver(&qlcnic_driver);
2619
2620#ifdef CONFIG_INET
2621 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
2622 unregister_netdevice_notifier(&qlcnic_netdev_cb);
2623#endif
2624}
2625
2626module_exit(qlcnic_exit_module);