Ron Mercer5a4faa872006-07-25 00:40:21 -07001/*
2 * QLogic QLA3xxx NIC HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla3xxx for copyright and licensing details.
6 */
7
8#include <linux/kernel.h>
9#include <linux/init.h>
10#include <linux/types.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/pci.h>
14#include <linux/dma-mapping.h>
15#include <linux/sched.h>
16#include <linux/slab.h>
17#include <linux/dmapool.h>
18#include <linux/mempool.h>
19#include <linux/spinlock.h>
20#include <linux/kthread.h>
21#include <linux/interrupt.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/ip.h>
Ron Mercerbd36b0a2007-01-03 16:26:08 -080025#include <linux/in.h>
Ron Mercer5a4faa872006-07-25 00:40:21 -070026#include <linux/if_arp.h>
27#include <linux/if_ether.h>
28#include <linux/netdevice.h>
29#include <linux/etherdevice.h>
30#include <linux/ethtool.h>
31#include <linux/skbuff.h>
32#include <linux/rtnetlink.h>
33#include <linux/if_vlan.h>
35#include <linux/delay.h>
36#include <linux/mm.h>
37
38#include "qla3xxx.h"
39
40#define DRV_NAME "qla3xxx"
41#define DRV_STRING "QLogic ISP3XXX Network Driver"
Ron Mercer201f27e2007-03-26 13:43:03 -070042#define DRV_VERSION "v2.03.00-k4"
Ron Mercer5a4faa872006-07-25 00:40:21 -070043#define PFX DRV_NAME " "
44
45static const char ql3xxx_driver_name[] = DRV_NAME;
46static const char ql3xxx_driver_version[] = DRV_VERSION;
47
48MODULE_AUTHOR("QLogic Corporation");
49MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
50MODULE_LICENSE("GPL");
51MODULE_VERSION(DRV_VERSION);
52
53static const u32 default_msg
54 = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
55 | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
56
57static int debug = -1; /* defaults above */
58module_param(debug, int, 0);
59MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
60
61static int msi;
62module_param(msi, int, 0);
63MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
64
65static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
66 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
Ron Mercerbd36b0a2007-01-03 16:26:08 -080067 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
Ron Mercer5a4faa872006-07-25 00:40:21 -070068 /* required last entry */
69 {0,}
70};
71
72MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
73
74/*
Ron Mercer3efedf22007-03-26 12:43:52 -070075 * These are the known PHYs used by this driver
76 */
77typedef enum {
78 PHY_TYPE_UNKNOWN = 0,
79 PHY_VITESSE_VSC8211,
80 PHY_AGERE_ET1011C,
81 MAX_PHY_DEV_TYPES
82} PHY_DEVICE_et;
83
84typedef struct {
85 PHY_DEVICE_et phyDevice;
86 u32 phyIdOUI;
87 u16 phyIdModel;
88 char *name;
89} PHY_DEVICE_INFO_t;
90
Adrian Bunkb1fc1fa2007-03-31 22:55:40 +020091static const PHY_DEVICE_INFO_t PHY_DEVICES[] =
Ron Mercer3efedf22007-03-26 12:43:52 -070092 {{PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
93 {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
94 {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
95};
96
97
98/*
Ron Mercer5a4faa872006-07-25 00:40:21 -070099 * Caller must take hw_lock.
100 */
101static int ql_sem_spinlock(struct ql3_adapter *qdev,
102 u32 sem_mask, u32 sem_bits)
103{
104 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
105 u32 value;
106 unsigned int seconds = 3;
107
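	/*
	 * Request ownership: the upper halfword of semaphoreReg selects
	 * which semaphore bits to update and the lower halfword supplies
	 * the value.  Read back to confirm we actually got the semaphore,
	 * retrying for up to three seconds.
	 */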
108 do {
109 writel((sem_mask | sem_bits),
110 &port_regs->CommonRegs.semaphoreReg);
111 value = readl(&port_regs->CommonRegs.semaphoreReg);
112 if ((value & (sem_mask >> 16)) == sem_bits)
113 return 0;
114 ssleep(1);
115 } while(--seconds);
116 return -1;
117}
118
119static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
120{
121 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
122 writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
123 readl(&port_regs->CommonRegs.semaphoreReg);
124}
125
126static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
127{
128 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
129 u32 value;
130
131 writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
132 value = readl(&port_regs->CommonRegs.semaphoreReg);
133 return ((value & (sem_mask >> 16)) == sem_bits);
134}
135
136/*
137 * Caller holds hw_lock.
138 */
139static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
140{
141 int i = 0;
142
143 while (1) {
144 if (!ql_sem_lock(qdev,
145 QL_DRVR_SEM_MASK,
146 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
147 * 2) << 1)) {
148 if (i < 10) {
149 ssleep(1);
150 i++;
151 } else {
152 printk(KERN_ERR PFX "%s: Timed out waiting for "
153 "driver lock...\n",
154 qdev->ndev->name);
155 return 0;
156 }
157 } else {
158 printk(KERN_DEBUG PFX
159 "%s: driver lock acquired.\n",
160 qdev->ndev->name);
161 return 1;
162 }
163 }
164}
165
166static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
167{
168 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
169
170 writel(((ISP_CONTROL_NP_MASK << 16) | page),
171 &port_regs->CommonRegs.ispControlStatus);
172 readl(&port_regs->CommonRegs.ispControlStatus);
173 qdev->current_page = page;
174}
175
176static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
177 u32 __iomem * reg)
178{
179 u32 value;
180 unsigned long hw_flags;
181
182 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
183 value = readl(reg);
184 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
185
186 return value;
187}
188
189static u32 ql_read_common_reg(struct ql3_adapter *qdev,
190 u32 __iomem * reg)
191{
192 return readl(reg);
193}
194
195static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
196{
197 u32 value;
198 unsigned long hw_flags;
199
200 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
201
202 if (qdev->current_page != 0)
203 ql_set_register_page(qdev,0);
204 value = readl(reg);
205
206 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
207 return value;
208}
209
210static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
211{
212 if (qdev->current_page != 0)
213 ql_set_register_page(qdev,0);
214 return readl(reg);
215}
216
217static void ql_write_common_reg_l(struct ql3_adapter *qdev,
Al Viroee111d12006-09-25 02:53:53 +0100218 u32 __iomem *reg, u32 value)
Ron Mercer5a4faa872006-07-25 00:40:21 -0700219{
220 unsigned long hw_flags;
221
222 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
Al Viroee111d12006-09-25 02:53:53 +0100223 writel(value, reg);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700224 readl(reg);
225 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
226 return;
227}
228
229static void ql_write_common_reg(struct ql3_adapter *qdev,
Al Viroee111d12006-09-25 02:53:53 +0100230 u32 __iomem *reg, u32 value)
Ron Mercer5a4faa872006-07-25 00:40:21 -0700231{
Al Viroee111d12006-09-25 02:53:53 +0100232 writel(value, reg);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700233 readl(reg);
234 return;
235}
236
Ron Mercer80b02e52007-01-03 16:26:07 -0800237static void ql_write_nvram_reg(struct ql3_adapter *qdev,
238 u32 __iomem *reg, u32 value)
239{
240 writel(value, reg);
241 readl(reg);
242 udelay(1);
243 return;
244}
245
Ron Mercer5a4faa872006-07-25 00:40:21 -0700246static void ql_write_page0_reg(struct ql3_adapter *qdev,
Al Viroee111d12006-09-25 02:53:53 +0100247 u32 __iomem *reg, u32 value)
Ron Mercer5a4faa872006-07-25 00:40:21 -0700248{
249 if (qdev->current_page != 0)
250 ql_set_register_page(qdev,0);
Al Viroee111d12006-09-25 02:53:53 +0100251 writel(value, reg);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700252 readl(reg);
253 return;
254}
255
256/*
257 * Caller holds hw_lock. Only called during init.
258 */
259static void ql_write_page1_reg(struct ql3_adapter *qdev,
Al Viroee111d12006-09-25 02:53:53 +0100260 u32 __iomem *reg, u32 value)
Ron Mercer5a4faa872006-07-25 00:40:21 -0700261{
262 if (qdev->current_page != 1)
263 ql_set_register_page(qdev,1);
Al Viroee111d12006-09-25 02:53:53 +0100264 writel(value, reg);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700265 readl(reg);
266 return;
267}
268
269/*
270 * Caller holds hw_lock. Only called during init.
271 */
272static void ql_write_page2_reg(struct ql3_adapter *qdev,
Al Viroee111d12006-09-25 02:53:53 +0100273 u32 __iomem *reg, u32 value)
Ron Mercer5a4faa872006-07-25 00:40:21 -0700274{
275 if (qdev->current_page != 2)
276 ql_set_register_page(qdev,2);
Al Viroee111d12006-09-25 02:53:53 +0100277 writel(value, reg);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700278 readl(reg);
279 return;
280}
281
282static void ql_disable_interrupts(struct ql3_adapter *qdev)
283{
284 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
285
286 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
287 (ISP_IMR_ENABLE_INT << 16));
288
289}
290
291static void ql_enable_interrupts(struct ql3_adapter *qdev)
292{
293 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
294
295 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
296 ((0xff << 16) | ISP_IMR_ENABLE_INT));
297
298}
299
300static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
301 struct ql_rcv_buf_cb *lrg_buf_cb)
302{
Benjamin Li0f8ab892007-02-26 11:06:40 -0800303 dma_addr_t map;
304 int err;
Ron Mercer5a4faa872006-07-25 00:40:21 -0700305 lrg_buf_cb->next = NULL;
306
307 if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
308 qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
309 } else {
310 qdev->lrg_buf_free_tail->next = lrg_buf_cb;
311 qdev->lrg_buf_free_tail = lrg_buf_cb;
312 }
313
314 if (!lrg_buf_cb->skb) {
Benjamin Licd238fa2007-02-26 11:06:33 -0800315 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
316 qdev->lrg_buffer_len);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700317 if (unlikely(!lrg_buf_cb->skb)) {
Benjamin Licd238fa2007-02-26 11:06:33 -0800318 printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
Ron Mercer5a4faa872006-07-25 00:40:21 -0700319 qdev->ndev->name);
320 qdev->lrg_buf_skb_check++;
321 } else {
322 /*
323 * We save some space to copy the ethhdr from first
324 * buffer
325 */
326 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
327 map = pci_map_single(qdev->pdev,
328 lrg_buf_cb->skb->data,
329 qdev->lrg_buffer_len -
330 QL_HEADER_SPACE,
331 PCI_DMA_FROMDEVICE);
Benjamin Li0f8ab892007-02-26 11:06:40 -0800332 err = pci_dma_mapping_error(map);
333 if(err) {
334 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
335 qdev->ndev->name, err);
336 dev_kfree_skb(lrg_buf_cb->skb);
337 lrg_buf_cb->skb = NULL;
338
339 qdev->lrg_buf_skb_check++;
340 return;
341 }
342
Ron Mercer5a4faa872006-07-25 00:40:21 -0700343 lrg_buf_cb->buf_phy_addr_low =
344 cpu_to_le32(LS_64BITS(map));
345 lrg_buf_cb->buf_phy_addr_high =
346 cpu_to_le32(MS_64BITS(map));
347 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
348 pci_unmap_len_set(lrg_buf_cb, maplen,
349 qdev->lrg_buffer_len -
350 QL_HEADER_SPACE);
351 }
352 }
353
354 qdev->lrg_buf_free_count++;
355}
356
357static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
358 *qdev)
359{
360 struct ql_rcv_buf_cb *lrg_buf_cb;
361
362 if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
363 if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
364 qdev->lrg_buf_free_tail = NULL;
365 qdev->lrg_buf_free_count--;
366 }
367
368 return lrg_buf_cb;
369}
370
371static u32 addrBits = EEPROM_NO_ADDR_BITS;
372static u32 dataBits = EEPROM_NO_DATA_BITS;
373
374static void fm93c56a_deselect(struct ql3_adapter *qdev);
375static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
376 unsigned short *value);
377
378/*
379 * Caller holds hw_lock.
380 */
381static void fm93c56a_select(struct ql3_adapter *qdev)
382{
383 struct ql3xxx_port_registers __iomem *port_regs =
384 qdev->mem_map_registers;
385
386 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
Ron Mercer80b02e52007-01-03 16:26:07 -0800387 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700388 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
Ron Mercer80b02e52007-01-03 16:26:07 -0800389 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700390 ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
391}
392
393/*
394 * Caller holds hw_lock.
395 */
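/*
 * Bit-bang a command plus address out to the FM93C56A serial EEPROM:
 * each bit is placed on the DO line and clocked in with explicit
 * rise/fall writes to the serial port interface register.
 */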
396static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
397{
398 int i;
399 u32 mask;
400 u32 dataBit;
401 u32 previousBit;
402 struct ql3xxx_port_registers __iomem *port_regs =
403 qdev->mem_map_registers;
404
405 /* Clock in a zero, then do the start bit */
Ron Mercer80b02e52007-01-03 16:26:07 -0800406 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700407 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
408 AUBURN_EEPROM_DO_1);
Ron Mercer80b02e52007-01-03 16:26:07 -0800409 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700410 ISP_NVRAM_MASK | qdev->
411 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
412 AUBURN_EEPROM_CLK_RISE);
Ron Mercer80b02e52007-01-03 16:26:07 -0800413 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700414 ISP_NVRAM_MASK | qdev->
415 eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
416 AUBURN_EEPROM_CLK_FALL);
417
418 mask = 1 << (FM93C56A_CMD_BITS - 1);
419 /* Force the previous data bit to be different */
420 previousBit = 0xffff;
421 for (i = 0; i < FM93C56A_CMD_BITS; i++) {
422 dataBit =
423 (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
424 if (previousBit != dataBit) {
425 /*
426 * If the bit changed, then change the DO state to
427 * match
428 */
Ron Mercer80b02e52007-01-03 16:26:07 -0800429 ql_write_nvram_reg(qdev,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700430 &port_regs->CommonRegs.
431 serialPortInterfaceReg,
432 ISP_NVRAM_MASK | qdev->
433 eeprom_cmd_data | dataBit);
434 previousBit = dataBit;
435 }
Ron Mercer80b02e52007-01-03 16:26:07 -0800436 ql_write_nvram_reg(qdev,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700437 &port_regs->CommonRegs.
438 serialPortInterfaceReg,
439 ISP_NVRAM_MASK | qdev->
440 eeprom_cmd_data | dataBit |
441 AUBURN_EEPROM_CLK_RISE);
Ron Mercer80b02e52007-01-03 16:26:07 -0800442 ql_write_nvram_reg(qdev,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700443 &port_regs->CommonRegs.
444 serialPortInterfaceReg,
445 ISP_NVRAM_MASK | qdev->
446 eeprom_cmd_data | dataBit |
447 AUBURN_EEPROM_CLK_FALL);
448 cmd = cmd << 1;
449 }
450
451 mask = 1 << (addrBits - 1);
452 /* Force the previous data bit to be different */
453 previousBit = 0xffff;
454 for (i = 0; i < addrBits; i++) {
455 dataBit =
456 (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
457 AUBURN_EEPROM_DO_0;
458 if (previousBit != dataBit) {
459 /*
460 * If the bit changed, then change the DO state to
461 * match
462 */
Ron Mercer80b02e52007-01-03 16:26:07 -0800463 ql_write_nvram_reg(qdev,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700464 &port_regs->CommonRegs.
465 serialPortInterfaceReg,
466 ISP_NVRAM_MASK | qdev->
467 eeprom_cmd_data | dataBit);
468 previousBit = dataBit;
469 }
Ron Mercer80b02e52007-01-03 16:26:07 -0800470 ql_write_nvram_reg(qdev,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700471 &port_regs->CommonRegs.
472 serialPortInterfaceReg,
473 ISP_NVRAM_MASK | qdev->
474 eeprom_cmd_data | dataBit |
475 AUBURN_EEPROM_CLK_RISE);
Ron Mercer80b02e52007-01-03 16:26:07 -0800476 ql_write_nvram_reg(qdev,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700477 &port_regs->CommonRegs.
478 serialPortInterfaceReg,
479 ISP_NVRAM_MASK | qdev->
480 eeprom_cmd_data | dataBit |
481 AUBURN_EEPROM_CLK_FALL);
482 eepromAddr = eepromAddr << 1;
483 }
484}
485
486/*
487 * Caller holds hw_lock.
488 */
489static void fm93c56a_deselect(struct ql3_adapter *qdev)
490{
491 struct ql3xxx_port_registers __iomem *port_regs =
492 qdev->mem_map_registers;
493 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
Ron Mercer80b02e52007-01-03 16:26:07 -0800494 ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700495 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
496}
497
498/*
499 * Caller holds hw_lock.
500 */
501static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
502{
503 int i;
504 u32 data = 0;
505 u32 dataBit;
506 struct ql3xxx_port_registers __iomem *port_regs =
507 qdev->mem_map_registers;
508
509 /* Read the data bits */
510 /* The first bit is a dummy. Clock right over it. */
511 for (i = 0; i < dataBits; i++) {
Ron Mercer80b02e52007-01-03 16:26:07 -0800512 ql_write_nvram_reg(qdev,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700513 &port_regs->CommonRegs.
514 serialPortInterfaceReg,
515 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
516 AUBURN_EEPROM_CLK_RISE);
Ron Mercer80b02e52007-01-03 16:26:07 -0800517 ql_write_nvram_reg(qdev,
Ron Mercer5a4faa872006-07-25 00:40:21 -0700518 &port_regs->CommonRegs.
519 serialPortInterfaceReg,
520 ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
521 AUBURN_EEPROM_CLK_FALL);
522 dataBit =
523 (ql_read_common_reg
524 (qdev,
525 &port_regs->CommonRegs.
526 serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
527 data = (data << 1) | dataBit;
528 }
529 *value = (u16) data;
530}
531
532/*
533 * Caller holds hw_lock.
534 */
535static void eeprom_readword(struct ql3_adapter *qdev,
536 u32 eepromAddr, unsigned short *value)
537{
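	/*
	 * Select the part, clock out a READ command for eepromAddr,
	 * shift in one 16-bit word, then deselect.
	 */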
538 fm93c56a_select(qdev);
539 fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
540 fm93c56a_datain(qdev, value);
541 fm93c56a_deselect(qdev);
542}
543
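/*
 * The EEPROM is read as 16-bit words, so on big-endian hosts the two
 * bytes of each word land swapped; undo that for the MAC address.
 */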
544static void ql_swap_mac_addr(u8 * macAddress)
545{
546#ifdef __BIG_ENDIAN
547 u8 temp;
548 temp = macAddress[0];
549 macAddress[0] = macAddress[1];
550 macAddress[1] = temp;
551 temp = macAddress[2];
552 macAddress[2] = macAddress[3];
553 macAddress[3] = temp;
554 temp = macAddress[4];
555 macAddress[4] = macAddress[5];
556 macAddress[5] = temp;
557#endif
558}
559
560static int ql_get_nvram_params(struct ql3_adapter *qdev)
561{
562 u16 *pEEPROMData;
563 u16 checksum = 0;
564 u32 index;
565 unsigned long hw_flags;
566
567 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
568
569 pEEPROMData = (u16 *) & qdev->nvram_data;
570 qdev->eeprom_cmd_data = 0;
571 if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
572 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
573 2) << 10)) {
574 printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n",
575 __func__);
576 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
577 return -1;
578 }
579
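	/* Read the entire EEPROM image; a valid image checksums to zero. */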
580 for (index = 0; index < EEPROM_SIZE; index++) {
581 eeprom_readword(qdev, index, pEEPROMData);
582 checksum += *pEEPROMData;
583 pEEPROMData++;
584 }
585 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
586
587 if (checksum != 0) {
588 printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
589 qdev->ndev->name, checksum);
590 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
591 return -1;
592 }
593
594 /*
595	 * We have an endianness issue with the MAC addresses
596	 * and the two 8-bit values, version and numPorts; they
597	 * must be byte-swapped on big-endian systems.
598 */
599 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
600 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
601 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
602 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
603 pEEPROMData = (u16 *) & qdev->nvram_data.version;
604 *pEEPROMData = le16_to_cpu(*pEEPROMData);
605
606 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
607 return checksum;
608}
609
610static const u32 PHYAddr[2] = {
611 PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
612};
613
614static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
615{
616 struct ql3xxx_port_registers __iomem *port_regs =
617 qdev->mem_map_registers;
618 u32 temp;
619 int count = 1000;
620
621 while (count) {
622 temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
623 if (!(temp & MAC_MII_STATUS_BSY))
624 return 0;
625 udelay(10);
626 count--;
627 }
628 return -1;
629}
630
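/*
 * Enable MII auto-scan so the MAC hardware keeps polling the PHY/PETBI
 * status register on its own (cycling through both ports when present).
 */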
631static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
632{
633 struct ql3xxx_port_registers __iomem *port_regs =
634 qdev->mem_map_registers;
635 u32 scanControl;
636
637 if (qdev->numPorts > 1) {
638 /* Auto scan will cycle through multiple ports */
639 scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
640 } else {
641 scanControl = MAC_MII_CONTROL_SC;
642 }
643
644 /*
645	 * Scan register 1 of the PHY/PETBI and set up to scan both
646	 * devices.  The autoscan starts at the first register, completes
647	 * the last one, then rolls over to the first.
649 */
650 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
651 PHYAddr[0] | MII_SCAN_REGISTER);
652
653 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
654 (scanControl) |
655 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
656}
657
658static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
659{
660 u8 ret;
661 struct ql3xxx_port_registers __iomem *port_regs =
662 qdev->mem_map_registers;
663
664 /* See if scan mode is enabled before we turn it off */
665 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
666 (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
667 /* Scan is enabled */
668 ret = 1;
669 } else {
670 /* Scan is disabled */
671 ret = 0;
672 }
673
674 /*
675 * When disabling scan mode you must first change the MII register
676 * address
677 */
678 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
679 PHYAddr[0] | MII_SCAN_REGISTER);
680
681 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
682 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
683 MAC_MII_CONTROL_RC) << 16));
684
685 return ret;
686}
687
688static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
Ron Mercer3efedf22007-03-26 12:43:52 -0700689 u16 regAddr, u16 value, u32 phyAddr)
Ron Mercer5a4faa872006-07-25 00:40:21 -0700690{
691 struct ql3xxx_port_registers __iomem *port_regs =
692 qdev->mem_map_registers;
693 u8 scanWasEnabled;
694
695 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
696
697 if (ql_wait_for_mii_ready(qdev)) {
698 if (netif_msg_link(qdev))
699 printk(KERN_WARNING PFX
700 "%s Timed out waiting for management port to "
701 "get free before issuing command.\n",
702 qdev->ndev->name);
703 return -1;
704 }
705
706 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
Ron Mercer3efedf22007-03-26 12:43:52 -0700707 phyAddr | regAddr);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700708
709 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
710
711 /* Wait for write to complete 9/10/04 SJP */
712 if (ql_wait_for_mii_ready(qdev)) {
713 if (netif_msg_link(qdev))
714 printk(KERN_WARNING PFX
715 "%s: Timed out waiting for management port to"
716 "get free before issuing command.\n",
717 qdev->ndev->name);
718 return -1;
719 }
720
721 if (scanWasEnabled)
722 ql_mii_enable_scan_mode(qdev);
723
724 return 0;
725}
726
727static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
Ron Mercer3efedf22007-03-26 12:43:52 -0700728 u16 * value, u32 phyAddr)
Ron Mercer5a4faa872006-07-25 00:40:21 -0700729{
730 struct ql3xxx_port_registers __iomem *port_regs =
731 qdev->mem_map_registers;
732 u8 scanWasEnabled;
733 u32 temp;
734
735 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
736
737 if (ql_wait_for_mii_ready(qdev)) {
738 if (netif_msg_link(qdev))
739 printk(KERN_WARNING PFX
740 "%s: Timed out waiting for management port to "
741 "get free before issuing command.\n",
742 qdev->ndev->name);
743 return -1;
744 }
745
746 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
Ron Mercer3efedf22007-03-26 12:43:52 -0700747 phyAddr | regAddr);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700748
749 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
750 (MAC_MII_CONTROL_RC << 16));
751
752 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
753 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
754
755 /* Wait for the read to complete */
756 if (ql_wait_for_mii_ready(qdev)) {
757 if (netif_msg_link(qdev))
758 printk(KERN_WARNING PFX
759 "%s: Timed out waiting for management port to "
760 "get free after issuing command.\n",
761 qdev->ndev->name);
762 return -1;
763 }
764
765 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
766 *value = (u16) temp;
767
768 if (scanWasEnabled)
769 ql_mii_enable_scan_mode(qdev);
770
771 return 0;
772}
773
774static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
775{
776 struct ql3xxx_port_registers __iomem *port_regs =
777 qdev->mem_map_registers;
778
779 ql_mii_disable_scan_mode(qdev);
780
781 if (ql_wait_for_mii_ready(qdev)) {
782 if (netif_msg_link(qdev))
783 printk(KERN_WARNING PFX
784 "%s: Timed out waiting for management port to "
785 "get free before issuing command.\n",
786 qdev->ndev->name);
787 return -1;
788 }
789
790 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
791 qdev->PHYAddr | regAddr);
792
793 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
794
795 /* Wait for write to complete. */
796 if (ql_wait_for_mii_ready(qdev)) {
797 if (netif_msg_link(qdev))
798 printk(KERN_WARNING PFX
799 "%s: Timed out waiting for management port to "
800 "get free before issuing command.\n",
801 qdev->ndev->name);
802 return -1;
803 }
804
805 ql_mii_enable_scan_mode(qdev);
806
807 return 0;
808}
809
810static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
811{
812 u32 temp;
813 struct ql3xxx_port_registers __iomem *port_regs =
814 qdev->mem_map_registers;
815
816 ql_mii_disable_scan_mode(qdev);
817
818 if (ql_wait_for_mii_ready(qdev)) {
819 if (netif_msg_link(qdev))
820 printk(KERN_WARNING PFX
821 "%s: Timed out waiting for management port to "
822 "get free before issuing command.\n",
823 qdev->ndev->name);
824 return -1;
825 }
826
827 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
828 qdev->PHYAddr | regAddr);
829
830 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
831 (MAC_MII_CONTROL_RC << 16));
832
833 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
834 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
835
836 /* Wait for the read to complete */
837 if (ql_wait_for_mii_ready(qdev)) {
838 if (netif_msg_link(qdev))
839 printk(KERN_WARNING PFX
840 "%s: Timed out waiting for management port to "
841 "get free before issuing command.\n",
842 qdev->ndev->name);
843 return -1;
844 }
845
846 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
847 *value = (u16) temp;
848
849 ql_mii_enable_scan_mode(qdev);
850
851 return 0;
852}
853
854static void ql_petbi_reset(struct ql3_adapter *qdev)
855{
856 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
857}
858
859static void ql_petbi_start_neg(struct ql3_adapter *qdev)
860{
861 u16 reg;
862
863 /* Enable Auto-negotiation sense */
864 ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
865 reg |= PETBI_TBI_AUTO_SENSE;
866 ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
867
868 ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
869 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
870
871 ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
872 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
873 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
874
875}
876
Ron Mercer3efedf22007-03-26 12:43:52 -0700877static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
Ron Mercer5a4faa872006-07-25 00:40:21 -0700878{
879 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
Ron Mercer3efedf22007-03-26 12:43:52 -0700880 PHYAddr[qdev->mac_index]);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700881}
882
Ron Mercer3efedf22007-03-26 12:43:52 -0700883static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
Ron Mercer5a4faa872006-07-25 00:40:21 -0700884{
885 u16 reg;
886
887 /* Enable Auto-negotiation sense */
Ron Mercer3efedf22007-03-26 12:43:52 -0700888 ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
889 PHYAddr[qdev->mac_index]);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700890 reg |= PETBI_TBI_AUTO_SENSE;
Ron Mercer3efedf22007-03-26 12:43:52 -0700891 ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
892 PHYAddr[qdev->mac_index]);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700893
894 ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
Ron Mercer3efedf22007-03-26 12:43:52 -0700895 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
896 PHYAddr[qdev->mac_index]);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700897
898 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
899 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
900 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
Ron Mercer3efedf22007-03-26 12:43:52 -0700901 PHYAddr[qdev->mac_index]);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700902}
903
904static void ql_petbi_init(struct ql3_adapter *qdev)
905{
906 ql_petbi_reset(qdev);
907 ql_petbi_start_neg(qdev);
908}
909
Ron Mercer3efedf22007-03-26 12:43:52 -0700910static void ql_petbi_init_ex(struct ql3_adapter *qdev)
Ron Mercer5a4faa872006-07-25 00:40:21 -0700911{
Ron Mercer3efedf22007-03-26 12:43:52 -0700912 ql_petbi_reset_ex(qdev);
913 ql_petbi_start_neg_ex(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -0700914}
915
916static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
917{
918 u16 reg;
919
920 if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
921 return 0;
922
923 return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
924}
925
Ron Mercer3efedf22007-03-26 12:43:52 -0700926static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
927{
928 printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name);
929 /* power down device bit 11 = 1 */
930 ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
931 /* enable diagnostic mode bit 2 = 1 */
932 ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
933 /* 1000MB amplitude adjust (see Agere errata) */
934 ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
935 /* 1000MB amplitude adjust (see Agere errata) */
936 ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
937 /* 100MB amplitude adjust (see Agere errata) */
938 ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
939 /* 100MB amplitude adjust (see Agere errata) */
940 ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
941 /* 10MB amplitude adjust (see Agere errata) */
942 ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
943 /* 10MB amplitude adjust (see Agere errata) */
944 ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
945 /* point to hidden reg 0x2806 */
946 ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
947 /* Write new PHYAD w/bit 5 set */
948 ql_mii_write_reg_ex(qdev, 0x11, 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
949 /*
950 * Disable diagnostic mode bit 2 = 0
951 * Power up device bit 11 = 0
952 * Link up (on) and activity (blink)
953 */
954 ql_mii_write_reg(qdev, 0x12, 0x840a);
955 ql_mii_write_reg(qdev, 0x00, 0x1140);
956 ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
957}
958
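/*
 * Identify the attached PHY: extract the OUI and model number from the
 * two PHY ID registers and look them up in the PHY_DEVICES table.
 */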
959static PHY_DEVICE_et getPhyType (struct ql3_adapter *qdev,
960 u16 phyIdReg0, u16 phyIdReg1)
961{
962 PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;
963 u32 oui;
964 u16 model;
965 int i;
966
967 if (phyIdReg0 == 0xffff) {
968 return result;
969 }
970
971 if (phyIdReg1 == 0xffff) {
972 return result;
973 }
974
975 /* oui is split between two registers */
976 oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
977
978 model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
979
980 /* Scan table for this PHY */
981 for(i = 0; i < MAX_PHY_DEV_TYPES; i++) {
982 if ((oui == PHY_DEVICES[i].phyIdOUI) && (model == PHY_DEVICES[i].phyIdModel))
983 {
984 result = PHY_DEVICES[i].phyDevice;
985
986 printk(KERN_INFO "%s: Phy: %s\n",
987 qdev->ndev->name, PHY_DEVICES[i].name);
988
989 break;
990 }
991 }
992
993 return result;
994}
995
Ron Mercer5a4faa872006-07-25 00:40:21 -0700996static int ql_phy_get_speed(struct ql3_adapter *qdev)
997{
998 u16 reg;
999
Ron Mercer3efedf22007-03-26 12:43:52 -07001000 switch(qdev->phyType) {
1001 case PHY_AGERE_ET1011C:
1002 {
1003 if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
1004 return 0;
1005
1006 reg = (reg >> 8) & 3;
1007 break;
1008 }
1009 default:
Ron Mercer5a4faa872006-07-25 00:40:21 -07001010 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
1011 return 0;
1012
1013 reg = (((reg & 0x18) >> 3) & 3);
Ron Mercer3efedf22007-03-26 12:43:52 -07001014 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07001015
Ron Mercer3efedf22007-03-26 12:43:52 -07001016 switch(reg) {
1017 case 2:
Ron Mercer5a4faa872006-07-25 00:40:21 -07001018 return SPEED_1000;
Ron Mercer3efedf22007-03-26 12:43:52 -07001019 case 1:
Ron Mercer5a4faa872006-07-25 00:40:21 -07001020 return SPEED_100;
Ron Mercer3efedf22007-03-26 12:43:52 -07001021 case 0:
Ron Mercer5a4faa872006-07-25 00:40:21 -07001022 return SPEED_10;
Ron Mercer3efedf22007-03-26 12:43:52 -07001023 default:
Ron Mercer5a4faa872006-07-25 00:40:21 -07001024 return -1;
Ron Mercer3efedf22007-03-26 12:43:52 -07001025 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07001026}
1027
1028static int ql_is_full_dup(struct ql3_adapter *qdev)
1029{
1030 u16 reg;
1031
Ron Mercer3efedf22007-03-26 12:43:52 -07001032 switch(qdev->phyType) {
1033 case PHY_AGERE_ET1011C:
1034 {
1035 if (ql_mii_read_reg(qdev, 0x1A, &reg))
1036 return 0;
1037
1038 return ((reg & 0x0080) && (reg & 0x1000)) != 0;
1039 }
1040 case PHY_VITESSE_VSC8211:
1041 default:
1042 {
1043 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
1044 return 0;
1045 return (reg & PHY_AUX_DUPLEX_STAT) != 0;
1046 }
1047 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07001048}
1049
1050static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
1051{
1052 u16 reg;
1053
1054 if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
1055 return 0;
1056
1057 return (reg & PHY_NEG_PAUSE) != 0;
1058}
1059
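/*
 * Probe the PHY ID registers to work out which PHY is fitted; Agere
 * parts answer at a different MII address and need extra setup.
 */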
Ron Mercer3efedf22007-03-26 12:43:52 -07001060static int PHY_Setup(struct ql3_adapter *qdev)
1061{
1062 u16 reg1;
1063 u16 reg2;
1064 bool agereAddrChangeNeeded = false;
1065 u32 miiAddr = 0;
1066 int err;
1067
1068 /* Determine the PHY we are using by reading the ID's */
1069 err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
1070 if(err != 0) {
1071 printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",
1072 qdev->ndev->name);
1073 return err;
1074 }
1075
1076 err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
1077 if(err != 0) {
1078		printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG\n",
1079 qdev->ndev->name);
1080 return err;
1081 }
1082
1083	/* Check if we have an Agere PHY */
1084 if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
1085
1086		/* Determine which MII address we should be using,
1087		   based on the index of the card */
1088 if (qdev->mac_index == 0) {
1089 miiAddr = MII_AGERE_ADDR_1;
1090 } else {
1091 miiAddr = MII_AGERE_ADDR_2;
1092 }
1093
1094		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
1095 if(err != 0) {
1096 printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",
1097 qdev->ndev->name);
1098 return err;
1099 }
1100
1101 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
1102 if(err != 0) {
1103			printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG after Agere detected\n",
1104 qdev->ndev->name);
1105 return err;
1106 }
1107
1108 /* We need to remember to initialize the Agere PHY */
1109 agereAddrChangeNeeded = true;
1110 }
1111
1112 /* Determine the particular PHY we have on board to apply
1113 PHY specific initializations */
1114 qdev->phyType = getPhyType(qdev, reg1, reg2);
1115
1116 if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
1117 /* need this here so address gets changed */
1118 phyAgereSpecificInit(qdev, miiAddr);
1119 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
1120 printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
1121 return -EIO;
1122 }
1123
1124 return 0;
1125}
1126
Ron Mercer5a4faa872006-07-25 00:40:21 -07001127/*
1128 * Caller holds hw_lock.
1129 */
1130static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
1131{
1132 struct ql3xxx_port_registers __iomem *port_regs =
1133 qdev->mem_map_registers;
1134 u32 value;
1135
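	/*
	 * The MAC config registers use a mask/value convention: the upper
	 * 16 bits select which bits to modify and the lower 16 bits give
	 * the new value, so enable sets PE and disable clears it.
	 */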
1136 if (enable)
1137 value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
1138 else
1139 value = (MAC_CONFIG_REG_PE << 16);
1140
1141 if (qdev->mac_index)
1142 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1143 else
1144 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1145}
1146
1147/*
1148 * Caller holds hw_lock.
1149 */
1150static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
1151{
1152 struct ql3xxx_port_registers __iomem *port_regs =
1153 qdev->mem_map_registers;
1154 u32 value;
1155
1156 if (enable)
1157 value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
1158 else
1159 value = (MAC_CONFIG_REG_SR << 16);
1160
1161 if (qdev->mac_index)
1162 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1163 else
1164 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1165}
1166
1167/*
1168 * Caller holds hw_lock.
1169 */
1170static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
1171{
1172 struct ql3xxx_port_registers __iomem *port_regs =
1173 qdev->mem_map_registers;
1174 u32 value;
1175
1176 if (enable)
1177 value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
1178 else
1179 value = (MAC_CONFIG_REG_GM << 16);
1180
1181 if (qdev->mac_index)
1182 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1183 else
1184 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1185}
1186
1187/*
1188 * Caller holds hw_lock.
1189 */
1190static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
1191{
1192 struct ql3xxx_port_registers __iomem *port_regs =
1193 qdev->mem_map_registers;
1194 u32 value;
1195
1196 if (enable)
1197 value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
1198 else
1199 value = (MAC_CONFIG_REG_FD << 16);
1200
1201 if (qdev->mac_index)
1202 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1203 else
1204 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1205}
1206
1207/*
1208 * Caller holds hw_lock.
1209 */
1210static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
1211{
1212 struct ql3xxx_port_registers __iomem *port_regs =
1213 qdev->mem_map_registers;
1214 u32 value;
1215
1216 if (enable)
1217 value =
1218 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
1219 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
1220 else
1221 value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
1222
1223 if (qdev->mac_index)
1224 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1225 else
1226 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1227}
1228
1229/*
1230 * Caller holds hw_lock.
1231 */
1232static int ql_is_fiber(struct ql3_adapter *qdev)
1233{
1234 struct ql3xxx_port_registers __iomem *port_regs =
1235 qdev->mem_map_registers;
1236 u32 bitToCheck = 0;
1237 u32 temp;
1238
1239 switch (qdev->mac_index) {
1240 case 0:
1241 bitToCheck = PORT_STATUS_SM0;
1242 break;
1243 case 1:
1244 bitToCheck = PORT_STATUS_SM1;
1245 break;
1246 }
1247
1248 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1249 return (temp & bitToCheck) != 0;
1250}
1251
1252static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1253{
1254 u16 reg;
1255 ql_mii_read_reg(qdev, 0x00, &reg);
1256 return (reg & 0x1000) != 0;
1257}
1258
1259/*
1260 * Caller holds hw_lock.
1261 */
1262static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1263{
1264 struct ql3xxx_port_registers __iomem *port_regs =
1265 qdev->mem_map_registers;
1266 u32 bitToCheck = 0;
1267 u32 temp;
1268
1269 switch (qdev->mac_index) {
1270 case 0:
1271 bitToCheck = PORT_STATUS_AC0;
1272 break;
1273 case 1:
1274 bitToCheck = PORT_STATUS_AC1;
1275 break;
1276 }
1277
1278 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1279 if (temp & bitToCheck) {
1280 if (netif_msg_link(qdev))
1281 printk(KERN_INFO PFX
1282 "%s: Auto-Negotiate complete.\n",
1283 qdev->ndev->name);
1284 return 1;
1285 } else {
1286 if (netif_msg_link(qdev))
1287 printk(KERN_WARNING PFX
1288 "%s: Auto-Negotiate incomplete.\n",
1289 qdev->ndev->name);
1290 return 0;
1291 }
1292}
1293
1294/*
1295 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
1296 */
1297static int ql_is_neg_pause(struct ql3_adapter *qdev)
1298{
1299 if (ql_is_fiber(qdev))
1300 return ql_is_petbi_neg_pause(qdev);
1301 else
1302 return ql_is_phy_neg_pause(qdev);
1303}
1304
1305static int ql_auto_neg_error(struct ql3_adapter *qdev)
1306{
1307 struct ql3xxx_port_registers __iomem *port_regs =
1308 qdev->mem_map_registers;
1309 u32 bitToCheck = 0;
1310 u32 temp;
1311
1312 switch (qdev->mac_index) {
1313 case 0:
1314 bitToCheck = PORT_STATUS_AE0;
1315 break;
1316 case 1:
1317 bitToCheck = PORT_STATUS_AE1;
1318 break;
1319 }
1320 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1321 return (temp & bitToCheck) != 0;
1322}
1323
1324static u32 ql_get_link_speed(struct ql3_adapter *qdev)
1325{
1326 if (ql_is_fiber(qdev))
1327 return SPEED_1000;
1328 else
1329 return ql_phy_get_speed(qdev);
1330}
1331
1332static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1333{
1334 if (ql_is_fiber(qdev))
1335 return 1;
1336 else
1337 return ql_is_full_dup(qdev);
1338}
1339
1340/*
1341 * Caller holds hw_lock.
1342 */
1343static int ql_link_down_detect(struct ql3_adapter *qdev)
1344{
1345 struct ql3xxx_port_registers __iomem *port_regs =
1346 qdev->mem_map_registers;
1347 u32 bitToCheck = 0;
1348 u32 temp;
1349
1350 switch (qdev->mac_index) {
1351 case 0:
1352 bitToCheck = ISP_CONTROL_LINK_DN_0;
1353 break;
1354 case 1:
1355 bitToCheck = ISP_CONTROL_LINK_DN_1;
1356 break;
1357 }
1358
1359 temp =
1360 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
1361 return (temp & bitToCheck) != 0;
1362}
1363
1364/*
1365 * Caller holds hw_lock.
1366 */
1367static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1368{
1369 struct ql3xxx_port_registers __iomem *port_regs =
1370 qdev->mem_map_registers;
1371
1372 switch (qdev->mac_index) {
1373 case 0:
1374 ql_write_common_reg(qdev,
1375 &port_regs->CommonRegs.ispControlStatus,
1376 (ISP_CONTROL_LINK_DN_0) |
1377 (ISP_CONTROL_LINK_DN_0 << 16));
1378 break;
1379
1380 case 1:
1381 ql_write_common_reg(qdev,
1382 &port_regs->CommonRegs.ispControlStatus,
1383 (ISP_CONTROL_LINK_DN_1) |
1384 (ISP_CONTROL_LINK_DN_1 << 16));
1385 break;
1386
1387 default:
1388 return 1;
1389 }
1390
1391 return 0;
1392}
1393
1394/*
1395 * Caller holds hw_lock.
1396 */
Ron Mercer3efedf22007-03-26 12:43:52 -07001397static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
Ron Mercer5a4faa872006-07-25 00:40:21 -07001398{
1399 struct ql3xxx_port_registers __iomem *port_regs =
1400 qdev->mem_map_registers;
1401 u32 bitToCheck = 0;
1402 u32 temp;
1403
Ron Mercer3efedf22007-03-26 12:43:52 -07001404 switch (qdev->mac_index) {
Ron Mercer5a4faa872006-07-25 00:40:21 -07001405 case 0:
1406 bitToCheck = PORT_STATUS_F1_ENABLED;
1407 break;
1408 case 1:
1409 bitToCheck = PORT_STATUS_F3_ENABLED;
1410 break;
1411 default:
1412 break;
1413 }
1414
1415 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1416 if (temp & bitToCheck) {
1417 if (netif_msg_link(qdev))
1418 printk(KERN_DEBUG PFX
1419 "%s: is not link master.\n", qdev->ndev->name);
1420 return 0;
1421 } else {
1422 if (netif_msg_link(qdev))
1423 printk(KERN_DEBUG PFX
1424 "%s: is link master.\n", qdev->ndev->name);
1425 return 1;
1426 }
1427}
1428
Ron Mercer3efedf22007-03-26 12:43:52 -07001429static void ql_phy_reset_ex(struct ql3_adapter *qdev)
Ron Mercer5a4faa872006-07-25 00:40:21 -07001430{
Ron Mercer3efedf22007-03-26 12:43:52 -07001431 ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
1432 PHYAddr[qdev->mac_index]);
Ron Mercer5a4faa872006-07-25 00:40:21 -07001433}
1434
Ron Mercer3efedf22007-03-26 12:43:52 -07001435static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
Ron Mercer5a4faa872006-07-25 00:40:21 -07001436{
1437 u16 reg;
Ron Mercer3efedf22007-03-26 12:43:52 -07001438 u16 portConfiguration;
Ron Mercer5a4faa872006-07-25 00:40:21 -07001439
Ron Mercer3efedf22007-03-26 12:43:52 -07001440 if(qdev->phyType == PHY_AGERE_ET1011C) {
1441 /* turn off external loopback */
1442 ql_mii_write_reg(qdev, 0x13, 0x0000);
1443 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07001444
Ron Mercer3efedf22007-03-26 12:43:52 -07001445 if(qdev->mac_index == 0)
1446 portConfiguration = qdev->nvram_data.macCfg_port0.portConfiguration;
1447 else
1448 portConfiguration = qdev->nvram_data.macCfg_port1.portConfiguration;
1449
1450	/* Some HBAs in the field have this set to 0 and need it
1451	   reinterpreted with a default value */
1452 if(portConfiguration == 0)
1453 portConfiguration = PORT_CONFIG_DEFAULT;
1454
1455 /* Set the 1000 advertisements */
1456 ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
1457 PHYAddr[qdev->mac_index]);
1458 reg &= ~PHY_GIG_ALL_PARAMS;
1459
1460	if((portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) &&
1461	   (portConfiguration & PORT_CONFIG_1000MB_SPEED)) {
1463 reg |= PHY_GIG_ADV_1000F;
1464 }
1465
1466	if((portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) &&
1467	   (portConfiguration & PORT_CONFIG_1000MB_SPEED)) {
1469 reg |= PHY_GIG_ADV_1000H;
1470 }
1471
1472 ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
1473 PHYAddr[qdev->mac_index]);
1474
1475 /* Set the 10/100 & pause negotiation advertisements */
1476 ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
1477 PHYAddr[qdev->mac_index]);
1478 reg &= ~PHY_NEG_ALL_PARAMS;
1479
1480 if(portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
1481 reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
1482
1483 if(portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
1484 if(portConfiguration & PORT_CONFIG_100MB_SPEED)
1485 reg |= PHY_NEG_ADV_100F;
1486
1487 if(portConfiguration & PORT_CONFIG_10MB_SPEED)
1488 reg |= PHY_NEG_ADV_10F;
1489 }
1490
1491 if(portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
1492 if(portConfiguration & PORT_CONFIG_100MB_SPEED)
1493 reg |= PHY_NEG_ADV_100H;
1494
1495 if(portConfiguration & PORT_CONFIG_10MB_SPEED)
1496 reg |= PHY_NEG_ADV_10H;
1497 }
1498
1499 if(portConfiguration &
1500 PORT_CONFIG_1000MB_SPEED) {
1501 reg |= 1;
1502 }
1503
1504 ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
1505 PHYAddr[qdev->mac_index]);
1506
1507 ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
1508
1509 ql_mii_write_reg_ex(qdev, CONTROL_REG,
1510 reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
1511 PHYAddr[qdev->mac_index]);
Ron Mercer5a4faa872006-07-25 00:40:21 -07001512}
1513
Ron Mercer3efedf22007-03-26 12:43:52 -07001514static void ql_phy_init_ex(struct ql3_adapter *qdev)
Ron Mercer5a4faa872006-07-25 00:40:21 -07001515{
Ron Mercer3efedf22007-03-26 12:43:52 -07001516 ql_phy_reset_ex(qdev);
1517 PHY_Setup(qdev);
1518 ql_phy_start_neg_ex(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -07001519}
1520
1521/*
1522 * Caller holds hw_lock.
1523 */
1524static u32 ql_get_link_state(struct ql3_adapter *qdev)
1525{
1526 struct ql3xxx_port_registers __iomem *port_regs =
1527 qdev->mem_map_registers;
1528 u32 bitToCheck = 0;
1529 u32 temp, linkState;
1530
1531 switch (qdev->mac_index) {
1532 case 0:
1533 bitToCheck = PORT_STATUS_UP0;
1534 break;
1535 case 1:
1536 bitToCheck = PORT_STATUS_UP1;
1537 break;
1538 }
1539 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1540 if (temp & bitToCheck) {
1541 linkState = LS_UP;
1542 } else {
1543 linkState = LS_DOWN;
1544 if (netif_msg_link(qdev))
1545 printk(KERN_WARNING PFX
1546 "%s: Link is down.\n", qdev->ndev->name);
1547 }
1548 return linkState;
1549}
1550
1551static int ql_port_start(struct ql3_adapter *qdev)
1552{
1553 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1554 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
Ron Mercer3efedf22007-03-26 12:43:52 -07001555 2) << 7)) {
1556 printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
1557 qdev->ndev->name);
Ron Mercer5a4faa872006-07-25 00:40:21 -07001558 return -1;
Ron Mercer3efedf22007-03-26 12:43:52 -07001559 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07001560
1561 if (ql_is_fiber(qdev)) {
1562 ql_petbi_init(qdev);
1563 } else {
1564 /* Copper port */
Ron Mercer3efedf22007-03-26 12:43:52 -07001565 ql_phy_init_ex(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -07001566 }
1567
1568 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1569 return 0;
1570}
1571
1572static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1573{
1574
1575 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1576 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1577 2) << 7))
1578 return -1;
1579
1580 if (!ql_auto_neg_error(qdev)) {
1581 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1582 /* configure the MAC */
1583 if (netif_msg_link(qdev))
1584 printk(KERN_DEBUG PFX
1585 "%s: Configuring link.\n",
1586 qdev->ndev->
1587 name);
1588 ql_mac_cfg_soft_reset(qdev, 1);
1589 ql_mac_cfg_gig(qdev,
1590 (ql_get_link_speed
1591 (qdev) ==
1592 SPEED_1000));
1593 ql_mac_cfg_full_dup(qdev,
1594 ql_is_link_full_dup
1595 (qdev));
1596 ql_mac_cfg_pause(qdev,
1597 ql_is_neg_pause
1598 (qdev));
1599 ql_mac_cfg_soft_reset(qdev, 0);
1600
1601 /* enable the MAC */
1602 if (netif_msg_link(qdev))
1603 printk(KERN_DEBUG PFX
1604 "%s: Enabling mac.\n",
1605 qdev->ndev->
1606 name);
1607 ql_mac_enable(qdev, 1);
1608 }
1609
1610 if (netif_msg_link(qdev))
1611 printk(KERN_DEBUG PFX
1612 "%s: Change port_link_state LS_DOWN to LS_UP.\n",
1613 qdev->ndev->name);
1614 qdev->port_link_state = LS_UP;
1615 netif_start_queue(qdev->ndev);
1616 netif_carrier_on(qdev->ndev);
1617 if (netif_msg_link(qdev))
1618 printk(KERN_INFO PFX
1619 "%s: Link is up at %d Mbps, %s duplex.\n",
1620 qdev->ndev->name,
1621 ql_get_link_speed(qdev),
1622 ql_is_link_full_dup(qdev)
1623 ? "full" : "half");
1624
1625 } else { /* Remote error detected */
1626
1627 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1628 if (netif_msg_link(qdev))
1629 printk(KERN_DEBUG PFX
1630 "%s: Remote error detected. "
1631 "Calling ql_port_start().\n",
1632 qdev->ndev->
1633 name);
1634 /*
1635 * ql_port_start() is shared code and needs
1636				 * to lock the PHY on its own.
1637 */
1638 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1639 if(ql_port_start(qdev)) {/* Restart port */
1640 return -1;
1641 } else
1642 return 0;
1643 }
1644 }
1645 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1646 return 0;
1647}
1648
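/*
 * Link state machine: compare the current hardware link state with the
 * driver's cached port_link_state and, on a transition, either finish
 * auto-negotiation and enable the MAC or mark the link down.
 */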
1649static void ql_link_state_machine(struct ql3_adapter *qdev)
1650{
1651 u32 curr_link_state;
1652 unsigned long hw_flags;
1653
1654 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1655
1656 curr_link_state = ql_get_link_state(qdev);
1657
1658 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
1659 if (netif_msg_link(qdev))
1660 printk(KERN_INFO PFX
1661 "%s: Reset in progress, skip processing link "
1662 "state.\n", qdev->ndev->name);
Benjamin Li04f10772007-02-26 11:06:35 -08001663
1664 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
Ron Mercer5a4faa872006-07-25 00:40:21 -07001665 return;
1666 }
1667
1668 switch (qdev->port_link_state) {
1669 default:
1670 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1671 ql_port_start(qdev);
1672 }
1673 qdev->port_link_state = LS_DOWN;
1674 /* Fall Through */
1675
1676 case LS_DOWN:
1677 if (netif_msg_link(qdev))
1678 printk(KERN_DEBUG PFX
1679 "%s: port_link_state = LS_DOWN.\n",
1680 qdev->ndev->name);
1681 if (curr_link_state == LS_UP) {
1682 if (netif_msg_link(qdev))
1683 printk(KERN_DEBUG PFX
1684 "%s: curr_link_state = LS_UP.\n",
1685 qdev->ndev->name);
1686 if (ql_is_auto_neg_complete(qdev))
1687 ql_finish_auto_neg(qdev);
1688
1689 if (qdev->port_link_state == LS_UP)
1690 ql_link_down_detect_clear(qdev);
1691
1692 }
1693 break;
1694
1695 case LS_UP:
1696 /*
1697 * See if the link is currently down or went down and came
1698 * back up
1699 */
1700 if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
1701 if (netif_msg_link(qdev))
1702 printk(KERN_INFO PFX "%s: Link is down.\n",
1703 qdev->ndev->name);
1704 qdev->port_link_state = LS_DOWN;
1705 }
1706 break;
1707 }
1708 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1709}
1710
1711/*
1712 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1713 */
1714static void ql_get_phy_owner(struct ql3_adapter *qdev)
1715{
Ron Mercer3efedf22007-03-26 12:43:52 -07001716 if (ql_this_adapter_controls_port(qdev))
Ron Mercer5a4faa872006-07-25 00:40:21 -07001717 set_bit(QL_LINK_MASTER,&qdev->flags);
1718 else
1719 clear_bit(QL_LINK_MASTER,&qdev->flags);
1720}
1721
1722/*
1723 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1724 */
1725static void ql_init_scan_mode(struct ql3_adapter *qdev)
1726{
1727 ql_mii_enable_scan_mode(qdev);
1728
1729 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
Ron Mercer3efedf22007-03-26 12:43:52 -07001730 if (ql_this_adapter_controls_port(qdev))
1731 ql_petbi_init_ex(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -07001732 } else {
Ron Mercer3efedf22007-03-26 12:43:52 -07001733 if (ql_this_adapter_controls_port(qdev))
1734 ql_phy_init_ex(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -07001735 }
1736}
1737
1738/*
1739 * MII_Setup needs to be called before taking the PHY out of reset so that the
1740 * management interface clock speed can be set properly. It would be better if
1741 * we had a way to disable MDC until after the PHY is out of reset, but we
1742 * don't have that capability.
1743 */
1744static int ql_mii_setup(struct ql3_adapter *qdev)
1745{
1746 u32 reg;
1747 struct ql3xxx_port_registers __iomem *port_regs =
1748 qdev->mem_map_registers;
1749
1750 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1751 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1752 2) << 7))
1753 return -1;
1754
Ron Mercerbd36b0a2007-01-03 16:26:08 -08001755 if (qdev->device_id == QL3032_DEVICE_ID)
1756 ql_write_page0_reg(qdev,
1757 &port_regs->macMIIMgmtControlReg, 0x0f00000);
1758
Ron Mercer5a4faa872006-07-25 00:40:21 -07001759 /* Divide 125MHz clock by 28 to meet PHY timing requirements */
1760 reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1761
1762 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1763 reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
1764
1765 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1766 return 0;
1767}
1768
1769static u32 ql_supported_modes(struct ql3_adapter *qdev)
1770{
1771 u32 supported;
1772
1773 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1774 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1775 | SUPPORTED_Autoneg;
1776 } else {
1777 supported = SUPPORTED_10baseT_Half
1778 | SUPPORTED_10baseT_Full
1779 | SUPPORTED_100baseT_Half
1780 | SUPPORTED_100baseT_Full
1781 | SUPPORTED_1000baseT_Half
1782 | SUPPORTED_1000baseT_Full
1783 | SUPPORTED_Autoneg | SUPPORTED_TP;
1784 }
1785
1786 return supported;
1787}
1788
1789static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1790{
1791 int status;
1792 unsigned long hw_flags;
1793 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1794 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1795 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
Benjamin Li04f10772007-02-26 11:06:35 -08001796 2) << 7)) {
1797 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
Ron Mercer5a4faa872006-07-25 00:40:21 -07001798 return 0;
Benjamin Li04f10772007-02-26 11:06:35 -08001799 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07001800 status = ql_is_auto_cfg(qdev);
1801 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1802 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1803 return status;
1804}
1805
1806static u32 ql_get_speed(struct ql3_adapter *qdev)
1807{
1808 u32 status;
1809 unsigned long hw_flags;
1810 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1811 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1812 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
Benjamin Li04f10772007-02-26 11:06:35 -08001813 2) << 7)) {
1814 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
Ron Mercer5a4faa872006-07-25 00:40:21 -07001815 return 0;
Benjamin Li04f10772007-02-26 11:06:35 -08001816 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07001817 status = ql_get_link_speed(qdev);
1818 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1819 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1820 return status;
1821}
1822
1823static int ql_get_full_dup(struct ql3_adapter *qdev)
1824{
1825 int status;
1826 unsigned long hw_flags;
1827 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1828 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1829 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
Benjamin Li04f10772007-02-26 11:06:35 -08001830 2) << 7)) {
1831 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
Ron Mercer5a4faa872006-07-25 00:40:21 -07001832 return 0;
Benjamin Li04f10772007-02-26 11:06:35 -08001833 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07001834 status = ql_is_link_full_dup(qdev);
1835 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1836 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1837 return status;
1838}
1839
1840
1841static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1842{
1843 struct ql3_adapter *qdev = netdev_priv(ndev);
1844
1845 ecmd->transceiver = XCVR_INTERNAL;
1846 ecmd->supported = ql_supported_modes(qdev);
1847
1848 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1849 ecmd->port = PORT_FIBRE;
1850 } else {
1851 ecmd->port = PORT_TP;
1852 ecmd->phy_address = qdev->PHYAddr;
1853 }
1854 ecmd->advertising = ql_supported_modes(qdev);
1855 ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1856 ecmd->speed = ql_get_speed(qdev);
1857 ecmd->duplex = ql_get_full_dup(qdev);
1858 return 0;
1859}
1860
1861static void ql_get_drvinfo(struct net_device *ndev,
1862 struct ethtool_drvinfo *drvinfo)
1863{
1864 struct ql3_adapter *qdev = netdev_priv(ndev);
1865 strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
1866 strncpy(drvinfo->version, ql3xxx_driver_version, 32);
1867 strncpy(drvinfo->fw_version, "N/A", 32);
1868 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
1869 drvinfo->n_stats = 0;
1870 drvinfo->testinfo_len = 0;
1871 drvinfo->regdump_len = 0;
1872 drvinfo->eedump_len = 0;
1873}
1874
1875static u32 ql_get_msglevel(struct net_device *ndev)
1876{
1877 struct ql3_adapter *qdev = netdev_priv(ndev);
1878 return qdev->msg_enable;
1879}
1880
1881static void ql_set_msglevel(struct net_device *ndev, u32 value)
1882{
1883 struct ql3_adapter *qdev = netdev_priv(ndev);
1884 qdev->msg_enable = value;
1885}
1886
Ron Mercerec826382007-03-26 13:43:01 -07001887static void ql_get_pauseparam(struct net_device *ndev,
1888 struct ethtool_pauseparam *pause)
1889{
1890 struct ql3_adapter *qdev = netdev_priv(ndev);
1891 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1892
1893 u32 reg;
1894 if(qdev->mac_index == 0)
1895 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
1896 else
1897 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
1898
1899 pause->autoneg = ql_get_auto_cfg_status(qdev);
1900 pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
1901 pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
1902}
1903
Jeff Garzik7282d492006-09-13 14:30:00 -04001904static const struct ethtool_ops ql3xxx_ethtool_ops = {
Ron Mercer5a4faa872006-07-25 00:40:21 -07001905 .get_settings = ql_get_settings,
1906 .get_drvinfo = ql_get_drvinfo,
1907 .get_perm_addr = ethtool_op_get_perm_addr,
1908 .get_link = ethtool_op_get_link,
1909 .get_msglevel = ql_get_msglevel,
1910 .set_msglevel = ql_set_msglevel,
Ron Mercerec826382007-03-26 13:43:01 -07001911 .get_pauseparam = ql_get_pauseparam,
Ron Mercer5a4faa872006-07-25 00:40:21 -07001912};
1913
1914static int ql_populate_free_queue(struct ql3_adapter *qdev)
1915{
1916 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
Benjamin Li0f8ab892007-02-26 11:06:40 -08001917 dma_addr_t map;
1918 int err;
Ron Mercer5a4faa872006-07-25 00:40:21 -07001919
1920 while (lrg_buf_cb) {
1921 if (!lrg_buf_cb->skb) {
Benjamin Licd238fa2007-02-26 11:06:33 -08001922 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
1923 qdev->lrg_buffer_len);
Ron Mercer5a4faa872006-07-25 00:40:21 -07001924 if (unlikely(!lrg_buf_cb->skb)) {
1925 printk(KERN_DEBUG PFX
Benjamin Licd238fa2007-02-26 11:06:33 -08001926 "%s: Failed netdev_alloc_skb().\n",
Ron Mercer5a4faa872006-07-25 00:40:21 -07001927 qdev->ndev->name);
1928 break;
1929 } else {
1930 /*
				 * We save some space to copy the ethhdr from
				 * the first buffer
1933 */
1934 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
1935 map = pci_map_single(qdev->pdev,
1936 lrg_buf_cb->skb->data,
1937 qdev->lrg_buffer_len -
1938 QL_HEADER_SPACE,
1939 PCI_DMA_FROMDEVICE);
Benjamin Li0f8ab892007-02-26 11:06:40 -08001940
1941 err = pci_dma_mapping_error(map);
1942 if(err) {
1943 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
1944 qdev->ndev->name, err);
1945 dev_kfree_skb(lrg_buf_cb->skb);
1946 lrg_buf_cb->skb = NULL;
1947 break;
1948 }
1949
1950
Ron Mercer5a4faa872006-07-25 00:40:21 -07001951 lrg_buf_cb->buf_phy_addr_low =
1952 cpu_to_le32(LS_64BITS(map));
1953 lrg_buf_cb->buf_phy_addr_high =
1954 cpu_to_le32(MS_64BITS(map));
1955 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1956 pci_unmap_len_set(lrg_buf_cb, maplen,
1957 qdev->lrg_buffer_len -
1958 QL_HEADER_SPACE);
1959 --qdev->lrg_buf_skb_check;
1960 if (!qdev->lrg_buf_skb_check)
1961 return 1;
1962 }
1963 }
1964 lrg_buf_cb = lrg_buf_cb->next;
1965 }
1966 return 0;
1967}
1968
1969/*
1970 * Caller holds hw_lock.
1971 */
Ron Mercerf67cac02007-03-26 13:42:59 -07001972static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
1973{
1974 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1975 if (qdev->small_buf_release_cnt >= 16) {
1976 while (qdev->small_buf_release_cnt >= 16) {
1977 qdev->small_buf_q_producer_index++;
1978
1979 if (qdev->small_buf_q_producer_index ==
1980 NUM_SBUFQ_ENTRIES)
1981 qdev->small_buf_q_producer_index = 0;
1982 qdev->small_buf_release_cnt -= 8;
1983 }
1984 wmb();
1985 writel(qdev->small_buf_q_producer_index,
1986 &port_regs->CommonRegs.rxSmallQProducerIndex);
1987 }
1988}
1989
1990/*
1991 * Caller holds hw_lock.
1992 */
Ron Mercer5a4faa872006-07-25 00:40:21 -07001993static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1994{
1995 struct bufq_addr_element *lrg_buf_q_ele;
1996 int i;
1997 struct ql_rcv_buf_cb *lrg_buf_cb;
1998 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1999
2000 if ((qdev->lrg_buf_free_count >= 8)
2001 && (qdev->lrg_buf_release_cnt >= 16)) {
2002
2003 if (qdev->lrg_buf_skb_check)
2004 if (!ql_populate_free_queue(qdev))
2005 return;
2006
2007 lrg_buf_q_ele = qdev->lrg_buf_next_free;
2008
2009 while ((qdev->lrg_buf_release_cnt >= 16)
2010 && (qdev->lrg_buf_free_count >= 8)) {
2011
2012 for (i = 0; i < 8; i++) {
2013 lrg_buf_cb =
2014 ql_get_from_lrg_buf_free_list(qdev);
2015 lrg_buf_q_ele->addr_high =
2016 lrg_buf_cb->buf_phy_addr_high;
2017 lrg_buf_q_ele->addr_low =
2018 lrg_buf_cb->buf_phy_addr_low;
2019 lrg_buf_q_ele++;
2020
2021 qdev->lrg_buf_release_cnt--;
2022 }
2023
2024 qdev->lrg_buf_q_producer_index++;
2025
Ron Mercer1357bfc2007-02-26 11:06:37 -08002026 if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
Ron Mercer5a4faa872006-07-25 00:40:21 -07002027 qdev->lrg_buf_q_producer_index = 0;
2028
2029 if (qdev->lrg_buf_q_producer_index ==
Ron Mercer1357bfc2007-02-26 11:06:37 -08002030 (qdev->num_lbufq_entries - 1)) {
Ron Mercer5a4faa872006-07-25 00:40:21 -07002031 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
2032 }
2033 }
Ron Mercerf67cac02007-03-26 13:42:59 -07002034 wmb();
Ron Mercer5a4faa872006-07-25 00:40:21 -07002035 qdev->lrg_buf_next_free = lrg_buf_q_ele;
Ron Mercerf67cac02007-03-26 13:42:59 -07002036 writel(qdev->lrg_buf_q_producer_index,
2037 &port_regs->CommonRegs.rxLargeQProducerIndex);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002038 }
2039}
2040
2041static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
2042 struct ob_mac_iocb_rsp *mac_rsp)
2043{
2044 struct ql_tx_buf_cb *tx_cb;
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002045 int i;
Benjamin Lie8f4df22007-02-26 11:06:42 -08002046 int retval = 0;
Ron Mercer5a4faa872006-07-25 00:40:21 -07002047
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_WARNING "Frame too short, but it was padded and sent.\n");
	}
2051
Ron Mercer5a4faa872006-07-25 00:40:21 -07002052 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
Benjamin Lie8f4df22007-02-26 11:06:42 -08002053
2054 /* Check the transmit response flags for any errors */
2055 if(mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
2056 printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");
2057
2058 qdev->stats.tx_errors++;
2059 retval = -EIO;
2060 goto frame_not_sent;
2061 }
2062
2063 if(tx_cb->seg_count == 0) {
2064 printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);
2065
2066 qdev->stats.tx_errors++;
2067 retval = -EIO;
2068 goto invalid_seg_count;
2069 }
2070
Ron Mercer5a4faa872006-07-25 00:40:21 -07002071 pci_unmap_single(qdev->pdev,
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002072 pci_unmap_addr(&tx_cb->map[0], mapaddr),
2073 pci_unmap_len(&tx_cb->map[0], maplen),
2074 PCI_DMA_TODEVICE);
2075 tx_cb->seg_count--;
2076 if (tx_cb->seg_count) {
2077 for (i = 1; i < tx_cb->seg_count; i++) {
2078 pci_unmap_page(qdev->pdev,
2079 pci_unmap_addr(&tx_cb->map[i],
2080 mapaddr),
2081 pci_unmap_len(&tx_cb->map[i], maplen),
2082 PCI_DMA_TODEVICE);
2083 }
2084 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07002085 qdev->stats.tx_packets++;
2086 qdev->stats.tx_bytes += tx_cb->skb->len;
Benjamin Lie8f4df22007-02-26 11:06:42 -08002087
2088frame_not_sent:
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002089 dev_kfree_skb_irq(tx_cb->skb);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002090 tx_cb->skb = NULL;
Benjamin Lie8f4df22007-02-26 11:06:42 -08002091
2092invalid_seg_count:
Ron Mercer5a4faa872006-07-25 00:40:21 -07002093 atomic_inc(&qdev->tx_count);
2094}
2095
Adrian Bunk36640062007-03-05 02:49:27 +01002096static void ql_get_sbuf(struct ql3_adapter *qdev)
Ron Mercer97916332007-02-26 11:06:38 -08002097{
2098 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
2099 qdev->small_buf_index = 0;
2100 qdev->small_buf_release_cnt++;
2101}
2102
Adrian Bunk36640062007-03-05 02:49:27 +01002103static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
Ron Mercer97916332007-02-26 11:06:38 -08002104{
2105 struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
2106 lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
2107 qdev->lrg_buf_release_cnt++;
2108 if (++qdev->lrg_buf_index == qdev->num_large_buffers)
2109 qdev->lrg_buf_index = 0;
2110 return(lrg_buf_cb);
2111}
2112
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002113/*
2114 * The difference between 3022 and 3032 for inbound completions:
2115 * 3022 uses two buffers per completion. The first buffer contains
2116 * (some) header info, the second the remainder of the headers plus
2117 * the data. For this chip we reserve some space at the top of the
2118 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two. Buffer two is then sent up the stack while
 * buffer one is returned to the hardware to be reused.
 * The 3032 receives all of its data and headers in one buffer for a
2122 * simpler process. 3032 also supports checksum verification as
2123 * can be seen in ql_process_macip_rx_intr().
2124 */
Ron Mercer5a4faa872006-07-25 00:40:21 -07002125static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2126 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
2127{
Ron Mercer5a4faa872006-07-25 00:40:21 -07002128 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2129 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
Ron Mercer5a4faa872006-07-25 00:40:21 -07002130 struct sk_buff *skb;
2131 u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
2132
2133 /*
2134 * Get the inbound address list (small buffer).
2135 */
Ron Mercer97916332007-02-26 11:06:38 -08002136 ql_get_sbuf(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002137
Ron Mercer97916332007-02-26 11:06:38 -08002138 if (qdev->device_id == QL3022_DEVICE_ID)
2139 lrg_buf_cb1 = ql_get_lbuf(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002140
2141 /* start of second buffer */
Ron Mercer97916332007-02-26 11:06:38 -08002142 lrg_buf_cb2 = ql_get_lbuf(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002143 skb = lrg_buf_cb2->skb;
2144
2145 qdev->stats.rx_packets++;
2146 qdev->stats.rx_bytes += length;
2147
2148 skb_put(skb, length);
2149 pci_unmap_single(qdev->pdev,
2150 pci_unmap_addr(lrg_buf_cb2, mapaddr),
2151 pci_unmap_len(lrg_buf_cb2, maplen),
2152 PCI_DMA_FROMDEVICE);
2153 prefetch(skb->data);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002154 skb->ip_summed = CHECKSUM_NONE;
2155 skb->protocol = eth_type_trans(skb, qdev->ndev);
2156
2157 netif_receive_skb(skb);
2158 qdev->ndev->last_rx = jiffies;
2159 lrg_buf_cb2->skb = NULL;
2160
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002161 if (qdev->device_id == QL3022_DEVICE_ID)
2162 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002163 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2164}
2165
2166static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2167 struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
2168{
Ron Mercer5a4faa872006-07-25 00:40:21 -07002169 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
2170 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002171 struct sk_buff *skb1 = NULL, *skb2;
Ron Mercer5a4faa872006-07-25 00:40:21 -07002172 struct net_device *ndev = qdev->ndev;
2173 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
2174 u16 size = 0;
2175
2176 /*
2177 * Get the inbound address list (small buffer).
2178 */
2179
Ron Mercer97916332007-02-26 11:06:38 -08002180 ql_get_sbuf(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002181
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002182 if (qdev->device_id == QL3022_DEVICE_ID) {
2183 /* start of first buffer on 3022 */
Ron Mercer97916332007-02-26 11:06:38 -08002184 lrg_buf_cb1 = ql_get_lbuf(qdev);
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002185 skb1 = lrg_buf_cb1->skb;
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002186 size = ETH_HLEN;
2187 if (*((u16 *) skb1->data) != 0xFFFF)
2188 size += VLAN_ETH_HLEN - ETH_HLEN;
2189 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07002190
2191 /* start of second buffer */
Ron Mercer97916332007-02-26 11:06:38 -08002192 lrg_buf_cb2 = ql_get_lbuf(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002193 skb2 = lrg_buf_cb2->skb;
Ron Mercer5a4faa872006-07-25 00:40:21 -07002194
Ron Mercer5a4faa872006-07-25 00:40:21 -07002195 skb_put(skb2, length); /* Just the second buffer length here. */
2196 pci_unmap_single(qdev->pdev,
2197 pci_unmap_addr(lrg_buf_cb2, mapaddr),
2198 pci_unmap_len(lrg_buf_cb2, maplen),
2199 PCI_DMA_FROMDEVICE);
2200 prefetch(skb2->data);
2201
Ron Mercer5a4faa872006-07-25 00:40:21 -07002202 skb2->ip_summed = CHECKSUM_NONE;
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002203 if (qdev->device_id == QL3022_DEVICE_ID) {
2204 /*
2205 * Copy the ethhdr from first buffer to second. This
2206 * is necessary for 3022 IP completions.
2207 */
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03002208 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
2209 skb_push(skb2, size), size);
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002210 } else {
2211 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
2212 if (checksum &
2213 (IB_IP_IOCB_RSP_3032_ICE |
Ron Mercerb3b15142007-03-26 13:43:00 -07002214 IB_IP_IOCB_RSP_3032_CE)) {
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002215 printk(KERN_ERR
2216 "%s: Bad checksum for this %s packet, checksum = %x.\n",
2217 __func__,
2218 ((checksum &
2219 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
2220 "UDP"),checksum);
Ron Mercerb3b15142007-03-26 13:43:00 -07002221 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
2222 (checksum & IB_IP_IOCB_RSP_3032_UDP &&
2223 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002224 skb2->ip_summed = CHECKSUM_UNNECESSARY;
Ron Mercerb3b15142007-03-26 13:43:00 -07002225 }
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002226 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07002227 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
2228
2229 netif_receive_skb(skb2);
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002230 qdev->stats.rx_packets++;
2231 qdev->stats.rx_bytes += length;
Ron Mercer5a4faa872006-07-25 00:40:21 -07002232 ndev->last_rx = jiffies;
2233 lrg_buf_cb2->skb = NULL;
2234
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002235 if (qdev->device_id == QL3022_DEVICE_ID)
2236 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002237 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
2238}
2239
2240static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2241 int *tx_cleaned, int *rx_cleaned, int work_to_do)
2242{
Ron Mercer5a4faa872006-07-25 00:40:21 -07002243 struct net_rsp_iocb *net_rsp;
2244 struct net_device *ndev = qdev->ndev;
Ron Mercer63b66d12007-02-26 11:06:41 -08002245 int work_done = 0;
Ron Mercer5a4faa872006-07-25 00:40:21 -07002246
2247 /* While there are entries in the completion queue. */
Ron Mercerf67cac02007-03-26 13:42:59 -07002248 while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
Ron Mercer63b66d12007-02-26 11:06:41 -08002249 qdev->rsp_consumer_index) && (work_done < work_to_do)) {
Ron Mercer5a4faa872006-07-25 00:40:21 -07002250
2251 net_rsp = qdev->rsp_current;
2252 switch (net_rsp->opcode) {
2253
2254 case OPCODE_OB_MAC_IOCB_FN0:
2255 case OPCODE_OB_MAC_IOCB_FN2:
2256 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2257 net_rsp);
2258 (*tx_cleaned)++;
2259 break;
2260
2261 case OPCODE_IB_MAC_IOCB:
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002262 case OPCODE_IB_3032_MAC_IOCB:
Ron Mercer5a4faa872006-07-25 00:40:21 -07002263 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2264 net_rsp);
2265 (*rx_cleaned)++;
2266 break;
2267
2268 case OPCODE_IB_IP_IOCB:
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002269 case OPCODE_IB_3032_IP_IOCB:
Ron Mercer5a4faa872006-07-25 00:40:21 -07002270 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2271 net_rsp);
2272 (*rx_cleaned)++;
2273 break;
2274 default:
2275 {
2276 u32 *tmp = (u32 *) net_rsp;
2277 printk(KERN_ERR PFX
2278 "%s: Hit default case, not "
2279 "handled!\n"
2280 " dropping the packet, opcode = "
2281 "%x.\n",
2282 ndev->name, net_rsp->opcode);
2283 printk(KERN_ERR PFX
2284 "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
2285 (unsigned long int)tmp[0],
2286 (unsigned long int)tmp[1],
2287 (unsigned long int)tmp[2],
2288 (unsigned long int)tmp[3]);
2289 }
2290 }
2291
2292 qdev->rsp_consumer_index++;
2293
2294 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2295 qdev->rsp_consumer_index = 0;
2296 qdev->rsp_current = qdev->rsp_q_virt_addr;
2297 } else {
2298 qdev->rsp_current++;
2299 }
Ron Mercer63b66d12007-02-26 11:06:41 -08002300
2301 work_done = *tx_cleaned + *rx_cleaned;
Ron Mercer5a4faa872006-07-25 00:40:21 -07002302 }
2303
Ron Mercerf67cac02007-03-26 13:42:59 -07002304 return work_done;
Ron Mercer5a4faa872006-07-25 00:40:21 -07002305}
2306
2307static int ql_poll(struct net_device *ndev, int *budget)
2308{
2309 struct ql3_adapter *qdev = netdev_priv(ndev);
2310 int work_to_do = min(*budget, ndev->quota);
2311 int rx_cleaned = 0, tx_cleaned = 0;
Ron Mercer63b66d12007-02-26 11:06:41 -08002312 unsigned long hw_flags;
2313 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
Ron Mercer5a4faa872006-07-25 00:40:21 -07002314
2315 if (!netif_carrier_ok(ndev))
2316 goto quit_polling;
2317
2318 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
2319 *budget -= rx_cleaned;
2320 ndev->quota -= rx_cleaned;
2321
Benjamin Lie8f4df22007-02-26 11:06:42 -08002322 if( tx_cleaned + rx_cleaned != work_to_do ||
2323 !netif_running(ndev)) {
Ron Mercer5a4faa872006-07-25 00:40:21 -07002324quit_polling:
2325 netif_rx_complete(ndev);
Ron Mercer63b66d12007-02-26 11:06:41 -08002326
2327 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
Ron Mercerf67cac02007-03-26 13:42:59 -07002328 ql_update_small_bufq_prod_index(qdev);
2329 ql_update_lrg_bufq_prod_index(qdev);
2330 writel(qdev->rsp_consumer_index,
2331 &port_regs->CommonRegs.rspQConsumerIndex);
Ron Mercer63b66d12007-02-26 11:06:41 -08002332 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2333
Ron Mercer5a4faa872006-07-25 00:40:21 -07002334 ql_enable_interrupts(qdev);
2335 return 0;
2336 }
2337 return 1;
2338}
2339
David Howells7d12e782006-10-05 14:55:46 +01002340static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
Ron Mercer5a4faa872006-07-25 00:40:21 -07002341{
2342
2343 struct net_device *ndev = dev_id;
2344 struct ql3_adapter *qdev = netdev_priv(ndev);
2345 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2346 u32 value;
2347 int handled = 1;
2348 u32 var;
2349
2352 value =
2353 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
2354
2355 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2356 spin_lock(&qdev->adapter_lock);
2357 netif_stop_queue(qdev->ndev);
2358 netif_carrier_off(qdev->ndev);
2359 ql_disable_interrupts(qdev);
2360 qdev->port_link_state = LS_DOWN;
2361 set_bit(QL_RESET_ACTIVE,&qdev->flags) ;
2362
2363 if (value & ISP_CONTROL_FE) {
2364 /*
2365 * Chip Fatal Error.
2366 */
2367 var =
2368 ql_read_page0_reg_l(qdev,
2369 &port_regs->PortFatalErrStatus);
2370 printk(KERN_WARNING PFX
2371 "%s: Resetting chip. PortFatalErrStatus "
2372 "register = 0x%x\n", ndev->name, var);
2373 set_bit(QL_RESET_START,&qdev->flags) ;
2374 } else {
2375 /*
2376 * Soft Reset Requested.
2377 */
2378 set_bit(QL_RESET_PER_SCSI,&qdev->flags) ;
2379 printk(KERN_ERR PFX
2380 "%s: Another function issued a reset to the "
2381 "chip. ISR value = %x.\n", ndev->name, value);
2382 }
David Howellsc4028952006-11-22 14:57:56 +00002383 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002384 spin_unlock(&qdev->adapter_lock);
2385 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
Benjamin Lie8f4df22007-02-26 11:06:42 -08002386 ql_disable_interrupts(qdev);
Ron Mercer63b66d12007-02-26 11:06:41 -08002387 if (likely(netif_rx_schedule_prep(ndev))) {
Ron Mercer5a4faa872006-07-25 00:40:21 -07002388 __netif_rx_schedule(ndev);
Ron Mercer63b66d12007-02-26 11:06:41 -08002389 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07002390 } else {
2391 return IRQ_NONE;
2392 }
2393
2394 return IRQ_RETVAL(handled);
2395}
2396
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002397/*
2398 * Get the total number of segments needed for the
2399 * given number of fragments. This is necessary because
2400 * outbound address lists (OAL) will be used when more than
2401 * two frags are given. Each address list has 5 addr/len
 * pairs. The 5th pair in each OAL is used to point to
 * the next OAL if more frags are coming.
2404 * That is why the frags:segment count ratio is not linear.
2405 */
Benjamin Lie8f4df22007-02-26 11:06:42 -08002406static int ql_get_seg_count(struct ql3_adapter *qdev,
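/*
 * Worked example (illustrative only, based on the comment above and on
 * ql_send_map()): an skb with 3 fragments needs 4 addr/len pairs
 * (skb->data plus 3 frags).  The IOCB holds 3 pairs, but its 3rd slot
 * must become a continuation pointer to an OAL once more than 3 pairs
 * are needed, so the layout becomes 2 data pairs in the IOCB, 1
 * continuation pair, and 2 data pairs in the OAL, i.e. 5 segments in
 * total, matching the "case 3" entry below.
 */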
2407 unsigned short frags)
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002408{
Benjamin Lie8f4df22007-02-26 11:06:42 -08002409 if (qdev->device_id == QL3022_DEVICE_ID)
2410 return 1;
2411
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002412 switch(frags) {
2413 case 0: return 1; /* just the skb->data seg */
2414 case 1: return 2; /* skb->data + 1 frag */
2415 case 2: return 3; /* skb->data + 2 frags */
	case 3: return 5;	/* skb->data + 1 frag + 1 OAL containing 2 frags */
2417 case 4: return 6;
2418 case 5: return 7;
2419 case 6: return 8;
2420 case 7: return 10;
2421 case 8: return 11;
2422 case 9: return 12;
2423 case 10: return 13;
2424 case 11: return 15;
2425 case 12: return 16;
2426 case 13: return 17;
2427 case 14: return 18;
2428 case 15: return 20;
2429 case 16: return 21;
2430 case 17: return 22;
2431 case 18: return 23;
2432 }
2433 return -1;
2434}
2435
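/*
 * Assumed summary of ql_hw_csum_setup() below (reading OB_3032MAC_IOCB_REQ_IC
 * as the IP checksum request bit): locate the IP header, skipping a VLAN tag
 * when one is present, then request TCP or UDP checksum insertion by setting
 * the IOCB flags and the IP header offset/length.
 */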
2436static void ql_hw_csum_setup(struct sk_buff *skb,
2437 struct ob_mac_iocb_req *mac_iocb_ptr)
2438{
2439 struct ethhdr *eth;
2440 struct iphdr *ip = NULL;
2441 u8 offset = ETH_HLEN;
2442
2443 eth = (struct ethhdr *)(skb->data);
2444
2445 if (eth->h_proto == __constant_htons(ETH_P_IP)) {
2446 ip = (struct iphdr *)&skb->data[ETH_HLEN];
2447 } else if (eth->h_proto == htons(ETH_P_8021Q) &&
2448 ((struct vlan_ethhdr *)skb->data)->
2449 h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
2450 ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
2451 offset = VLAN_ETH_HLEN;
2452 }
2453
2454 if (ip) {
2455 if (ip->protocol == IPPROTO_TCP) {
Ron Mercer3e71f6d2007-02-26 11:06:39 -08002456 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2457 OB_3032MAC_IOCB_REQ_IC;
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002458 mac_iocb_ptr->ip_hdr_off = offset;
2459 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2460 } else if (ip->protocol == IPPROTO_UDP) {
Ron Mercer3e71f6d2007-02-26 11:06:39 -08002461 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2462 OB_3032MAC_IOCB_REQ_IC;
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002463 mac_iocb_ptr->ip_hdr_off = offset;
2464 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2465 }
2466 }
2467}
2468
2469/*
Ron Mercer3e71f6d2007-02-26 11:06:39 -08002470 * Map the buffers for this transmit. This will return
2471 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002472 */
Ron Mercer3e71f6d2007-02-26 11:06:39 -08002473static int ql_send_map(struct ql3_adapter *qdev,
2474 struct ob_mac_iocb_req *mac_iocb_ptr,
2475 struct ql_tx_buf_cb *tx_cb,
2476 struct sk_buff *skb)
Ron Mercer5a4faa872006-07-25 00:40:21 -07002477{
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002478 struct oal *oal;
2479 struct oal_entry *oal_entry;
Ron Mercer63f779262007-02-28 16:42:17 -08002480 int len = skb_headlen(skb);
Benjamin Li0f8ab892007-02-26 11:06:40 -08002481 dma_addr_t map;
2482 int err;
2483 int completed_segs, i;
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002484 int seg_cnt, seg = 0;
2485 int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
Ron Mercer5a4faa872006-07-25 00:40:21 -07002486
Ron Mercerb6967eb2007-03-26 13:42:58 -07002487 seg_cnt = tx_cb->seg_count;
Ron Mercer3e71f6d2007-02-26 11:06:39 -08002488 /*
2489 * Map the skb buffer first.
2490 */
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002491 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
Benjamin Li0f8ab892007-02-26 11:06:40 -08002492
2493 err = pci_dma_mapping_error(map);
2494 if(err) {
2495 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
2496 qdev->ndev->name, err);
2497
2498 return NETDEV_TX_BUSY;
2499 }
2500
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002501 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2502 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2503 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2504 oal_entry->len = cpu_to_le32(len);
2505 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2506 pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
2507 seg++;
Ron Mercer5a4faa872006-07-25 00:40:21 -07002508
Benjamin Lie8f4df22007-02-26 11:06:42 -08002509 if (seg_cnt == 1) {
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002510 /* Terminate the last segment. */
2511 oal_entry->len =
2512 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2513 } else {
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002514 oal = tx_cb->oal;
Benjamin Li0f8ab892007-02-26 11:06:40 -08002515 for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
2516 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002517 oal_entry++;
2518 if ((seg == 2 && seg_cnt > 3) || /* Check for continuation */
2519 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
2520 (seg == 12 && seg_cnt > 13) || /* but necessary. */
2521 (seg == 17 && seg_cnt > 18)) {
2522 /* Continuation entry points to outbound address list. */
2523 map = pci_map_single(qdev->pdev, oal,
2524 sizeof(struct oal),
2525 PCI_DMA_TODEVICE);
Benjamin Li0f8ab892007-02-26 11:06:40 -08002526
2527 err = pci_dma_mapping_error(map);
2528 if(err) {
2529
2530 printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
2531 qdev->ndev->name, err);
2532 goto map_error;
2533 }
2534
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002535 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2536 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2537 oal_entry->len =
2538 cpu_to_le32(sizeof(struct oal) |
2539 OAL_CONT_ENTRY);
2540 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2541 map);
2542 pci_unmap_len_set(&tx_cb->map[seg], maplen,
Ron Mercerb6967eb2007-03-26 13:42:58 -07002543 sizeof(struct oal));
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002544 oal_entry = (struct oal_entry *)oal;
2545 oal++;
2546 seg++;
2547 }
2548
2549 map =
2550 pci_map_page(qdev->pdev, frag->page,
2551 frag->page_offset, frag->size,
2552 PCI_DMA_TODEVICE);
Benjamin Li0f8ab892007-02-26 11:06:40 -08002553
2554 err = pci_dma_mapping_error(map);
2555 if(err) {
2556 printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
2557 qdev->ndev->name, err);
2558 goto map_error;
2559 }
2560
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002561 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2562 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2563 oal_entry->len = cpu_to_le32(frag->size);
2564 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2565 pci_unmap_len_set(&tx_cb->map[seg], maplen,
2566 frag->size);
2567 }
2568 /* Terminate the last segment. */
2569 oal_entry->len =
2570 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2571 }
Benjamin Li0f8ab892007-02-26 11:06:40 -08002572
Ron Mercer3e71f6d2007-02-26 11:06:39 -08002573 return NETDEV_TX_OK;
Benjamin Li0f8ab892007-02-26 11:06:40 -08002574
2575map_error:
	/* A PCI mapping failed and now we need to back out.
	 * Traverse the OALs and the associated pages which have been
	 * mapped and unmap them to clean up properly.
2579 */
2580
2581 seg = 1;
2582 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2583 oal = tx_cb->oal;
2584 for (i=0; i<completed_segs; i++,seg++) {
2585 oal_entry++;
2586
2587 if((seg == 2 && seg_cnt > 3) || /* Check for continuation */
2588 (seg == 7 && seg_cnt > 8) || /* requirements. It's strange */
2589 (seg == 12 && seg_cnt > 13) || /* but necessary. */
2590 (seg == 17 && seg_cnt > 18)) {
2591 pci_unmap_single(qdev->pdev,
2592 pci_unmap_addr(&tx_cb->map[seg], mapaddr),
2593 pci_unmap_len(&tx_cb->map[seg], maplen),
2594 PCI_DMA_TODEVICE);
2595 oal++;
2596 seg++;
2597 }
2598
2599 pci_unmap_page(qdev->pdev,
2600 pci_unmap_addr(&tx_cb->map[seg], mapaddr),
2601 pci_unmap_len(&tx_cb->map[seg], maplen),
2602 PCI_DMA_TODEVICE);
2603 }
2604
2605 pci_unmap_single(qdev->pdev,
2606 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
2608 PCI_DMA_TODEVICE);
2609
2610 return NETDEV_TX_BUSY;
2611
Ron Mercer3e71f6d2007-02-26 11:06:39 -08002612}
2613
2614/*
2615 * The difference between 3022 and 3032 sends:
2616 * 3022 only supports a simple single segment transmission.
2617 * 3032 supports checksumming and scatter/gather lists (fragments).
2618 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2619 * in the IOCB plus a chain of outbound address lists (OAL) that
2620 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
 * will be used to point to an OAL when more ALP entries are required.
2622 * The IOCB is always the top of the chain followed by one or more
2623 * OALs (when necessary).
2624 */
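/*
 * Rough picture of the chain described above (illustrative only):
 *
 *   IOCB  : [ALP0][ALP1][ALP2 -> OAL #1]
 *   OAL #1: [ALP][ALP][ALP][ALP][ALP -> OAL #2]
 *   OAL #2: [ALP][ALP] ... [last ALP flagged OAL_LAST_ENTRY]
 *
 * A slot only turns into a continuation entry (OAL_CONT_ENTRY) when more
 * pairs are still needed, which is why ql_get_seg_count() is not linear
 * in the number of fragments.
 */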
2625static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2626{
2627 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2628 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2629 struct ql_tx_buf_cb *tx_cb;
2630 u32 tot_len = skb->len;
2631 struct ob_mac_iocb_req *mac_iocb_ptr;
2632
2633 if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
Ron Mercer3e71f6d2007-02-26 11:06:39 -08002634 return NETDEV_TX_BUSY;
2635 }
2636
2637 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
Benjamin Lie8f4df22007-02-26 11:06:42 -08002638 if((tx_cb->seg_count = ql_get_seg_count(qdev,
2639 (skb_shinfo(skb)->nr_frags))) == -1) {
Ron Mercer3e71f6d2007-02-26 11:06:39 -08002640 printk(KERN_ERR PFX"%s: invalid segment count!\n",__func__);
2641 return NETDEV_TX_OK;
2642 }
2643
2644 mac_iocb_ptr = tx_cb->queue_entry;
Ron Mercerd8a759f2007-03-26 13:42:57 -07002645 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
Ron Mercer3e71f6d2007-02-26 11:06:39 -08002646 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2647 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
2648 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2649 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2650 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
2651 tx_cb->skb = skb;
Benjamin Lie8f4df22007-02-26 11:06:42 -08002652 if (qdev->device_id == QL3032_DEVICE_ID &&
2653 skb->ip_summed == CHECKSUM_PARTIAL)
Ron Mercer3e71f6d2007-02-26 11:06:39 -08002654 ql_hw_csum_setup(skb, mac_iocb_ptr);
2655
2656 if(ql_send_map(qdev,mac_iocb_ptr,tx_cb,skb) != NETDEV_TX_OK) {
2657 printk(KERN_ERR PFX"%s: Could not map the segments!\n",__func__);
2658 return NETDEV_TX_BUSY;
2659 }
2660
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002661 wmb();
Ron Mercer5a4faa872006-07-25 00:40:21 -07002662 qdev->req_producer_index++;
2663 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2664 qdev->req_producer_index = 0;
2665 wmb();
2666 ql_write_common_reg_l(qdev,
Al Viroee111d12006-09-25 02:53:53 +01002667 &port_regs->CommonRegs.reqQProducerIndex,
Ron Mercer5a4faa872006-07-25 00:40:21 -07002668 qdev->req_producer_index);
2669
2670 ndev->trans_start = jiffies;
2671 if (netif_msg_tx_queued(qdev))
2672 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
2673 ndev->name, qdev->req_producer_index, skb->len);
2674
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002675 atomic_dec(&qdev->tx_count);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002676 return NETDEV_TX_OK;
2677}
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002678
Ron Mercer5a4faa872006-07-25 00:40:21 -07002679static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2680{
2681 qdev->req_q_size =
2682 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2683
2684 qdev->req_q_virt_addr =
2685 pci_alloc_consistent(qdev->pdev,
2686 (size_t) qdev->req_q_size,
2687 &qdev->req_q_phy_addr);
2688
2689 if ((qdev->req_q_virt_addr == NULL) ||
2690 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2691 printk(KERN_ERR PFX "%s: reqQ failed.\n",
2692 qdev->ndev->name);
2693 return -ENOMEM;
2694 }
2695
2696 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2697
2698 qdev->rsp_q_virt_addr =
2699 pci_alloc_consistent(qdev->pdev,
2700 (size_t) qdev->rsp_q_size,
2701 &qdev->rsp_q_phy_addr);
2702
2703 if ((qdev->rsp_q_virt_addr == NULL) ||
2704 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2705 printk(KERN_ERR PFX
2706 "%s: rspQ allocation failed\n",
2707 qdev->ndev->name);
2708 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2709 qdev->req_q_virt_addr,
2710 qdev->req_q_phy_addr);
2711 return -ENOMEM;
2712 }
2713
2714 set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2715
2716 return 0;
2717}
2718
2719static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2720{
2721 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
2722 printk(KERN_INFO PFX
2723 "%s: Already done.\n", qdev->ndev->name);
2724 return;
2725 }
2726
2727 pci_free_consistent(qdev->pdev,
2728 qdev->req_q_size,
2729 qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2730
2731 qdev->req_q_virt_addr = NULL;
2732
2733 pci_free_consistent(qdev->pdev,
2734 qdev->rsp_q_size,
2735 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2736
2737 qdev->rsp_q_virt_addr = NULL;
2738
2739 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2740}
2741
2742static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2743{
2744 /* Create Large Buffer Queue */
2745 qdev->lrg_buf_q_size =
Ron Mercer1357bfc2007-02-26 11:06:37 -08002746 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002747 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2748 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2749 else
2750 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2751
Ron Mercer1357bfc2007-02-26 11:06:37 -08002752 qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),GFP_KERNEL);
2753 if (qdev->lrg_buf == NULL) {
2754 printk(KERN_ERR PFX
2755 "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
2756 return -ENOMEM;
2757 }
2758
Ron Mercer5a4faa872006-07-25 00:40:21 -07002759 qdev->lrg_buf_q_alloc_virt_addr =
2760 pci_alloc_consistent(qdev->pdev,
2761 qdev->lrg_buf_q_alloc_size,
2762 &qdev->lrg_buf_q_alloc_phy_addr);
2763
2764 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2765 printk(KERN_ERR PFX
2766 "%s: lBufQ failed\n", qdev->ndev->name);
2767 return -ENOMEM;
2768 }
2769 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2770 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2771
2772 /* Create Small Buffer Queue */
2773 qdev->small_buf_q_size =
2774 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2775 if (qdev->small_buf_q_size < PAGE_SIZE)
2776 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2777 else
2778 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2779
2780 qdev->small_buf_q_alloc_virt_addr =
2781 pci_alloc_consistent(qdev->pdev,
2782 qdev->small_buf_q_alloc_size,
2783 &qdev->small_buf_q_alloc_phy_addr);
2784
2785 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2786 printk(KERN_ERR PFX
2787 "%s: Small Buffer Queue allocation failed.\n",
2788 qdev->ndev->name);
2789 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2790 qdev->lrg_buf_q_alloc_virt_addr,
2791 qdev->lrg_buf_q_alloc_phy_addr);
2792 return -ENOMEM;
2793 }
2794
2795 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2796 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2797 set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2798 return 0;
2799}
2800
2801static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2802{
2803 if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
2804 printk(KERN_INFO PFX
2805 "%s: Already done.\n", qdev->ndev->name);
2806 return;
2807 }
	kfree(qdev->lrg_buf);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002809 pci_free_consistent(qdev->pdev,
2810 qdev->lrg_buf_q_alloc_size,
2811 qdev->lrg_buf_q_alloc_virt_addr,
2812 qdev->lrg_buf_q_alloc_phy_addr);
2813
2814 qdev->lrg_buf_q_virt_addr = NULL;
2815
2816 pci_free_consistent(qdev->pdev,
2817 qdev->small_buf_q_alloc_size,
2818 qdev->small_buf_q_alloc_virt_addr,
2819 qdev->small_buf_q_alloc_phy_addr);
2820
2821 qdev->small_buf_q_virt_addr = NULL;
2822
2823 clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2824}
2825
2826static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2827{
2828 int i;
2829 struct bufq_addr_element *small_buf_q_entry;
2830
	/* Currently we allocate one chunk of memory and use it for small buffers */
2832 qdev->small_buf_total_size =
2833 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2834 QL_SMALL_BUFFER_SIZE);
2835
2836 qdev->small_buf_virt_addr =
2837 pci_alloc_consistent(qdev->pdev,
2838 qdev->small_buf_total_size,
2839 &qdev->small_buf_phy_addr);
2840
2841 if (qdev->small_buf_virt_addr == NULL) {
2842 printk(KERN_ERR PFX
2843 "%s: Failed to get small buffer memory.\n",
2844 qdev->ndev->name);
2845 return -ENOMEM;
2846 }
2847
2848 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2849 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2850
2851 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2852
Ron Mercer5a4faa872006-07-25 00:40:21 -07002853 /* Initialize the small buffer queue. */
2854 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2855 small_buf_q_entry->addr_high =
2856 cpu_to_le32(qdev->small_buf_phy_addr_high);
2857 small_buf_q_entry->addr_low =
2858 cpu_to_le32(qdev->small_buf_phy_addr_low +
2859 (i * QL_SMALL_BUFFER_SIZE));
2860 small_buf_q_entry++;
2861 }
2862 qdev->small_buf_index = 0;
2863 set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
2864 return 0;
2865}
2866
2867static void ql_free_small_buffers(struct ql3_adapter *qdev)
2868{
2869 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
2870 printk(KERN_INFO PFX
2871 "%s: Already done.\n", qdev->ndev->name);
2872 return;
2873 }
2874 if (qdev->small_buf_virt_addr != NULL) {
2875 pci_free_consistent(qdev->pdev,
2876 qdev->small_buf_total_size,
2877 qdev->small_buf_virt_addr,
2878 qdev->small_buf_phy_addr);
2879
2880 qdev->small_buf_virt_addr = NULL;
2881 }
2882}
2883
2884static void ql_free_large_buffers(struct ql3_adapter *qdev)
2885{
2886 int i = 0;
2887 struct ql_rcv_buf_cb *lrg_buf_cb;
2888
Ron Mercer1357bfc2007-02-26 11:06:37 -08002889 for (i = 0; i < qdev->num_large_buffers; i++) {
Ron Mercer5a4faa872006-07-25 00:40:21 -07002890 lrg_buf_cb = &qdev->lrg_buf[i];
2891 if (lrg_buf_cb->skb) {
2892 dev_kfree_skb(lrg_buf_cb->skb);
2893 pci_unmap_single(qdev->pdev,
2894 pci_unmap_addr(lrg_buf_cb, mapaddr),
2895 pci_unmap_len(lrg_buf_cb, maplen),
2896 PCI_DMA_FROMDEVICE);
2897 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2898 } else {
2899 break;
2900 }
2901 }
2902}
2903
2904static void ql_init_large_buffers(struct ql3_adapter *qdev)
2905{
2906 int i;
2907 struct ql_rcv_buf_cb *lrg_buf_cb;
2908 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2909
Ron Mercer1357bfc2007-02-26 11:06:37 -08002910 for (i = 0; i < qdev->num_large_buffers; i++) {
Ron Mercer5a4faa872006-07-25 00:40:21 -07002911 lrg_buf_cb = &qdev->lrg_buf[i];
2912 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2913 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2914 buf_addr_ele++;
2915 }
2916 qdev->lrg_buf_index = 0;
2917 qdev->lrg_buf_skb_check = 0;
2918}
2919
2920static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2921{
2922 int i;
2923 struct ql_rcv_buf_cb *lrg_buf_cb;
2924 struct sk_buff *skb;
Benjamin Li0f8ab892007-02-26 11:06:40 -08002925 dma_addr_t map;
2926 int err;
Ron Mercer5a4faa872006-07-25 00:40:21 -07002927
Ron Mercer1357bfc2007-02-26 11:06:37 -08002928 for (i = 0; i < qdev->num_large_buffers; i++) {
Benjamin Licd238fa2007-02-26 11:06:33 -08002929 skb = netdev_alloc_skb(qdev->ndev,
2930 qdev->lrg_buffer_len);
Ron Mercer5a4faa872006-07-25 00:40:21 -07002931 if (unlikely(!skb)) {
2932 /* Better luck next round */
2933 printk(KERN_ERR PFX
2934 "%s: large buff alloc failed, "
2935 "for %d bytes at index %d.\n",
2936 qdev->ndev->name,
2937 qdev->lrg_buffer_len * 2, i);
2938 ql_free_large_buffers(qdev);
2939 return -ENOMEM;
2940 } else {
2941
2942 lrg_buf_cb = &qdev->lrg_buf[i];
2943 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2944 lrg_buf_cb->index = i;
2945 lrg_buf_cb->skb = skb;
2946 /*
			 * We save some space to copy the ethhdr from the
			 * first buffer
2949 */
2950 skb_reserve(skb, QL_HEADER_SPACE);
2951 map = pci_map_single(qdev->pdev,
2952 skb->data,
2953 qdev->lrg_buffer_len -
2954 QL_HEADER_SPACE,
2955 PCI_DMA_FROMDEVICE);
Benjamin Li0f8ab892007-02-26 11:06:40 -08002956
2957 err = pci_dma_mapping_error(map);
2958 if(err) {
2959 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
2960 qdev->ndev->name, err);
2961 ql_free_large_buffers(qdev);
2962 return -ENOMEM;
2963 }
2964
Ron Mercer5a4faa872006-07-25 00:40:21 -07002965 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2966 pci_unmap_len_set(lrg_buf_cb, maplen,
2967 qdev->lrg_buffer_len -
2968 QL_HEADER_SPACE);
2969 lrg_buf_cb->buf_phy_addr_low =
2970 cpu_to_le32(LS_64BITS(map));
2971 lrg_buf_cb->buf_phy_addr_high =
2972 cpu_to_le32(MS_64BITS(map));
2973 }
2974 }
2975 return 0;
2976}
2977
Ron Mercerbd36b0a2007-01-03 16:26:08 -08002978static void ql_free_send_free_list(struct ql3_adapter *qdev)
2979{
2980 struct ql_tx_buf_cb *tx_cb;
2981 int i;
2982
2983 tx_cb = &qdev->tx_buf[0];
2984 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2985 if (tx_cb->oal) {
2986 kfree(tx_cb->oal);
2987 tx_cb->oal = NULL;
2988 }
2989 tx_cb++;
2990 }
2991}
2992
2993static int ql_create_send_free_list(struct ql3_adapter *qdev)
Ron Mercer5a4faa872006-07-25 00:40:21 -07002994{
2995 struct ql_tx_buf_cb *tx_cb;
2996 int i;
2997 struct ob_mac_iocb_req *req_q_curr =
2998 qdev->req_q_virt_addr;
2999
3000 /* Create free list of transmit buffers */
3001 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
Ron Mercerbd36b0a2007-01-03 16:26:08 -08003002
Ron Mercer5a4faa872006-07-25 00:40:21 -07003003 tx_cb = &qdev->tx_buf[i];
3004 tx_cb->skb = NULL;
3005 tx_cb->queue_entry = req_q_curr;
3006 req_q_curr++;
Ron Mercerbd36b0a2007-01-03 16:26:08 -08003007 tx_cb->oal = kmalloc(512, GFP_KERNEL);
3008 if (tx_cb->oal == NULL)
3009 return -1;
Ron Mercer5a4faa872006-07-25 00:40:21 -07003010 }
Ron Mercerbd36b0a2007-01-03 16:26:08 -08003011 return 0;
Ron Mercer5a4faa872006-07-25 00:40:21 -07003012}
3013
3014static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
3015{
Ron Mercer1357bfc2007-02-26 11:06:37 -08003016 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
3017 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
Ron Mercer5a4faa872006-07-25 00:40:21 -07003018 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
Ron Mercer1357bfc2007-02-26 11:06:37 -08003019 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07003020 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
Ron Mercer1357bfc2007-02-26 11:06:37 -08003021 /*
		 * Bigger buffers, so fewer of them.
3023 */
3024 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
Ron Mercer5a4faa872006-07-25 00:40:21 -07003025 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
3026 } else {
3027 printk(KERN_ERR PFX
3028 "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
3029 qdev->ndev->name);
3030 return -ENOMEM;
3031 }
Ron Mercer1357bfc2007-02-26 11:06:37 -08003032 qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
Ron Mercer5a4faa872006-07-25 00:40:21 -07003033 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
3034 qdev->max_frame_size =
3035 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
3036
3037 /*
3038 * First allocate a page of shared memory and use it for shadow
3039 * locations of Network Request Queue Consumer Address Register and
3040 * Network Completion Queue Producer Index Register
3041 */
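	/*
	 * Shadow page layout as set up below (illustrative note): the
	 * request queue consumer index lives at offset 0 and the response
	 * queue producer index at offset 8, so both shadow registers share
	 * the single DMA-coherent page allocated here.
	 */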
3042 qdev->shadow_reg_virt_addr =
3043 pci_alloc_consistent(qdev->pdev,
3044 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
3045
3046 if (qdev->shadow_reg_virt_addr != NULL) {
3047 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
3048 qdev->req_consumer_index_phy_addr_high =
3049 MS_64BITS(qdev->shadow_reg_phy_addr);
3050 qdev->req_consumer_index_phy_addr_low =
3051 LS_64BITS(qdev->shadow_reg_phy_addr);
3052
3053 qdev->prsp_producer_index =
3054 (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
3055 qdev->rsp_producer_index_phy_addr_high =
3056 qdev->req_consumer_index_phy_addr_high;
3057 qdev->rsp_producer_index_phy_addr_low =
3058 qdev->req_consumer_index_phy_addr_low + 8;
3059 } else {
3060 printk(KERN_ERR PFX
3061 "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
3062 return -ENOMEM;
3063 }
3064
3065 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
3066 printk(KERN_ERR PFX
3067 "%s: ql_alloc_net_req_rsp_queues failed.\n",
3068 qdev->ndev->name);
3069 goto err_req_rsp;
3070 }
3071
3072 if (ql_alloc_buffer_queues(qdev) != 0) {
3073 printk(KERN_ERR PFX
3074 "%s: ql_alloc_buffer_queues failed.\n",
3075 qdev->ndev->name);
3076 goto err_buffer_queues;
3077 }
3078
3079 if (ql_alloc_small_buffers(qdev) != 0) {
3080 printk(KERN_ERR PFX
3081 "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
3082 goto err_small_buffers;
3083 }
3084
3085 if (ql_alloc_large_buffers(qdev) != 0) {
3086 printk(KERN_ERR PFX
3087 "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
3088 goto err_small_buffers;
3089 }
3090
3091 /* Initialize the large buffer queue. */
3092 ql_init_large_buffers(qdev);
Ron Mercerbd36b0a2007-01-03 16:26:08 -08003093 if (ql_create_send_free_list(qdev))
3094 goto err_free_list;
Ron Mercer5a4faa872006-07-25 00:40:21 -07003095
3096 qdev->rsp_current = qdev->rsp_q_virt_addr;
3097
3098 return 0;
Ron Mercerbd36b0a2007-01-03 16:26:08 -08003099err_free_list:
3100 ql_free_send_free_list(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -07003101err_small_buffers:
3102 ql_free_buffer_queues(qdev);
3103err_buffer_queues:
3104 ql_free_net_req_rsp_queues(qdev);
3105err_req_rsp:
3106 pci_free_consistent(qdev->pdev,
3107 PAGE_SIZE,
3108 qdev->shadow_reg_virt_addr,
3109 qdev->shadow_reg_phy_addr);
3110
3111 return -ENOMEM;
3112}
3113
3114static void ql_free_mem_resources(struct ql3_adapter *qdev)
3115{
Ron Mercerbd36b0a2007-01-03 16:26:08 -08003116 ql_free_send_free_list(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -07003117 ql_free_large_buffers(qdev);
3118 ql_free_small_buffers(qdev);
3119 ql_free_buffer_queues(qdev);
3120 ql_free_net_req_rsp_queues(qdev);
3121 if (qdev->shadow_reg_virt_addr != NULL) {
3122 pci_free_consistent(qdev->pdev,
3123 PAGE_SIZE,
3124 qdev->shadow_reg_virt_addr,
3125 qdev->shadow_reg_phy_addr);
3126 qdev->shadow_reg_virt_addr = NULL;
3127 }
3128}
3129
3130static int ql_init_misc_registers(struct ql3_adapter *qdev)
3131{
Al Viroee111d12006-09-25 02:53:53 +01003132 struct ql3xxx_local_ram_registers __iomem *local_ram =
3133 (void __iomem *)qdev->mem_map_registers;
Ron Mercer5a4faa872006-07-25 00:40:21 -07003134
3135 if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
3136 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3137 2) << 4))
3138 return -1;
3139
3140 ql_write_page2_reg(qdev,
3141 &local_ram->bufletSize, qdev->nvram_data.bufletSize);
3142
3143 ql_write_page2_reg(qdev,
3144 &local_ram->maxBufletCount,
3145 qdev->nvram_data.bufletCount);
3146
3147 ql_write_page2_reg(qdev,
3148 &local_ram->freeBufletThresholdLow,
3149 (qdev->nvram_data.tcpWindowThreshold25 << 16) |
3150 (qdev->nvram_data.tcpWindowThreshold0));
3151
3152 ql_write_page2_reg(qdev,
3153 &local_ram->freeBufletThresholdHigh,
3154 qdev->nvram_data.tcpWindowThreshold50);
3155
3156 ql_write_page2_reg(qdev,
3157 &local_ram->ipHashTableBase,
3158 (qdev->nvram_data.ipHashTableBaseHi << 16) |
3159 qdev->nvram_data.ipHashTableBaseLo);
3160 ql_write_page2_reg(qdev,
3161 &local_ram->ipHashTableCount,
3162 qdev->nvram_data.ipHashTableSize);
3163 ql_write_page2_reg(qdev,
3164 &local_ram->tcpHashTableBase,
3165 (qdev->nvram_data.tcpHashTableBaseHi << 16) |
3166 qdev->nvram_data.tcpHashTableBaseLo);
3167 ql_write_page2_reg(qdev,
3168 &local_ram->tcpHashTableCount,
3169 qdev->nvram_data.tcpHashTableSize);
3170 ql_write_page2_reg(qdev,
3171 &local_ram->ncbBase,
3172 (qdev->nvram_data.ncbTableBaseHi << 16) |
3173 qdev->nvram_data.ncbTableBaseLo);
3174 ql_write_page2_reg(qdev,
3175 &local_ram->maxNcbCount,
3176 qdev->nvram_data.ncbTableSize);
3177 ql_write_page2_reg(qdev,
3178 &local_ram->drbBase,
3179 (qdev->nvram_data.drbTableBaseHi << 16) |
3180 qdev->nvram_data.drbTableBaseLo);
3181 ql_write_page2_reg(qdev,
3182 &local_ram->maxDrbCount,
3183 qdev->nvram_data.drbTableSize);
3184 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
3185 return 0;
3186}
3187
3188static int ql_adapter_initialize(struct ql3_adapter *qdev)
3189{
3190 u32 value;
3191 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3192 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
Al Viroee111d12006-09-25 02:53:53 +01003193 (void __iomem *)port_regs;
Ron Mercer5a4faa872006-07-25 00:40:21 -07003194 u32 delay = 10;
3195 int status = 0;
3196
3197 if(ql_mii_setup(qdev))
3198 return -1;
3199
3200 /* Bring out PHY out of reset */
3201 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
3202 (ISP_SERIAL_PORT_IF_WE |
3203 (ISP_SERIAL_PORT_IF_WE << 16)));
3204
3205 qdev->port_link_state = LS_DOWN;
3206 netif_carrier_off(qdev->ndev);
3207
3208 /* V2 chip fix for ARS-39168. */
3209 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
3210 (ISP_SERIAL_PORT_IF_SDE |
3211 (ISP_SERIAL_PORT_IF_SDE << 16)));
3212
3213 /* Request Queue Registers */
3214 *((u32 *) (qdev->preq_consumer_index)) = 0;
3215 atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
3216 qdev->req_producer_index = 0;
3217
3218 ql_write_page1_reg(qdev,
3219 &hmem_regs->reqConsumerIndexAddrHigh,
3220 qdev->req_consumer_index_phy_addr_high);
3221 ql_write_page1_reg(qdev,
3222 &hmem_regs->reqConsumerIndexAddrLow,
3223 qdev->req_consumer_index_phy_addr_low);
3224
3225 ql_write_page1_reg(qdev,
3226 &hmem_regs->reqBaseAddrHigh,
3227 MS_64BITS(qdev->req_q_phy_addr));
3228 ql_write_page1_reg(qdev,
3229 &hmem_regs->reqBaseAddrLow,
3230 LS_64BITS(qdev->req_q_phy_addr));
3231 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
3232
3233 /* Response Queue Registers */
3234 *((u16 *) (qdev->prsp_producer_index)) = 0;
3235 qdev->rsp_consumer_index = 0;
3236 qdev->rsp_current = qdev->rsp_q_virt_addr;
3237
3238 ql_write_page1_reg(qdev,
3239 &hmem_regs->rspProducerIndexAddrHigh,
3240 qdev->rsp_producer_index_phy_addr_high);
3241
3242 ql_write_page1_reg(qdev,
3243 &hmem_regs->rspProducerIndexAddrLow,
3244 qdev->rsp_producer_index_phy_addr_low);
3245
3246 ql_write_page1_reg(qdev,
3247 &hmem_regs->rspBaseAddrHigh,
3248 MS_64BITS(qdev->rsp_q_phy_addr));
3249
3250 ql_write_page1_reg(qdev,
3251 &hmem_regs->rspBaseAddrLow,
3252 LS_64BITS(qdev->rsp_q_phy_addr));
3253
3254 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
3255
3256 /* Large Buffer Queue */
3257 ql_write_page1_reg(qdev,
3258 &hmem_regs->rxLargeQBaseAddrHigh,
3259 MS_64BITS(qdev->lrg_buf_q_phy_addr));
3260
3261 ql_write_page1_reg(qdev,
3262 &hmem_regs->rxLargeQBaseAddrLow,
3263 LS_64BITS(qdev->lrg_buf_q_phy_addr));
3264
Ron Mercer1357bfc2007-02-26 11:06:37 -08003265 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
Ron Mercer5a4faa872006-07-25 00:40:21 -07003266
3267 ql_write_page1_reg(qdev,
3268 &hmem_regs->rxLargeBufferLength,
3269 qdev->lrg_buffer_len);
3270
3271 /* Small Buffer Queue */
3272 ql_write_page1_reg(qdev,
3273 &hmem_regs->rxSmallQBaseAddrHigh,
3274 MS_64BITS(qdev->small_buf_q_phy_addr));
3275
3276 ql_write_page1_reg(qdev,
3277 &hmem_regs->rxSmallQBaseAddrLow,
3278 LS_64BITS(qdev->small_buf_q_phy_addr));
3279
3280 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3281 ql_write_page1_reg(qdev,
3282 &hmem_regs->rxSmallBufferLength,
3283 QL_SMALL_BUFFER_SIZE);
3284
3285 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3286 qdev->small_buf_release_cnt = 8;
Ron Mercer1357bfc2007-02-26 11:06:37 -08003287 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
Ron Mercer5a4faa872006-07-25 00:40:21 -07003288 qdev->lrg_buf_release_cnt = 8;
3289 qdev->lrg_buf_next_free =
3290 (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
3291 qdev->small_buf_index = 0;
3292 qdev->lrg_buf_index = 0;
3293 qdev->lrg_buf_free_count = 0;
3294 qdev->lrg_buf_free_head = NULL;
3295 qdev->lrg_buf_free_tail = NULL;
3296
3297 ql_write_common_reg(qdev,
Al Viroee111d12006-09-25 02:53:53 +01003298 &port_regs->CommonRegs.
Ron Mercer5a4faa872006-07-25 00:40:21 -07003299 rxSmallQProducerIndex,
3300 qdev->small_buf_q_producer_index);
3301 ql_write_common_reg(qdev,
Al Viroee111d12006-09-25 02:53:53 +01003302 &port_regs->CommonRegs.
Ron Mercer5a4faa872006-07-25 00:40:21 -07003303 rxLargeQProducerIndex,
3304 qdev->lrg_buf_q_producer_index);
3305
3306 /*
3307 * Find out if the chip has already been initialized. If it has, then
3308 * we skip some of the initialization.
3309 */
3310 clear_bit(QL_LINK_MASTER, &qdev->flags);
3311 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3312 if ((value & PORT_STATUS_IC) == 0) {
3313
3314 /* Chip has not been configured yet, so let it rip. */
3315 if(ql_init_misc_registers(qdev)) {
3316 status = -1;
3317 goto out;
3318 }
3319
Ron Mercer5a4faa872006-07-25 00:40:21 -07003320 value = qdev->nvram_data.tcpMaxWindowSize;
3321 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3322
3323 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3324
3325 if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3326 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3327 * 2) << 13)) {
3328 status = -1;
3329 goto out;
3330 }
3331 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3332 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3333 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
3334 16) | (INTERNAL_CHIP_SD |
3335 INTERNAL_CHIP_WE)));
3336 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3337 }
3338
Ron Mercerb3b15142007-03-26 13:43:00 -07003339 if (qdev->mac_index)
3340 ql_write_page0_reg(qdev,
3341 &port_regs->mac1MaxFrameLengthReg,
3342 qdev->max_frame_size);
3343 else
3344 ql_write_page0_reg(qdev,
3345 &port_regs->mac0MaxFrameLengthReg,
3346 qdev->max_frame_size);
Ron Mercer5a4faa872006-07-25 00:40:21 -07003347
3348 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3349 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3350 2) << 7)) {
3351 status = -1;
3352 goto out;
3353 }
3354
Ron Mercer3efedf22007-03-26 12:43:52 -07003355 PHY_Setup(qdev);
Ron Mercer5a4faa872006-07-25 00:40:21 -07003356 ql_init_scan_mode(qdev);
3357 ql_get_phy_owner(qdev);
3358
3359 /* Load the MAC Configuration */
3360
3361 /* Program lower 32 bits of the MAC address */
3362 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3363 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3364 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3365 ((qdev->ndev->dev_addr[2] << 24)
3366 | (qdev->ndev->dev_addr[3] << 16)
3367 | (qdev->ndev->dev_addr[4] << 8)
3368 | qdev->ndev->dev_addr[5]));
3369
3370 /* Program top 16 bits of the MAC address */
3371 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3372 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3373 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3374 ((qdev->ndev->dev_addr[0] << 8)
3375 | qdev->ndev->dev_addr[1]));
3376
3377 /* Enable Primary MAC */
3378 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3379 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
3380 MAC_ADDR_INDIRECT_PTR_REG_PE));
3381
3382 /* Clear Primary and Secondary IP addresses */
3383 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3384 ((IP_ADDR_INDEX_REG_MASK << 16) |
3385 (qdev->mac_index << 2)));
3386 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3387
3388 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3389 ((IP_ADDR_INDEX_REG_MASK << 16) |
3390 ((qdev->mac_index << 2) + 1)));
3391 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3392
3393 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3394
3395 /* Indicate Configuration Complete */
3396 ql_write_page0_reg(qdev,
3397 &port_regs->portControl,
3398 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
3399
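	/* Poll until the chip reports init complete (PORT_STATUS_IC), sleeping 500 ms between reads. */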
3400 do {
3401 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3402 if (value & PORT_STATUS_IC)
3403 break;
3404 msleep(500);
3405 } while (--delay);
3406
3407 if (delay == 0) {
3408 printk(KERN_ERR PFX
3409 "%s: Hw Initialization timeout.\n", qdev->ndev->name);
3410 status = -1;
3411 goto out;
3412 }
3413
3414 /* Enable Ethernet Function */
Ron Mercerbd36b0a2007-01-03 16:26:08 -08003415 if (qdev->device_id == QL3032_DEVICE_ID) {
3416 value =
3417 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
Ron Mercerb3b15142007-03-26 13:43:00 -07003418 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3419 QL3032_PORT_CONTROL_ET);
Ron Mercerbd36b0a2007-01-03 16:26:08 -08003420 ql_write_page0_reg(qdev, &port_regs->functionControl,
3421 ((value << 16) | value));
3422 } else {
3423 value =
3424 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3425 PORT_CONTROL_HH);
3426 ql_write_page0_reg(qdev, &port_regs->portControl,
3427 ((value << 16) | value));
3428 }
3429
Ron Mercer5a4faa872006-07-25 00:40:21 -07003430
3431out:
3432 return status;
3433}
3434
3435/*
3436 * Caller holds hw_lock.
3437 */
3438static int ql_adapter_reset(struct ql3_adapter *qdev)
3439{
3440 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3441 int status = 0;
3442 u16 value;
3443 int max_wait_time;
3444
3445 set_bit(QL_RESET_ACTIVE, &qdev->flags);
3446 clear_bit(QL_RESET_DONE, &qdev->flags);
3447
3448 /*
3449 * Issue soft reset to chip.
3450 */
3451 printk(KERN_DEBUG PFX
3452 "%s: Issue soft reset to chip.\n",
3453 qdev->ndev->name);
3454 ql_write_common_reg(qdev,
Al Viroee111d12006-09-25 02:53:53 +01003455 &port_regs->CommonRegs.ispControlStatus,
Ron Mercer5a4faa872006-07-25 00:40:21 -07003456 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3457
3458 	/* Wait up to 5 seconds for the soft reset to complete. */
3459 printk(KERN_DEBUG PFX
3460 	       "%s: Waiting up to 5 seconds for reset to complete.\n",
3461 qdev->ndev->name);
3462
3463 /* Wait until the firmware tells us the Soft Reset is done */
3464 max_wait_time = 5;
3465 do {
3466 value =
3467 ql_read_common_reg(qdev,
3468 &port_regs->CommonRegs.ispControlStatus);
3469 if ((value & ISP_CONTROL_SR) == 0)
3470 break;
3471
3472 ssleep(1);
3473 } while ((--max_wait_time));
3474
3475 /*
3476 * Also, make sure that the Network Reset Interrupt bit has been
3477 * cleared after the soft reset has taken place.
3478 */
3479 value =
3480 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3481 if (value & ISP_CONTROL_RI) {
3482 printk(KERN_DEBUG PFX
3483 "ql_adapter_reset: clearing RI after reset.\n");
3484 ql_write_common_reg(qdev,
Al Viroee111d12006-09-25 02:53:53 +01003485 &port_regs->CommonRegs.
Ron Mercer5a4faa872006-07-25 00:40:21 -07003486 ispControlStatus,
3487 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3488 }
3489
3490 if (max_wait_time == 0) {
3491 /* Issue Force Soft Reset */
3492 ql_write_common_reg(qdev,
Al Viroee111d12006-09-25 02:53:53 +01003493 &port_regs->CommonRegs.
Ron Mercer5a4faa872006-07-25 00:40:21 -07003494 ispControlStatus,
3495 ((ISP_CONTROL_FSR << 16) |
3496 ISP_CONTROL_FSR));
3497 /*
3498 * Wait until the firmware tells us the Force Soft Reset is
3499 * done
3500 */
3501 max_wait_time = 5;
3502 do {
3503 value =
3504 ql_read_common_reg(qdev,
3505 &port_regs->CommonRegs.
3506 ispControlStatus);
3507 if ((value & ISP_CONTROL_FSR) == 0) {
3508 break;
3509 }
3510 ssleep(1);
3511 } while ((--max_wait_time));
3512 }
3513 if (max_wait_time == 0)
3514 status = 1;
3515
3516 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3517 set_bit(QL_RESET_DONE, &qdev->flags);
3518 return status;
3519}
3520
3521static void ql_set_mac_info(struct ql3_adapter *qdev)
3522{
3523 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3524 u32 value, port_status;
3525 u8 func_number;
3526
3527 /* Get the function number */
3528 value =
3529 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3530 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
3531 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
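	/*
	 * Derive the per-function MAC index, outbound IOCB opcodes, bit masks
	 * and PHY address from the function number reported in ispControlStatus,
	 * and note whether this port uses the optical interface.
	 */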
3532 switch (value & ISP_CONTROL_FN_MASK) {
3533 case ISP_CONTROL_FN0_NET:
3534 qdev->mac_index = 0;
3535 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3536 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
3537 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
3538 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3539 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3540 if (port_status & PORT_STATUS_SM0)
3541 set_bit(QL_LINK_OPTICAL,&qdev->flags);
3542 else
3543 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
3544 break;
3545
3546 case ISP_CONTROL_FN1_NET:
3547 qdev->mac_index = 1;
3548 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3549 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
3550 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
3551 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3552 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3553 if (port_status & PORT_STATUS_SM1)
3554 set_bit(QL_LINK_OPTICAL,&qdev->flags);
3555 else
3556 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
3557 break;
3558
3559 case ISP_CONTROL_FN0_SCSI:
3560 case ISP_CONTROL_FN1_SCSI:
3561 default:
3562 printk(KERN_DEBUG PFX
3563 "%s: Invalid function number, ispControlStatus = 0x%x\n",
3564 qdev->ndev->name,value);
3565 break;
3566 }
3567 qdev->numPorts = qdev->nvram_data.numPorts;
3568}
3569
3570static void ql_display_dev_info(struct net_device *ndev)
3571{
3572 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3573 struct pci_dev *pdev = qdev->pdev;
3574
3575 printk(KERN_INFO PFX
Ron Mercerbd36b0a2007-01-03 16:26:08 -08003576 "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
3577 DRV_NAME, qdev->index, qdev->chip_rev_id,
3578 (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
3579 qdev->pci_slot);
Ron Mercer5a4faa872006-07-25 00:40:21 -07003580 printk(KERN_INFO PFX
3581 "%s Interface.\n",
3582 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
3583
3584 /*
3585 * Print PCI bus width/type.
3586 */
3587 printk(KERN_INFO PFX
3588 "Bus interface is %s %s.\n",
3589 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3590 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3591
3592 printk(KERN_INFO PFX
3593 "mem IO base address adjusted = 0x%p\n",
3594 qdev->mem_map_registers);
3595 printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
3596
3597 if (netif_msg_probe(qdev))
3598 printk(KERN_INFO PFX
3599 "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
3600 ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
3601 ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
3602 ndev->dev_addr[5]);
3603}
3604
3605static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3606{
3607 struct net_device *ndev = qdev->ndev;
3608 int retval = 0;
3609
3610 netif_stop_queue(ndev);
3611 netif_carrier_off(ndev);
3612
3613 clear_bit(QL_ADAPTER_UP,&qdev->flags);
3614 clear_bit(QL_LINK_MASTER,&qdev->flags);
3615
3616 ql_disable_interrupts(qdev);
3617
3618 free_irq(qdev->pdev->irq, ndev);
3619
3620 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3621 printk(KERN_INFO PFX
3622 "%s: calling pci_disable_msi().\n", qdev->ndev->name);
3623 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3624 pci_disable_msi(qdev->pdev);
3625 }
3626
3627 del_timer_sync(&qdev->adapter_timer);
3628
3629 netif_poll_disable(ndev);
3630
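	/*
	 * If a reset was requested, take the driver lock and soft-reset the
	 * chip; the reset itself releases the driver lock.
	 */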
3631 if (do_reset) {
3632 int soft_reset;
3633 unsigned long hw_flags;
3634
3635 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3636 if (ql_wait_for_drvr_lock(qdev)) {
3637 if ((soft_reset = ql_adapter_reset(qdev))) {
3638 printk(KERN_ERR PFX
3639 "%s: ql_adapter_reset(%d) FAILED!\n",
3640 ndev->name, qdev->index);
3641 }
3642 printk(KERN_ERR PFX
3643 			       "%s: Releasing driver lock via chip reset.\n", ndev->name);
3644 } else {
3645 printk(KERN_ERR PFX
3646 "%s: Could not acquire driver lock to do "
3647 "reset!\n", ndev->name);
3648 retval = -1;
3649 }
3650 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3651 }
3652 ql_free_mem_resources(qdev);
3653 return retval;
3654}
3655
3656static int ql_adapter_up(struct ql3_adapter *qdev)
3657{
3658 struct net_device *ndev = qdev->ndev;
3659 int err;
Thomas Gleixner38515e92007-02-14 00:33:16 -08003660 unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
Ron Mercer5a4faa872006-07-25 00:40:21 -07003661 unsigned long hw_flags;
3662
3663 if (ql_alloc_mem_resources(qdev)) {
3664 printk(KERN_ERR PFX
3665 		       "%s: Unable to allocate buffers.\n", ndev->name);
3666 return -ENOMEM;
3667 }
3668
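	/* MSI is optional; fall back to a shared legacy interrupt if it cannot be enabled. */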
3669 if (qdev->msi) {
3670 if (pci_enable_msi(qdev->pdev)) {
3671 printk(KERN_ERR PFX
3672 "%s: User requested MSI, but MSI failed to "
3673 "initialize. Continuing without MSI.\n",
3674 qdev->ndev->name);
3675 qdev->msi = 0;
3676 } else {
3677 printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
3678 set_bit(QL_MSI_ENABLED,&qdev->flags);
Thomas Gleixner38515e92007-02-14 00:33:16 -08003679 irq_flags &= ~IRQF_SHARED;
Ron Mercer5a4faa872006-07-25 00:40:21 -07003680 }
3681 }
3682
3683 if ((err = request_irq(qdev->pdev->irq,
3684 ql3xxx_isr,
3685 irq_flags, ndev->name, ndev))) {
3686 printk(KERN_ERR PFX
3687 		       "%s: Failed to reserve interrupt %d: already in use.\n",
3688 ndev->name, qdev->pdev->irq);
3689 goto err_irq;
3690 }
3691
3692 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3693
3694 if ((err = ql_wait_for_drvr_lock(qdev))) {
3695 if ((err = ql_adapter_initialize(qdev))) {
3696 printk(KERN_ERR PFX
3697 "%s: Unable to initialize adapter.\n",
3698 ndev->name);
3699 goto err_init;
3700 }
3701 printk(KERN_ERR PFX
3702 		       "%s: Releasing driver lock.\n", ndev->name);
3703 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3704 	} else {
3705 		printk(KERN_ERR PFX
3706 		       "%s: Could not acquire driver lock.\n",
3707 		       ndev->name);
		err = -ENODEV;
3708 		goto err_lock;
3709 }
3710
3711 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3712
3713 set_bit(QL_ADAPTER_UP,&qdev->flags);
3714
3715 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3716
3717 netif_poll_enable(ndev);
3718 ql_enable_interrupts(qdev);
3719 return 0;
3720
3721err_init:
3722 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3723err_lock:
Benjamin Li04f10772007-02-26 11:06:35 -08003724 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
Ron Mercer5a4faa872006-07-25 00:40:21 -07003725 free_irq(qdev->pdev->irq, ndev);
3726err_irq:
3727 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3728 printk(KERN_INFO PFX
3729 "%s: calling pci_disable_msi().\n",
3730 qdev->ndev->name);
3731 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3732 pci_disable_msi(qdev->pdev);
3733 }
3734 return err;
3735}
3736
3737static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3738{
3739 if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) {
3740 printk(KERN_ERR PFX
3741 "%s: Driver up/down cycle failed, "
3742 "closing device\n",qdev->ndev->name);
3743 dev_close(qdev->ndev);
3744 return -1;
3745 }
3746 return 0;
3747}
3748
3749static int ql3xxx_close(struct net_device *ndev)
3750{
3751 struct ql3_adapter *qdev = netdev_priv(ndev);
3752
3753 /*
3754 * Wait for device to recover from a reset.
3755 * (Rarely happens, but possible.)
3756 */
3757 while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
3758 msleep(50);
3759
3760 ql_adapter_down(qdev,QL_DO_RESET);
3761 return 0;
3762}
3763
3764static int ql3xxx_open(struct net_device *ndev)
3765{
3766 struct ql3_adapter *qdev = netdev_priv(ndev);
3767 return (ql_adapter_up(qdev));
3768}
3769
3770static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
3771{
3772 	struct ql3_adapter *qdev = netdev_priv(dev);
3773 return &qdev->stats;
3774}
3775
Ron Mercer5a4faa872006-07-25 00:40:21 -07003776static void ql3xxx_set_multicast_list(struct net_device *ndev)
3777{
3778 /*
3779 	 * Nothing to program here; multicast filtering is not supported on
	 * this adapter (IFF_MULTICAST is cleared at probe time).
3780 */
3781 return;
3782}
3783
3784static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3785{
3786 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3787 struct ql3xxx_port_registers __iomem *port_regs =
3788 qdev->mem_map_registers;
3789 struct sockaddr *addr = p;
3790 unsigned long hw_flags;
3791
3792 if (netif_running(ndev))
3793 return -EBUSY;
3794
3795 if (!is_valid_ether_addr(addr->sa_data))
3796 return -EADDRNOTAVAIL;
3797
3798 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3799
3800 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3801 /* Program lower 32 bits of the MAC address */
3802 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3803 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3804 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3805 ((ndev->dev_addr[2] << 24) | (ndev->
3806 dev_addr[3] << 16) |
3807 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3808
3809 /* Program top 16 bits of the MAC address */
3810 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3811 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3812 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3813 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3814 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3815
3816 return 0;
3817}
3818
3819static void ql3xxx_tx_timeout(struct net_device *ndev)
3820{
3821 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3822
3823 printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
3824 /*
3825 * Stop the queues, we've got a problem.
3826 */
3827 netif_stop_queue(ndev);
3828
3829 /*
3830 * Wake up the worker to process this event.
3831 */
David Howellsc4028952006-11-22 14:57:56 +00003832 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
Ron Mercer5a4faa872006-07-25 00:40:21 -07003833}
3834
David Howellsc4028952006-11-22 14:57:56 +00003835static void ql_reset_work(struct work_struct *work)
Ron Mercer5a4faa872006-07-25 00:40:21 -07003836{
David Howellsc4028952006-11-22 14:57:56 +00003837 struct ql3_adapter *qdev =
3838 container_of(work, struct ql3_adapter, reset_work.work);
Ron Mercer5a4faa872006-07-25 00:40:21 -07003839 struct net_device *ndev = qdev->ndev;
3840 u32 value;
3841 struct ql_tx_buf_cb *tx_cb;
3842 int max_wait_time, i;
3843 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3844 unsigned long hw_flags;
3845
3846 	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || test_bit(QL_RESET_START, &qdev->flags)) {
3847 clear_bit(QL_LINK_MASTER,&qdev->flags);
3848
3849 /*
3850 		 * Loop through the active transmit list, unmap any in-flight
		 * buffers, and free the orphaned skbs.
3851 */
3852 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
Ron Mercerbd36b0a2007-01-03 16:26:08 -08003853 int j;
Ron Mercer5a4faa872006-07-25 00:40:21 -07003854 tx_cb = &qdev->tx_buf[i];
3855 if (tx_cb->skb) {
Ron Mercer5a4faa872006-07-25 00:40:21 -07003856 printk(KERN_DEBUG PFX
3857 "%s: Freeing lost SKB.\n",
3858 qdev->ndev->name);
3859 pci_unmap_single(qdev->pdev,
Ron Mercerbd36b0a2007-01-03 16:26:08 -08003860 pci_unmap_addr(&tx_cb->map[0], mapaddr),
3861 pci_unmap_len(&tx_cb->map[0], maplen),
3862 PCI_DMA_TODEVICE);
3863 for(j=1;j<tx_cb->seg_count;j++) {
3864 pci_unmap_page(qdev->pdev,
3865 pci_unmap_addr(&tx_cb->map[j],mapaddr),
3866 pci_unmap_len(&tx_cb->map[j],maplen),
3867 PCI_DMA_TODEVICE);
3868 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07003869 dev_kfree_skb(tx_cb->skb);
3870 tx_cb->skb = NULL;
3871 }
3872 }
3873
3874 printk(KERN_ERR PFX
3875 "%s: Clearing NRI after reset.\n", qdev->ndev->name);
3876 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3877 ql_write_common_reg(qdev,
3878 &port_regs->CommonRegs.
3879 ispControlStatus,
3880 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3881 /*
3882 		 * Wait for the Soft Reset to complete.
3883 */
3884 max_wait_time = 10;
3885 do {
3886 value = ql_read_common_reg(qdev,
3887 &port_regs->CommonRegs.
3889 ispControlStatus);
3890 if ((value & ISP_CONTROL_SR) == 0) {
3891 printk(KERN_DEBUG PFX
3892 "%s: reset completed.\n",
3893 qdev->ndev->name);
3894 break;
3895 }
3896
3897 if (value & ISP_CONTROL_RI) {
3898 printk(KERN_DEBUG PFX
3899 "%s: clearing NRI after reset.\n",
3900 qdev->ndev->name);
3901 ql_write_common_reg(qdev,
Al Viroee111d12006-09-25 02:53:53 +01003902 &port_regs->
Ron Mercer5a4faa872006-07-25 00:40:21 -07003903 CommonRegs.
3904 ispControlStatus,
3905 ((ISP_CONTROL_RI <<
3906 16) | ISP_CONTROL_RI));
3907 }
3908
3909 ssleep(1);
3910 } while (--max_wait_time);
3911 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3912
3913 if (value & ISP_CONTROL_SR) {
3914
3915 /*
3916 * Set the reset flags and clear the board again.
3917 * Nothing else to do...
3918 */
3919 printk(KERN_ERR PFX
3920 "%s: Timed out waiting for reset to "
3921 "complete.\n", ndev->name);
3922 printk(KERN_ERR PFX
3923 "%s: Do a reset.\n", ndev->name);
3924 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3925 clear_bit(QL_RESET_START,&qdev->flags);
3926 ql_cycle_adapter(qdev,QL_DO_RESET);
3927 return;
3928 }
3929
3930 clear_bit(QL_RESET_ACTIVE,&qdev->flags);
3931 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3932 clear_bit(QL_RESET_START,&qdev->flags);
3933 ql_cycle_adapter(qdev,QL_NO_RESET);
3934 }
3935}
3936
David Howellsc4028952006-11-22 14:57:56 +00003937static void ql_tx_timeout_work(struct work_struct *work)
Ron Mercer5a4faa872006-07-25 00:40:21 -07003938{
David Howellsc4028952006-11-22 14:57:56 +00003939 struct ql3_adapter *qdev =
3940 container_of(work, struct ql3_adapter, tx_timeout_work.work);
3941
3942 ql_cycle_adapter(qdev, QL_DO_RESET);
Ron Mercer5a4faa872006-07-25 00:40:21 -07003943}
3944
3945static void ql_get_board_info(struct ql3_adapter *qdev)
3946{
3947 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3948 u32 value;
3949
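	/* portStatus reports the chip revision, 64/32-bit bus width, and PCI vs. PCI-X mode. */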
3950 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3951
3952 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3953 if (value & PORT_STATUS_64)
3954 qdev->pci_width = 64;
3955 else
3956 qdev->pci_width = 32;
3957 if (value & PORT_STATUS_X)
3958 qdev->pci_x = 1;
3959 else
3960 qdev->pci_x = 0;
3961 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3962}
3963
3964static void ql3xxx_timer(unsigned long ptr)
3965{
3966 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3967
3968 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
3969 printk(KERN_DEBUG PFX
3970 "%s: Reset in progress.\n",
3971 qdev->ndev->name);
3972 goto end;
3973 }
3974
3975 ql_link_state_machine(qdev);
3976
3977 	/* Restart timer on 1 second interval. */
3978end:
3979 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3980}
3981
3982static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3983 const struct pci_device_id *pci_entry)
3984{
3985 struct net_device *ndev = NULL;
3986 struct ql3_adapter *qdev = NULL;
3987 static int cards_found = 0;
3988 int pci_using_dac, err;
3989
3990 err = pci_enable_device(pdev);
3991 if (err) {
3992 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3993 pci_name(pdev));
3994 goto err_out;
3995 }
3996
3997 err = pci_request_regions(pdev, DRV_NAME);
3998 if (err) {
3999 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
4000 pci_name(pdev));
4001 goto err_out_disable_pdev;
4002 }
4003
4004 pci_set_master(pdev);
4005
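	/* Prefer 64-bit DMA addressing; fall back to a 32-bit mask if that fails. */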
4006 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4007 pci_using_dac = 1;
4008 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
4009 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
4010 pci_using_dac = 0;
4011 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
4012 }
4013
4014 if (err) {
4015 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
4016 pci_name(pdev));
4017 goto err_out_free_regions;
4018 }
4019
4020 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
Benjamin Li546faf02007-02-26 11:06:31 -08004021 if (!ndev) {
4022 printk(KERN_ERR PFX "%s could not alloc etherdev\n",
4023 pci_name(pdev));
4024 err = -ENOMEM;
Ron Mercer5a4faa872006-07-25 00:40:21 -07004025 goto err_out_free_regions;
Benjamin Li546faf02007-02-26 11:06:31 -08004026 }
Ron Mercer5a4faa872006-07-25 00:40:21 -07004027
4028 SET_MODULE_OWNER(ndev);
4029 SET_NETDEV_DEV(ndev, &pdev->dev);
4030
Ron Mercer5a4faa872006-07-25 00:40:21 -07004031 pci_set_drvdata(pdev, ndev);
4032
4033 qdev = netdev_priv(ndev);
4034 qdev->index = cards_found;
4035 qdev->ndev = ndev;
4036 qdev->pdev = pdev;
Ron Mercerbd36b0a2007-01-03 16:26:08 -08004037 qdev->device_id = pci_entry->device;
Ron Mercer5a4faa872006-07-25 00:40:21 -07004038 qdev->port_link_state = LS_DOWN;
4039 if (msi)
4040 qdev->msi = 1;
4041
4042 qdev->msg_enable = netif_msg_init(debug, default_msg);
4043
Ron Mercerbd36b0a2007-01-03 16:26:08 -08004044 if (pci_using_dac)
4045 ndev->features |= NETIF_F_HIGHDMA;
4046 if (qdev->device_id == QL3032_DEVICE_ID)
Stephen Hemmingere68a8c12007-05-30 14:23:17 -07004047 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
Ron Mercerbd36b0a2007-01-03 16:26:08 -08004048
Ron Mercer5a4faa872006-07-25 00:40:21 -07004049 qdev->mem_map_registers =
4050 ioremap_nocache(pci_resource_start(pdev, 1),
4051 pci_resource_len(qdev->pdev, 1));
4052 if (!qdev->mem_map_registers) {
4053 printk(KERN_ERR PFX "%s: cannot map device registers\n",
4054 pci_name(pdev));
Benjamin Li546faf02007-02-26 11:06:31 -08004055 err = -EIO;
Ron Mercer5a4faa872006-07-25 00:40:21 -07004056 goto err_out_free_ndev;
4057 }
4058
4059 spin_lock_init(&qdev->adapter_lock);
4060 spin_lock_init(&qdev->hw_lock);
4061
4062 /* Set driver entry points */
4063 ndev->open = ql3xxx_open;
4064 ndev->hard_start_xmit = ql3xxx_send;
4065 ndev->stop = ql3xxx_close;
4066 ndev->get_stats = ql3xxx_get_stats;
Ron Mercer5a4faa872006-07-25 00:40:21 -07004067 ndev->set_multicast_list = ql3xxx_set_multicast_list;
4068 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
4069 ndev->set_mac_address = ql3xxx_set_mac_address;
4070 ndev->tx_timeout = ql3xxx_tx_timeout;
4071 ndev->watchdog_timeo = 5 * HZ;
4072
4073 ndev->poll = &ql_poll;
4074 ndev->weight = 64;
4075
4076 ndev->irq = pdev->irq;
4077
4078 /* make sure the EEPROM is good */
4079 if (ql_get_nvram_params(qdev)) {
4080 printk(KERN_ALERT PFX
4081 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
4082 qdev->index);
Benjamin Li546faf02007-02-26 11:06:31 -08004083 err = -EIO;
Ron Mercer5a4faa872006-07-25 00:40:21 -07004084 goto err_out_iounmap;
4085 }
4086
4087 ql_set_mac_info(qdev);
4088
4089 /* Validate and set parameters */
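	/* Port 1 takes its MTU and MAC address from the fn2 NVRAM block, port 0 from fn0. */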
4090 if (qdev->mac_index) {
Ron Mercercb8bac12007-02-26 11:06:36 -08004091 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ;
Ron Mercer5a4faa872006-07-25 00:40:21 -07004092 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
4093 ETH_ALEN);
4094 } else {
Ron Mercercb8bac12007-02-26 11:06:36 -08004095 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ;
Ron Mercer5a4faa872006-07-25 00:40:21 -07004096 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
4097 ETH_ALEN);
4098 }
4099 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4100
4101 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
4102
4103 /* Turn off support for multicasting */
4104 ndev->flags &= ~IFF_MULTICAST;
4105
4106 /* Record PCI bus information. */
4107 ql_get_board_info(qdev);
4108
4109 /*
4110 * Set the Maximum Memory Read Byte Count value. We do this to handle
4111 * jumbo frames.
4112 */
4113 if (qdev->pci_x) {
4114 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
4115 }
4116
4117 err = register_netdev(ndev);
4118 if (err) {
4119 printk(KERN_ERR PFX "%s: cannot register net device\n",
4120 pci_name(pdev));
4121 goto err_out_iounmap;
4122 }
4123
4124 /* we're going to reset, so assume we have no link for now */
4125
4126 netif_carrier_off(ndev);
4127 netif_stop_queue(ndev);
4128
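	/* A single-threaded workqueue serializes the reset and tx-timeout recovery handlers. */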
4129 qdev->workqueue = create_singlethread_workqueue(ndev->name);
David Howellsc4028952006-11-22 14:57:56 +00004130 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
4131 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
Ron Mercer5a4faa872006-07-25 00:40:21 -07004132
4133 init_timer(&qdev->adapter_timer);
4134 qdev->adapter_timer.function = ql3xxx_timer;
4135 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
4136 qdev->adapter_timer.data = (unsigned long)qdev;
4137
4138 if(!cards_found) {
4139 printk(KERN_ALERT PFX "%s\n", DRV_STRING);
4140 printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
4141 DRV_NAME, DRV_VERSION);
4142 }
4143 ql_display_dev_info(ndev);
4144
4145 cards_found++;
4146 return 0;
4147
4148err_out_iounmap:
4149 iounmap(qdev->mem_map_registers);
4150err_out_free_ndev:
4151 free_netdev(ndev);
4152err_out_free_regions:
4153 pci_release_regions(pdev);
4154err_out_disable_pdev:
4155 pci_disable_device(pdev);
4156 pci_set_drvdata(pdev, NULL);
4157err_out:
4158 return err;
4159}
4160
4161static void __devexit ql3xxx_remove(struct pci_dev *pdev)
4162{
4163 struct net_device *ndev = pci_get_drvdata(pdev);
4164 struct ql3_adapter *qdev = netdev_priv(ndev);
4165
4166 unregister_netdev(ndev);
4167 qdev = netdev_priv(ndev);
4168
4169 ql_disable_interrupts(qdev);
4170
4171 if (qdev->workqueue) {
4172 cancel_delayed_work(&qdev->reset_work);
4173 cancel_delayed_work(&qdev->tx_timeout_work);
4174 destroy_workqueue(qdev->workqueue);
4175 qdev->workqueue = NULL;
4176 }
4177
Al Viro855fc732006-09-25 02:54:46 +01004178 iounmap(qdev->mem_map_registers);
Ron Mercer5a4faa872006-07-25 00:40:21 -07004179 pci_release_regions(pdev);
4180 pci_set_drvdata(pdev, NULL);
4181 free_netdev(ndev);
4182}
4183
4184static struct pci_driver ql3xxx_driver = {
4185
4186 .name = DRV_NAME,
4187 .id_table = ql3xxx_pci_tbl,
4188 .probe = ql3xxx_probe,
4189 .remove = __devexit_p(ql3xxx_remove),
4190};
4191
4192static int __init ql3xxx_init_module(void)
4193{
4194 return pci_register_driver(&ql3xxx_driver);
4195}
4196
4197static void __exit ql3xxx_exit(void)
4198{
4199 pci_unregister_driver(&ql3xxx_driver);
4200}
4201
4202module_init(ql3xxx_init_module);
4203module_exit(ql3xxx_exit);