1/*
2 A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3 ethernet driver for Linux.
4 Copyright (C) 1997 Sten Wang
5
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 DAVICOM Web-Site: www.davicom.com.tw
17
18 Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19 Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
20
21 (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
22
23 Marcelo Tosatti <marcelo@conectiva.com.br> :
24 Made it compile in 2.3 (device to net_device)
25
26 Alan Cox <alan@redhat.com> :
27 Cleaned up for kernel merge.
28 Removed the back compatibility support
29 Reformatted, fixing spelling etc as I went
30 Removed IRQ 0-15 assumption
31
32 Jeff Garzik <jgarzik@pobox.com> :
33 Updated to use new PCI driver API.
34 Resource usage cleanups.
35 Report driver version to user.
36
37 Tobias Ringstrom <tori@unhappy.mine.nu> :
38 Cleaned up and added SMP safety. Thanks go to Jeff Garzik,
39 Andrew Morton and Frank Davis for the SMP safety fixes.
40
41 Vojtech Pavlik <vojtech@suse.cz> :
42 Cleaned up pointer arithmetics.
43 Fixed a lot of 64bit issues.
44 Cleaned up printk()s a bit.
45 Fixed some obvious big endian problems.
46
47 Tobias Ringstrom <tori@unhappy.mine.nu> :
48 Use time_after for jiffies calculation. Added ethtool
49 support. Updated PCI resource allocation. Do not
50 forget to unmap PCI mapped skbs.
51
52 Alan Cox <alan@redhat.com>
53 Added new PCI identifiers provided by Clear Zhang at ALi
54 for their 1563 ethernet device.
55
56 TODO
57
58 Implement pci_driver::suspend() and pci_driver::resume()
59 power management methods.
60
61 Check on 64 bit boxes.
62 Check and fix on big endian boxes.
63
64 Test and make sure PCI latency is now correct for all cases.
65*/
66
67#define DRV_NAME "dmfe"
68#define DRV_VERSION "1.36.4"
69#define DRV_RELDATE "2002-01-17"
70
71#include <linux/module.h>
72#include <linux/kernel.h>
73#include <linux/string.h>
74#include <linux/timer.h>
75#include <linux/ptrace.h>
76#include <linux/errno.h>
77#include <linux/ioport.h>
78#include <linux/slab.h>
79#include <linux/interrupt.h>
80#include <linux/pci.h>
81#include <linux/dma-mapping.h>
82#include <linux/init.h>
83#include <linux/netdevice.h>
84#include <linux/etherdevice.h>
85#include <linux/ethtool.h>
86#include <linux/skbuff.h>
87#include <linux/delay.h>
88#include <linux/spinlock.h>
89#include <linux/crc32.h>
90#include <linux/bitops.h>
91
92#include <asm/processor.h>
93#include <asm/io.h>
94#include <asm/dma.h>
95#include <asm/uaccess.h>
96#include <asm/irq.h>
97
98
99/* Board/System/Debug information/definition ---------------- */
100#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
101#define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */
102#define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */
103#define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */
104
105#define DM9102_IO_SIZE 0x80
106#define DM9102A_IO_SIZE 0x100
107#define TX_MAX_SEND_CNT 0x1 /* Maximum tx packet per time */
108#define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */
109#define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */
110#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
111#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
112#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
113#define TX_BUF_ALLOC 0x600
114#define RX_ALLOC_SIZE 0x620
115#define DM910X_RESET 1
116#define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */
117#define CR6_DEFAULT 0x00080000 /* HD */
118#define CR7_DEFAULT 0x180c1
119#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
120#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
121#define MAX_PACKET_SIZE 1514
122#define DMFE_MAX_MULTICAST 14
123#define RX_COPY_SIZE 100
124#define MAX_CHECK_PACKET 0x8000
125#define DM9801_NOISE_FLOOR 8
126#define DM9802_NOISE_FLOOR 5
127
128#define DMFE_10MHF 0
129#define DMFE_100MHF 1
130#define DMFE_10MFD 4
131#define DMFE_100MFD 5
132#define DMFE_AUTO 8
133#define DMFE_1M_HPNA 0x10
134
135#define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */
136#define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */
137#define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */
138#define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */
139#define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */
140#define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */
141
142#define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
143#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s */
144#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s */
145
146#define DMFE_DBUG(dbug_now, msg, value) \
147 do { \
148 if (dmfe_debug || (dbug_now)) \
149 printk(KERN_ERR DRV_NAME ": %s %lx\n",\
150 (msg), (long) (value)); \
151 } while (0)
152
153#define SHOW_MEDIA_TYPE(mode) \
154 printk (KERN_INFO DRV_NAME ": Change Speed to %sMbps %s duplex\n" , \
155 (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
156
157
158/* CR9 definition: SROM/MII */
159#define CR9_SROM_READ 0x4800
160#define CR9_SRCS 0x1
161#define CR9_SRCLK 0x2
162#define CR9_CRDOUT 0x8
163#define SROM_DATA_0 0x0
164#define SROM_DATA_1 0x4
165#define PHY_DATA_1 0x20000
166#define PHY_DATA_0 0x00000
167#define MDCLKH 0x10000
168
169#define PHY_POWER_DOWN 0x800
170
171#define SROM_V41_CODE 0x14
172
173#define SROM_CLK_WRITE(data, ioaddr) \
174 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
175 udelay(5); \
176 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
177 udelay(5); \
178 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
179 udelay(5);
180
181#define __CHK_IO_SIZE(pci_id, dev_rev) \
182 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x02000030) ) ? \
183 DM9102A_IO_SIZE: DM9102_IO_SIZE)
184
185#define CHK_IO_SIZE(pci_dev, dev_rev) \
186 (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, dev_rev))
187
188/* Sten Check */
189#define DEVICE net_device
190
191/* Structure/enum declaration ------------------------------- */
192struct tx_desc {
193 u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
194 char *tx_buf_ptr; /* Data for us */
195 struct tx_desc *next_tx_desc;
196} __attribute__(( aligned(32) ));
197
198struct rx_desc {
199 u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
200 struct sk_buff *rx_skb_ptr; /* Data for us */
201 struct rx_desc *next_rx_desc;
202} __attribute__(( aligned(32) ));
203
204struct dmfe_board_info {
205 u32 chip_id; /* Chip vendor/Device ID */
206 u32 chip_revision; /* Chip revision */
207 struct DEVICE *next_dev; /* next device */
208 struct pci_dev *pdev; /* PCI device */
209 spinlock_t lock;
210
211 long ioaddr; /* I/O base address */
212 u32 cr0_data;
213 u32 cr5_data;
214 u32 cr6_data;
215 u32 cr7_data;
216 u32 cr15_data;
217
218 /* pointer for memory physical address */
219 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
220 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
221 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
222 dma_addr_t first_tx_desc_dma;
223 dma_addr_t first_rx_desc_dma;
224
225 /* descriptor pointer */
226 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
227 unsigned char *buf_pool_start; /* Tx buffer pool align dword */
228 unsigned char *desc_pool_ptr; /* descriptor pool memory */
229 struct tx_desc *first_tx_desc;
230 struct tx_desc *tx_insert_ptr;
231 struct tx_desc *tx_remove_ptr;
232 struct rx_desc *first_rx_desc;
233 struct rx_desc *rx_insert_ptr;
234 struct rx_desc *rx_ready_ptr; /* packet come pointer */
235 unsigned long tx_packet_cnt; /* transmitted packet count */
236 unsigned long tx_queue_cnt; /* wait to send packet count */
237 unsigned long rx_avail_cnt; /* available rx descriptor count */
238 unsigned long interval_rx_cnt; /* rx packet count a callback time */
239
240 u16 HPNA_command; /* For HPNA register 16 */
241 u16 HPNA_timer; /* For HPNA remote device check */
242 u16 dbug_cnt;
243 u16 NIC_capability; /* NIC media capability */
244 u16 PHY_reg4; /* Saved Phyxcer register 4 value */
245
246 u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */
247 u8 chip_type; /* Keep DM9102A chip type */
248 u8 media_mode; /* user specify media mode */
249 u8 op_mode; /* real work media mode */
250 u8 phy_addr;
251 u8 link_failed; /* Ever link failed */
252 u8 wait_reset; /* Hardware failed, need to reset */
253 u8 dm910x_chk_mode; /* Operating mode check */
254 u8 first_in_callback; /* Flag to record state */
255 struct timer_list timer;
256
257 /* System defined statistic counter */
258 struct net_device_stats stats;
259
260 /* Driver defined statistic counter */
261 unsigned long tx_fifo_underrun;
262 unsigned long tx_loss_carrier;
263 unsigned long tx_no_carrier;
264 unsigned long tx_late_collision;
265 unsigned long tx_excessive_collision;
266 unsigned long tx_jabber_timeout;
267 unsigned long reset_count;
268 unsigned long reset_cr8;
269 unsigned long reset_fatal;
270 unsigned long reset_TXtimeout;
271
272 /* NIC SROM data */
273 unsigned char srom[128];
274};
275
276enum dmfe_offsets {
277 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
278 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
279 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
280 DCR15 = 0x78
281};
282
283enum dmfe_CR6_bits {
284 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
285 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
286 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
287};
288
289/* Global variable declaration ----------------------------- */
290static int __devinitdata printed_version;
291static char version[] __devinitdata =
292 KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
293 DRV_VERSION " (" DRV_RELDATE ")\n";
294
295static int dmfe_debug;
296static unsigned char dmfe_media_mode = DMFE_AUTO;
297static u32 dmfe_cr6_user_set;
298
299/* For module input parameter */
300static int debug;
301static u32 cr6set;
302static unsigned char mode = 8;
303static u8 chkmode = 1;
304static u8 HPNA_mode; /* Default: Low Power/High Speed */
305static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */
306static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */
307static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */
308static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
309 4: TX pause packet */
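/* These knobs are exported as module parameters later in the file (not
 * shown here).  Assuming the parameter names match the variables above,
 * forcing 100Mbps full-duplex at load time would look roughly like:
 *
 *	modprobe dmfe mode=5 debug=1
 *
 * with the mode values taken from the DMFE_* media constants defined above.
 */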
310
311
312/* function declaration ------------------------------------- */
313static int dmfe_open(struct DEVICE *);
314static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
315static int dmfe_stop(struct DEVICE *);
316static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
317static void dmfe_set_filter_mode(struct DEVICE *);
318static const struct ethtool_ops netdev_ethtool_ops;
319static u16 read_srom_word(long, int);
320static irqreturn_t dmfe_interrupt(int, void *);
321#ifdef CONFIG_NET_POLL_CONTROLLER
322static void poll_dmfe (struct net_device *dev);
323#endif
324static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
325static void allocate_rx_buffer(struct dmfe_board_info *);
326static void update_cr6(u32, unsigned long);
327static void send_filter_frame(struct DEVICE * ,int);
328static void dm9132_id_table(struct DEVICE * ,int);
329static u16 phy_read(unsigned long, u8, u8, u32);
330static void phy_write(unsigned long, u8, u8, u16, u32);
331static void phy_write_1bit(unsigned long, u32);
332static u16 phy_read_1bit(unsigned long);
333static u8 dmfe_sense_speed(struct dmfe_board_info *);
334static void dmfe_process_mode(struct dmfe_board_info *);
335static void dmfe_timer(unsigned long);
336static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
337static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
338static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
339static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
340static void dmfe_dynamic_reset(struct DEVICE *);
341static void dmfe_free_rxbuffer(struct dmfe_board_info *);
342static void dmfe_init_dm910x(struct DEVICE *);
343static void dmfe_parse_srom(struct dmfe_board_info *);
344static void dmfe_program_DM9801(struct dmfe_board_info *, int);
345static void dmfe_program_DM9802(struct dmfe_board_info *);
346static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
347static void dmfe_set_phyxcer(struct dmfe_board_info *);
348
349/* DM910X network board routine ---------------------------- */
350
351/*
352 * Search DM910X board, allocate space and register it
353 */
354
355static int __devinit dmfe_init_one (struct pci_dev *pdev,
356 const struct pci_device_id *ent)
357{
358 struct dmfe_board_info *db; /* board information structure */
359 struct net_device *dev;
360 u32 dev_rev, pci_pmr;
361 int i, err;
362
363 DMFE_DBUG(0, "dmfe_init_one()", 0);
364
365 if (!printed_version++)
366 printk(version);
367
368 /* Init network device */
369 dev = alloc_etherdev(sizeof(*db));
370 if (dev == NULL)
371 return -ENOMEM;
372 SET_MODULE_OWNER(dev);
373 SET_NETDEV_DEV(dev, &pdev->dev);
374
375 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
376 printk(KERN_WARNING DRV_NAME
377 ": 32-bit PCI DMA not available.\n");
378 err = -ENODEV;
379 goto err_out_free;
380 }
381
382 /* Enable Master/IO access, Disable memory access */
383 err = pci_enable_device(pdev);
384 if (err)
385 goto err_out_free;
386
387 if (!pci_resource_start(pdev, 0)) {
388 printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
389 err = -ENODEV;
390 goto err_out_disable;
391 }
392
393 /* Read Chip revision */
394 pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
395
396 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev, dev_rev)) ) {
397 printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
398 err = -ENODEV;
399 goto err_out_disable;
400 }
401
402#if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */
403
404 /* Set Latency Timer 80h */
405 /* FIXME: setting values > 32 breaks some SiS 559x stuff.
406 Need a PCI quirk.. */
407
408 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
409#endif
410
411 if (pci_request_regions(pdev, DRV_NAME)) {
412 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
413 err = -ENODEV;
414 goto err_out_disable;
415 }
416
417 /* Init system & device */
418 db = netdev_priv(dev);
419
420 /* Allocate Tx/Rx descriptor memory */
421 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
422 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
423
424 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
425 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
426
427 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
428 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
429 db->buf_pool_start = db->buf_pool_ptr;
430 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
431
432 db->chip_id = ent->driver_data;
433 db->ioaddr = pci_resource_start(pdev, 0);
434 db->chip_revision = dev_rev;
435
436 db->pdev = pdev;
437
438 dev->base_addr = db->ioaddr;
439 dev->irq = pdev->irq;
440 pci_set_drvdata(pdev, dev);
441 dev->open = &dmfe_open;
442 dev->hard_start_xmit = &dmfe_start_xmit;
443 dev->stop = &dmfe_stop;
444 dev->get_stats = &dmfe_get_stats;
445 dev->set_multicast_list = &dmfe_set_filter_mode;
446#ifdef CONFIG_NET_POLL_CONTROLLER
447 dev->poll_controller = &poll_dmfe;
448#endif
449 dev->ethtool_ops = &netdev_ethtool_ops;
450 spin_lock_init(&db->lock);
451
452 pci_read_config_dword(pdev, 0x50, &pci_pmr);
453 pci_pmr &= 0x70000;
454 if ( (pci_pmr == 0x10000) && (dev_rev == 0x02000031) )
455 db->chip_type = 1; /* DM9102A E3 */
456 else
457 db->chip_type = 0;
458
459 /* read 64 word srom data */
460 for (i = 0; i < 64; i++)
461 ((u16 *) db->srom)[i] =
462 cpu_to_le16(read_srom_word(db->ioaddr, i));
463
464 /* Set Node address */
465 for (i = 0; i < 6; i++)
466 dev->dev_addr[i] = db->srom[20 + i];
467
468 err = register_netdev (dev);
469 if (err)
470 goto err_out_res;
471
472 printk(KERN_INFO "%s: Davicom DM%04lx at pci%s,",
473 dev->name,
474 ent->driver_data >> 16,
475 pci_name(pdev));
476 for (i = 0; i < 6; i++)
477 printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
478 printk(", irq %d.\n", dev->irq);
479
480 pci_set_master(pdev);
481
482 return 0;
483
484err_out_res:
485 pci_release_regions(pdev);
486err_out_disable:
487 pci_disable_device(pdev);
488err_out_free:
489 pci_set_drvdata(pdev, NULL);
490 free_netdev(dev);
491
492 return err;
493}
494
495
496static void __devexit dmfe_remove_one (struct pci_dev *pdev)
497{
498 struct net_device *dev = pci_get_drvdata(pdev);
499 struct dmfe_board_info *db = netdev_priv(dev);
500
501 DMFE_DBUG(0, "dmfe_remove_one()", 0);
502
503 if (dev) {
504
505 unregister_netdev(dev);
506
507 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
508 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
509 db->desc_pool_dma_ptr);
510 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
511 db->buf_pool_ptr, db->buf_pool_dma_ptr);
512 pci_release_regions(pdev);
513 free_netdev(dev); /* free board information */
514
515 pci_set_drvdata(pdev, NULL);
516 }
517
518 DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
519}
520
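/*
 * A minimal, untested sketch of the pci_driver suspend()/resume() hooks
 * requested in the TODO at the top of this file.  The hook names and the
 * exact save/restore sequence are assumptions, not part of the original
 * driver, so the sketch is kept under "#if 0" and is not wired into the
 * pci_driver structure.
 */
#if 0
static int dmfe_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (netif_running(dev)) {
		netif_device_detach(dev);
		/* Stop the MAC before the bus goes to sleep */
		outl(DM910X_RESET, pci_resource_start(pdev, 0) + DCR0);
	}

	pci_save_state(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int dmfe_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		dmfe_init_dm910x(dev);	/* re-program the chip from scratch */
		netif_device_attach(dev);
	}
	return 0;
}
#endif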
521
522/*
523 * Open the interface.
524 * The interface is opened whenever "ifconfig" actives it.
525 */
526
527static int dmfe_open(struct DEVICE *dev)
528{
529 int ret;
530 struct dmfe_board_info *db = netdev_priv(dev);
531
532 DMFE_DBUG(0, "dmfe_open", 0);
533
534 ret = request_irq(dev->irq, &dmfe_interrupt,
535 IRQF_SHARED, dev->name, dev);
536 if (ret)
537 return ret;
538
539 /* system variable init */
540 db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
541 db->tx_packet_cnt = 0;
542 db->tx_queue_cnt = 0;
543 db->rx_avail_cnt = 0;
544 db->link_failed = 1;
545 db->wait_reset = 0;
546
547 db->first_in_callback = 0;
548 db->NIC_capability = 0xf; /* All capability*/
549 db->PHY_reg4 = 0x1e0;
550
551 /* CR6 operation mode decision */
552 if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
553 (db->chip_revision >= 0x02000030) ) {
554 db->cr6_data |= DMFE_TXTH_256;
555 db->cr0_data = CR0_DEFAULT;
556 db->dm910x_chk_mode=4; /* Enter the normal mode */
557 } else {
558 db->cr6_data |= CR6_SFT; /* Store & Forward mode */
559 db->cr0_data = 0;
560 db->dm910x_chk_mode = 1; /* Enter the check mode */
561 }
562
563 /* Initialize DM910X board */
564 dmfe_init_dm910x(dev);
565
566 /* Active System Interface */
567 netif_wake_queue(dev);
568
569 /* set and active a timer process */
570 init_timer(&db->timer);
571 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
572 db->timer.data = (unsigned long)dev;
573 db->timer.function = &dmfe_timer;
574 add_timer(&db->timer);
575
576 return 0;
577}
578
579
580/* Initialize DM910X board
581 * Reset DM910X board
582 * Initialize TX/Rx descriptor chain structure
583 * Send the set-up frame
584 * Enable Tx/Rx machine
585 */
586
587static void dmfe_init_dm910x(struct DEVICE *dev)
588{
589 struct dmfe_board_info *db = netdev_priv(dev);
590 unsigned long ioaddr = db->ioaddr;
591
592 DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
593
594 /* Reset DM910x MAC controller */
595 outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */
596 udelay(100);
597 outl(db->cr0_data, ioaddr + DCR0);
598 udelay(5);
599
600 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
601 db->phy_addr = 1;
602
603 /* Parse SROM and media mode */
604 dmfe_parse_srom(db);
605 db->media_mode = dmfe_media_mode;
606
607 /* RESET Phyxcer Chip by GPR port bit 7 */
608 outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */
609 if (db->chip_id == PCI_DM9009_ID) {
610 outl(0x80, ioaddr + DCR12); /* Issue RESET signal */
611 mdelay(300); /* Delay 300 ms */
612 }
613 outl(0x0, ioaddr + DCR12); /* Clear RESET signal */
614
615 /* Process Phyxcer Media Mode */
616 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
617 dmfe_set_phyxcer(db);
618
619 /* Media Mode Process */
620 if ( !(db->media_mode & DMFE_AUTO) )
621 db->op_mode = db->media_mode; /* Force Mode */
622
623 /* Initialize Transmit/Receive descriptor and CR3/4 */
624 dmfe_descriptor_init(db, ioaddr);
625
626 /* Init CR6 to program DM910x operation */
627 update_cr6(db->cr6_data, ioaddr);
628
629 /* Send setup frame */
630 if (db->chip_id == PCI_DM9132_ID)
631 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
632 else
633 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
634
635 /* Init CR7, interrupt active bit */
636 db->cr7_data = CR7_DEFAULT;
637 outl(db->cr7_data, ioaddr + DCR7);
638
639 /* Init CR15, Tx jabber and Rx watchdog timer */
640 outl(db->cr15_data, ioaddr + DCR15);
641
642 /* Enable DM910X Tx/Rx function */
643 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
644 update_cr6(db->cr6_data, ioaddr);
645}
646
647
648/*
649 * Hardware start transmission.
650 * Send a packet to media from the upper layer.
651 */
652
653static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
654{
655 struct dmfe_board_info *db = netdev_priv(dev);
656 struct tx_desc *txptr;
657 unsigned long flags;
658
659 DMFE_DBUG(0, "dmfe_start_xmit", 0);
660
661 /* Resource flag check */
662 netif_stop_queue(dev);
663
664 /* Too large packet check */
665 if (skb->len > MAX_PACKET_SIZE) {
666 printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
667 dev_kfree_skb(skb);
668 return 0;
669 }
670
671 spin_lock_irqsave(&db->lock, flags);
672
673 /* No Tx resource check, this should never happen normally */
674 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
675 spin_unlock_irqrestore(&db->lock, flags);
676 printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
677 db->tx_queue_cnt);
678 return 1;
679 }
680
681 /* Disable NIC interrupt */
682 outl(0, dev->base_addr + DCR7);
683
684 /* transmit this packet */
685 txptr = db->tx_insert_ptr;
686 memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
687 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
688
689 /* Point to next transmit free descriptor */
690 db->tx_insert_ptr = txptr->next_tx_desc;
691
692 /* Transmit Packet Process */
693 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
694 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
695 db->tx_packet_cnt++; /* Ready to send */
696 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
697 dev->trans_start = jiffies; /* saved time stamp */
698 } else {
699 db->tx_queue_cnt++; /* queue TX packet */
700 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
701 }
702
703 /* Tx resource check */
704 if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
705 netif_wake_queue(dev);
706
707 /* Restore CR7 to enable interrupt */
708 spin_unlock_irqrestore(&db->lock, flags);
709 outl(db->cr7_data, dev->base_addr + DCR7);
710
711 /* free this SKB */
712 dev_kfree_skb(skb);
713
714 return 0;
715}
716
717
718/*
719 * Stop the interface.
720 * The interface is stopped when it is brought down.
721 */
722
723static int dmfe_stop(struct DEVICE *dev)
724{
725 struct dmfe_board_info *db = netdev_priv(dev);
726 unsigned long ioaddr = dev->base_addr;
727
728 DMFE_DBUG(0, "dmfe_stop", 0);
729
730 /* disable system */
731 netif_stop_queue(dev);
732
733 /* delete the timer */
734 del_timer_sync(&db->timer);
735
736 /* Reset & stop DM910X board */
737 outl(DM910X_RESET, ioaddr + DCR0);
738 udelay(5);
739 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
740
741 /* free interrupt */
742 free_irq(dev->irq, dev);
743
744 /* free allocated rx buffer */
745 dmfe_free_rxbuffer(db);
746
747#if 0
748 /* show statistic counter */
749 printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
750 " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
751 db->tx_fifo_underrun, db->tx_excessive_collision,
752 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
753 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
754 db->reset_fatal, db->reset_TXtimeout);
755#endif
756
757 return 0;
758}
759
760
761/*
762 * DM9102 interrupt handler
763 * receive the packet to upper layer, free the transmitted packet
764 */
765
766static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
767{
768 struct DEVICE *dev = dev_id;
769 struct dmfe_board_info *db = netdev_priv(dev);
770 unsigned long ioaddr = dev->base_addr;
771 unsigned long flags;
772
773 DMFE_DBUG(0, "dmfe_interrupt()", 0);
774
775 spin_lock_irqsave(&db->lock, flags);
776
777 /* Got DM910X status */
778 db->cr5_data = inl(ioaddr + DCR5);
779 outl(db->cr5_data, ioaddr + DCR5);
780 if ( !(db->cr5_data & 0xc1) ) {
781 spin_unlock_irqrestore(&db->lock, flags);
782 return IRQ_HANDLED;
783 }
784
785 /* Disable all interrupt in CR7 to solve the interrupt edge problem */
786 outl(0, ioaddr + DCR7);
787
788 /* Check system status */
789 if (db->cr5_data & 0x2000) {
790 /* system bus error happen */
791 DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
792 db->reset_fatal++;
793 db->wait_reset = 1; /* Need to RESET */
794 spin_unlock_irqrestore(&db->lock, flags);
795 return IRQ_HANDLED;
796 }
797
798 /* Received the coming packet */
799 if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
800 dmfe_rx_packet(dev, db);
801
802 /* reallocate rx descriptor buffer */
803 if (db->rx_avail_cnt<RX_DESC_CNT)
804 allocate_rx_buffer(db);
805
806 /* Free the transmitted descriptor */
807 if ( db->cr5_data & 0x01)
808 dmfe_free_tx_pkt(dev, db);
809
810 /* Mode Check */
811 if (db->dm910x_chk_mode & 0x2) {
812 db->dm910x_chk_mode = 0x4;
813 db->cr6_data |= 0x100;
814 update_cr6(db->cr6_data, db->ioaddr);
815 }
816
817 /* Restore CR7 to enable interrupt mask */
818 outl(db->cr7_data, ioaddr + DCR7);
819
820 spin_unlock_irqrestore(&db->lock, flags);
821 return IRQ_HANDLED;
822}
823
824
825#ifdef CONFIG_NET_POLL_CONTROLLER
826/*
827 * Polling 'interrupt' - used by things like netconsole to send skbs
828 * without having to re-enable interrupts. It's not called while
829 * the interrupt routine is executing.
830 */
831
832static void poll_dmfe (struct net_device *dev)
833{
834 /* disable_irq here is not very nice, but with the lockless
835 interrupt handler we have no other choice. */
836 disable_irq(dev->irq);
837 dmfe_interrupt (dev->irq, dev);
838 enable_irq(dev->irq);
839}
840#endif
841
842/*
843 * Free TX resource after TX complete
844 */
845
846static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
847{
848 struct tx_desc *txptr;
849 unsigned long ioaddr = dev->base_addr;
850 u32 tdes0;
851
852 txptr = db->tx_remove_ptr;
853 while(db->tx_packet_cnt) {
854 tdes0 = le32_to_cpu(txptr->tdes0);
855 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
856 if (tdes0 & 0x80000000)
857 break;
858
859 /* A packet sent completed */
860 db->tx_packet_cnt--;
861 db->stats.tx_packets++;
862
863 /* Transmit statistic counter */
864 if ( tdes0 != 0x7fffffff ) {
865 /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
866 db->stats.collisions += (tdes0 >> 3) & 0xf;
867 db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
868 if (tdes0 & TDES0_ERR_MASK) {
869 db->stats.tx_errors++;
870
871 if (tdes0 & 0x0002) { /* UnderRun */
872 db->tx_fifo_underrun++;
873 if ( !(db->cr6_data & CR6_SFT) ) {
874 db->cr6_data = db->cr6_data | CR6_SFT;
875 update_cr6(db->cr6_data, db->ioaddr);
876 }
877 }
878 if (tdes0 & 0x0100)
879 db->tx_excessive_collision++;
880 if (tdes0 & 0x0200)
881 db->tx_late_collision++;
882 if (tdes0 & 0x0400)
883 db->tx_no_carrier++;
884 if (tdes0 & 0x0800)
885 db->tx_loss_carrier++;
886 if (tdes0 & 0x4000)
887 db->tx_jabber_timeout++;
888 }
889 }
890
891 txptr = txptr->next_tx_desc;
892 }/* End of while */
893
894 /* Update TX remove pointer to next */
895 db->tx_remove_ptr = txptr;
896
897 /* Send the Tx packet in queue */
898 if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
899 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
900 db->tx_packet_cnt++; /* Ready to send */
901 db->tx_queue_cnt--;
902 outl(0x1, ioaddr + DCR1); /* Issue Tx polling */
903 dev->trans_start = jiffies; /* saved time stamp */
904 }
905
906 /* Resource available check */
907 if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
908 netif_wake_queue(dev); /* Active upper layer, send again */
909}
910
911
912/*
913 * Calculate the CRC value of the Rx packet
914 * flag = 1 : return the reverse CRC (for the received packet CRC)
915 * 0 : return the normal CRC (for Hash Table index)
916 */
917
918static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
919{
920 u32 crc = crc32(~0, Data, Len);
921 if (flag) crc = ~crc;
922 return crc;
923}
924
925
926/*
927 * Receive incoming packets and pass them to the upper layer
928 */
929
930static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
931{
932 struct rx_desc *rxptr;
933 struct sk_buff *skb, *newskb;
934 int rxlen;
935 u32 rdes0;
936
937 rxptr = db->rx_ready_ptr;
938
939 while(db->rx_avail_cnt) {
940 rdes0 = le32_to_cpu(rxptr->rdes0);
941 if (rdes0 & 0x80000000) /* packet owner check */
942 break;
943
944 db->rx_avail_cnt--;
945 db->interval_rx_cnt++;
946
947 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
948 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
949
950 if ( (rdes0 & 0x300) != 0x300) {
951 /* A packet without First/Last flag */
952 /* reuse this SKB */
953 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
954 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
955 } else {
956 /* A packet with First/Last flag */
957 rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
958
959 /* error summary bit check */
960 if (rdes0 & 0x8000) {
961 /* This is a error packet */
962 //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
963 db->stats.rx_errors++;
964 if (rdes0 & 1)
965 db->stats.rx_fifo_errors++;
966 if (rdes0 & 2)
967 db->stats.rx_crc_errors++;
968 if (rdes0 & 0x80)
969 db->stats.rx_length_errors++;
970 }
971
972 if ( !(rdes0 & 0x8000) ||
973 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
974 skb = rxptr->rx_skb_ptr;
975
976 /* Received Packet CRC check need or not */
977 if ( (db->dm910x_chk_mode & 1) &&
978 (cal_CRC(skb->data, rxlen, 1) !=
979 (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
980 /* Found an error in the received packet */
981 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
982 db->dm910x_chk_mode = 3;
983 } else {
984 /* Good packet, send to upper layer */
985 /* Short packet: copy into a new SKB */
986 if ((rxlen < RX_COPY_SIZE) &&
987 ((newskb = dev_alloc_skb(rxlen + 2))
988 != NULL)) {
989
990 skb = newskb;
991 /* size less than COPY_SIZE, allocate a rxlen SKB */
992 skb->dev = dev;
993 skb_reserve(skb, 2); /* 16byte align */
994 memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen);
995 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
996 } else {
997 skb->dev = dev;
998 skb_put(skb, rxlen);
999 }
1000 skb->protocol = eth_type_trans(skb, dev);
1001 netif_rx(skb);
1002 dev->last_rx = jiffies;
1003 db->stats.rx_packets++;
1004 db->stats.rx_bytes += rxlen;
1005 }
1006 } else {
1007 /* Reuse SKB buffer when the packet is error */
1008 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
1009 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1010 }
1011 }
1012
1013 rxptr = rxptr->next_rx_desc;
1014 }
1015
1016 db->rx_ready_ptr = rxptr;
1017}
1018
1019
1020/*
1021 * Get statistics from driver.
1022 */
1023
1024static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
1025{
1026 struct dmfe_board_info *db = netdev_priv(dev);
1027
1028 DMFE_DBUG(0, "dmfe_get_stats", 0);
1029 return &db->stats;
1030}
1031
1032
1033/*
1034 * Set DM910X multicast address
1035 */
1036
1037static void dmfe_set_filter_mode(struct DEVICE * dev)
1038{
1039 struct dmfe_board_info *db = netdev_priv(dev);
1040 unsigned long flags;
1041
1042 DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1043 spin_lock_irqsave(&db->lock, flags);
1044
1045 if (dev->flags & IFF_PROMISC) {
1046 DMFE_DBUG(0, "Enable PROM Mode", 0);
1047 db->cr6_data |= CR6_PM | CR6_PBF;
1048 update_cr6(db->cr6_data, db->ioaddr);
1049 spin_unlock_irqrestore(&db->lock, flags);
1050 return;
1051 }
1052
1053 if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
1054 DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
1055 db->cr6_data &= ~(CR6_PM | CR6_PBF);
1056 db->cr6_data |= CR6_PAM;
1057 spin_unlock_irqrestore(&db->lock, flags);
1058 return;
1059 }
1060
1061 DMFE_DBUG(0, "Set multicast address", dev->mc_count);
1062 if (db->chip_id == PCI_DM9132_ID)
1063 dm9132_id_table(dev, dev->mc_count); /* DM9132 */
1064 else
1065 send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
1066 spin_unlock_irqrestore(&db->lock, flags);
1067}
1068
1069static void netdev_get_drvinfo(struct net_device *dev,
1070 struct ethtool_drvinfo *info)
1071{
1072 struct dmfe_board_info *np = netdev_priv(dev);
1073
1074 strcpy(info->driver, DRV_NAME);
1075 strcpy(info->version, DRV_VERSION);
1076 if (np->pdev)
1077 strcpy(info->bus_info, pci_name(np->pdev));
1078 else
1079 sprintf(info->bus_info, "EISA 0x%lx %d",
1080 dev->base_addr, dev->irq);
1081}
1082
1083static const struct ethtool_ops netdev_ethtool_ops = {
1084 .get_drvinfo = netdev_get_drvinfo,
1085};
1086
1087/*
1088 * A periodic timer routine
1089 * Dynamic media sense, allocate Rx buffer...
1090 */
1091
1092static void dmfe_timer(unsigned long data)
1093{
1094 u32 tmp_cr8;
1095 unsigned char tmp_cr12;
1096 struct DEVICE *dev = (struct DEVICE *) data;
1097 struct dmfe_board_info *db = netdev_priv(dev);
1098 unsigned long flags;
1099
1100 DMFE_DBUG(0, "dmfe_timer()", 0);
1101 spin_lock_irqsave(&db->lock, flags);
1102
1103 /* Media mode process when Link OK before enter this route */
1104 if (db->first_in_callback == 0) {
1105 db->first_in_callback = 1;
1106 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1107 db->cr6_data &= ~0x40000;
1108 update_cr6(db->cr6_data, db->ioaddr);
1109 phy_write(db->ioaddr,
1110 db->phy_addr, 0, 0x1000, db->chip_id);
1111 db->cr6_data |= 0x40000;
1112 update_cr6(db->cr6_data, db->ioaddr);
1113 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1114 add_timer(&db->timer);
1115 spin_unlock_irqrestore(&db->lock, flags);
1116 return;
1117 }
1118 }
1119
1120
1121 /* Operating Mode Check */
1122 if ( (db->dm910x_chk_mode & 0x1) &&
1123 (db->stats.rx_packets > MAX_CHECK_PACKET) )
1124 db->dm910x_chk_mode = 0x4;
1125
1126 /* Dynamic reset DM910X : system error or transmit time-out */
1127 tmp_cr8 = inl(db->ioaddr + DCR8);
1128 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1129 db->reset_cr8++;
1130 db->wait_reset = 1;
1131 }
1132 db->interval_rx_cnt = 0;
1133
1134 /* TX polling kick monitor */
1135 if ( db->tx_packet_cnt &&
1136 time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
1137 outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
1138
1139 /* TX Timeout */
1140 if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
1141 db->reset_TXtimeout++;
1142 db->wait_reset = 1;
1143 printk(KERN_WARNING "%s: Tx timeout - resetting\n",
1144 dev->name);
1145 }
1146 }
1147
1148 if (db->wait_reset) {
1149 DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1150 db->reset_count++;
1151 dmfe_dynamic_reset(dev);
1152 db->first_in_callback = 0;
1153 db->timer.expires = DMFE_TIMER_WUT;
1154 add_timer(&db->timer);
1155 spin_unlock_irqrestore(&db->lock, flags);
1156 return;
1157 }
1158
1159 /* Link status check, Dynamic media type change */
1160 if (db->chip_id == PCI_DM9132_ID)
1161 tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */
1162 else
1163 tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */
1164
1165 if ( ((db->chip_id == PCI_DM9102_ID) &&
1166 (db->chip_revision == 0x02000030)) ||
1167 ((db->chip_id == PCI_DM9132_ID) &&
1168 (db->chip_revision == 0x02000010)) ) {
1169 /* DM9102A Chip */
1170 if (tmp_cr12 & 2)
1171 tmp_cr12 = 0x0; /* Link failed */
1172 else
1173 tmp_cr12 = 0x3; /* Link OK */
1174 }
1175
1176 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
1177 /* Link Failed */
1178 DMFE_DBUG(0, "Link Failed", tmp_cr12);
1179 db->link_failed = 1;
1180
1181 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1182 /* AUTO or force 1M Homerun/Longrun don't need */
1183 if ( !(db->media_mode & 0x38) )
1184 phy_write(db->ioaddr, db->phy_addr,
1185 0, 0x1000, db->chip_id);
1186
1187 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1188 if (db->media_mode & DMFE_AUTO) {
1189 /* 10/100M link failed, used 1M Home-Net */
1190 db->cr6_data|=0x00040000; /* bit18=1, MII */
1191 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1192 update_cr6(db->cr6_data, db->ioaddr);
1193 }
1194 } else
1195 if ((tmp_cr12 & 0x3) && db->link_failed) {
1196 DMFE_DBUG(0, "Link OK", tmp_cr12);
1197 db->link_failed = 0;
1198
1199 /* Auto Sense Speed */
1200 if ( (db->media_mode & DMFE_AUTO) &&
1201 dmfe_sense_speed(db) )
1202 db->link_failed = 1;
1203 dmfe_process_mode(db);
1204 /* SHOW_MEDIA_TYPE(db->op_mode); */
1205 }
1206
1207 /* HPNA remote command check */
1208 if (db->HPNA_command & 0xf00) {
1209 db->HPNA_timer--;
1210 if (!db->HPNA_timer)
1211 dmfe_HPNA_remote_cmd_chk(db);
1212 }
1213
1214 /* Timer active again */
1215 db->timer.expires = DMFE_TIMER_WUT;
1216 add_timer(&db->timer);
1217 spin_unlock_irqrestore(&db->lock, flags);
1218}
1219
1220
1221/*
1222 * Dynamic reset the DM910X board
1223 * Stop DM910X board
1224 * Free Tx/Rx allocated memory
1225 * Reset DM910X board
1226 * Re-initialize DM910X board
1227 */
1228
1229static void dmfe_dynamic_reset(struct DEVICE *dev)
1230{
1231 struct dmfe_board_info *db = netdev_priv(dev);
1232
1233 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1234
1235 /* Stop MAC controller */
1236 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1237 update_cr6(db->cr6_data, dev->base_addr);
1238 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
1239 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
1240
1241 /* Disable upper layer interface */
1242 netif_stop_queue(dev);
1243
1244 /* Free Rx Allocate buffer */
1245 dmfe_free_rxbuffer(db);
1246
1247 /* system variable init */
1248 db->tx_packet_cnt = 0;
1249 db->tx_queue_cnt = 0;
1250 db->rx_avail_cnt = 0;
1251 db->link_failed = 1;
1252 db->wait_reset = 0;
1253
1254 /* Re-initialize DM910X board */
1255 dmfe_init_dm910x(dev);
1256
1257 /* Restart upper layer interface */
1258 netif_wake_queue(dev);
1259}
1260
1261
1262/*
1263 * free all allocated rx buffer
1264 */
1265
1266static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1267{
1268 DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1269
1270 /* free allocated rx buffer */
1271 while (db->rx_avail_cnt) {
1272 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1273 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1274 db->rx_avail_cnt--;
1275 }
1276}
1277
1278
1279/*
1280 * Reuse the SK buffer
1281 */
1282
1283static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1284{
1285 struct rx_desc *rxptr = db->rx_insert_ptr;
1286
1287 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1288 rxptr->rx_skb_ptr = skb;
1289 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
1290 skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1291 wmb();
1292 rxptr->rdes0 = cpu_to_le32(0x80000000);
1293 db->rx_avail_cnt++;
1294 db->rx_insert_ptr = rxptr->next_rx_desc;
1295 } else
1296 DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1297}
1298
1299
1300/*
1301 * Initialize transmit/Receive descriptor
1302 * Using Chain structure, and allocate Tx/Rx buffer
1303 */
1304
1305static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
1306{
1307 struct tx_desc *tmp_tx;
1308 struct rx_desc *tmp_rx;
1309 unsigned char *tmp_buf;
1310 dma_addr_t tmp_tx_dma, tmp_rx_dma;
1311 dma_addr_t tmp_buf_dma;
1312 int i;
1313
1314 DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1315
1316 /* tx descriptor start pointer */
1317 db->tx_insert_ptr = db->first_tx_desc;
1318 db->tx_remove_ptr = db->first_tx_desc;
1319 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
1320
1321 /* rx descriptor start pointer */
1322 db->first_rx_desc = (void *)db->first_tx_desc +
1323 sizeof(struct tx_desc) * TX_DESC_CNT;
1324
1325 db->first_rx_desc_dma = db->first_tx_desc_dma +
1326 sizeof(struct tx_desc) * TX_DESC_CNT;
1327 db->rx_insert_ptr = db->first_rx_desc;
1328 db->rx_ready_ptr = db->first_rx_desc;
1329 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
1330
1331 /* Init Transmit chain */
1332 tmp_buf = db->buf_pool_start;
1333 tmp_buf_dma = db->buf_pool_dma_start;
1334 tmp_tx_dma = db->first_tx_desc_dma;
1335 for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1336 tmp_tx->tx_buf_ptr = tmp_buf;
1337 tmp_tx->tdes0 = cpu_to_le32(0);
1338 tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
1339 tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1340 tmp_tx_dma += sizeof(struct tx_desc);
1341 tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1342 tmp_tx->next_tx_desc = tmp_tx + 1;
1343 tmp_buf = tmp_buf + TX_BUF_ALLOC;
1344 tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1345 }
1346 (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1347 tmp_tx->next_tx_desc = db->first_tx_desc;
1348
1349 /* Init Receive descriptor chain */
1350 tmp_rx_dma=db->first_rx_desc_dma;
1351 for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1352 tmp_rx->rdes0 = cpu_to_le32(0);
1353 tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1354 tmp_rx_dma += sizeof(struct rx_desc);
1355 tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1356 tmp_rx->next_rx_desc = tmp_rx + 1;
1357 }
1358 (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1359 tmp_rx->next_rx_desc = db->first_rx_desc;
1360
1361 /* pre-allocate Rx buffer */
1362 allocate_rx_buffer(db);
1363}
1364
1365
1366/*
1367 * Update CR6 value
1368 * First stop the DM910X, then write the new value and restart
1369 */
1370
1371static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1372{
1373 u32 cr6_tmp;
1374
1375 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
1376 outl(cr6_tmp, ioaddr + DCR6);
1377 udelay(5);
1378 outl(cr6_data, ioaddr + DCR6);
1379 udelay(5);
1380}
1381
1382
1383/*
1384 * Send a setup frame for DM9132
1385 * This setup frame initializes the DM910X address filter mode
1386*/
1387
1388static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
1389{
1390 struct dev_mc_list *mcptr;
1391 u16 * addrptr;
1392 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
1393 u32 hash_val;
1394 u16 i, hash_table[4];
1395
1396 DMFE_DBUG(0, "dm9132_id_table()", 0);
1397
1398 /* Node address */
1399 addrptr = (u16 *) dev->dev_addr;
1400 outw(addrptr[0], ioaddr);
1401 ioaddr += 4;
1402 outw(addrptr[1], ioaddr);
1403 ioaddr += 4;
1404 outw(addrptr[2], ioaddr);
1405 ioaddr += 4;
1406
1407 /* Clear Hash Table */
1408 for (i = 0; i < 4; i++)
1409 hash_table[i] = 0x0;
1410
1411 /* broadcast address */
1412 hash_table[3] = 0x8000;
1413
1414 /* the multicast address in Hash Table : 64 bits */
1415 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1416 hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
1417 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1418 }
1419
1420 /* Write the hash table to MAC MD table */
1421 for (i = 0; i < 4; i++, ioaddr += 4)
1422 outw(hash_table[i], ioaddr);
1423}
1424
1425
1426/*
1427 * Send a setup frame for DM9102/DM9102A
1428 * This setup frame initializes the DM910X address filter mode
1429 */
1430
1431static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
1432{
1433 struct dmfe_board_info *db = netdev_priv(dev);
1434 struct dev_mc_list *mcptr;
1435 struct tx_desc *txptr;
1436 u16 * addrptr;
1437 u32 * suptr;
1438 int i;
1439
1440 DMFE_DBUG(0, "send_filter_frame()", 0);
1441
1442 txptr = db->tx_insert_ptr;
1443 suptr = (u32 *) txptr->tx_buf_ptr;
1444
1445 /* Node address */
1446 addrptr = (u16 *) dev->dev_addr;
1447 *suptr++ = addrptr[0];
1448 *suptr++ = addrptr[1];
1449 *suptr++ = addrptr[2];
1450
1451 /* broadcast address */
1452 *suptr++ = 0xffff;
1453 *suptr++ = 0xffff;
1454 *suptr++ = 0xffff;
1455
1456 /* fit the multicast address */
1457 for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
1458 addrptr = (u16 *) mcptr->dmi_addr;
1459 *suptr++ = addrptr[0];
1460 *suptr++ = addrptr[1];
1461 *suptr++ = addrptr[2];
1462 }
1463
1464 for (; i<14; i++) {
1465 *suptr++ = 0xffff;
1466 *suptr++ = 0xffff;
1467 *suptr++ = 0xffff;
1468 }
1469
1470 /* prepare the setup frame */
1471 db->tx_insert_ptr = txptr->next_tx_desc;
1472 txptr->tdes1 = cpu_to_le32(0x890000c0);
1473
1474 /* Resource Check and Send the setup packet */
1475 if (!db->tx_packet_cnt) {
1476 /* Resource Empty */
1477 db->tx_packet_cnt++;
1478 txptr->tdes0 = cpu_to_le32(0x80000000);
1479 update_cr6(db->cr6_data | 0x2000, dev->base_addr);
1480 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
1481 update_cr6(db->cr6_data, dev->base_addr);
1482 dev->trans_start = jiffies;
1483 } else
1484 db->tx_queue_cnt++; /* Put in TX queue */
1485}
1486
1487
1488/*
1489 * Allocate rx buffers,
1490 * allocating as many as possible
1491 */
1492
1493static void allocate_rx_buffer(struct dmfe_board_info *db)
1494{
1495 struct rx_desc *rxptr;
1496 struct sk_buff *skb;
1497
1498 rxptr = db->rx_insert_ptr;
1499
1500 while(db->rx_avail_cnt < RX_DESC_CNT) {
1501 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1502 break;
1503 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1504 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
1505 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1506 wmb();
1507 rxptr->rdes0 = cpu_to_le32(0x80000000);
1508 rxptr = rxptr->next_rx_desc;
1509 db->rx_avail_cnt++;
1510 }
1511
1512 db->rx_insert_ptr = rxptr;
1513}
1514
1515
1516/*
1517 * Read one word data from the serial ROM
1518 */
1519
1520static u16 read_srom_word(long ioaddr, int offset)
1521{
1522 int i;
1523 u16 srom_data = 0;
1524 long cr9_ioaddr = ioaddr + DCR9;
1525
1526 outl(CR9_SROM_READ, cr9_ioaddr);
1527 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1528
1529 /* Send the Read Command 110b */
1530 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1531 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1532 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
1533
1534 /* Send the offset */
1535 for (i = 5; i >= 0; i--) {
1536 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1537 SROM_CLK_WRITE(srom_data, cr9_ioaddr);
1538 }
1539
1540 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1541
1542 for (i = 16; i > 0; i--) {
1543 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
1544 udelay(5);
1545 srom_data = (srom_data << 1) |
1546 ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
1547 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1548 udelay(5);
1549 }
1550
1551 outl(CR9_SROM_READ, cr9_ioaddr);
1552 return srom_data;
1553}
1554
1555
1556/*
1557 * Auto sense the media mode
1558 */
1559
1560static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1561{
1562 u8 ErrFlag = 0;
1563 u16 phy_mode;
1564
1565 /* CR6 bit18=0, select 10/100M */
1566 update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);
1567
1568 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1569 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1570
1571 if ( (phy_mode & 0x24) == 0x24 ) {
1572 if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
1573 phy_mode = phy_read(db->ioaddr,
1574 db->phy_addr, 7, db->chip_id) & 0xf000;
1575 else /* DM9102/DM9102A */
1576 phy_mode = phy_read(db->ioaddr,
1577 db->phy_addr, 17, db->chip_id) & 0xf000;
1578 /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
1579 switch (phy_mode) {
1580 case 0x1000: db->op_mode = DMFE_10MHF; break;
1581 case 0x2000: db->op_mode = DMFE_10MFD; break;
1582 case 0x4000: db->op_mode = DMFE_100MHF; break;
1583 case 0x8000: db->op_mode = DMFE_100MFD; break;
1584 default: db->op_mode = DMFE_10MHF;
1585 ErrFlag = 1;
1586 break;
1587 }
1588 } else {
1589 db->op_mode = DMFE_10MHF;
1590 DMFE_DBUG(0, "Link Failed :", phy_mode);
1591 ErrFlag = 1;
1592 }
1593
1594 return ErrFlag;
1595}
1596
1597
1598/*
1599 * Set 10/100 phyxcer capability
1600 * AUTO mode : phyxcer register4 is NIC capability
1601 * Force mode: phyxcer register4 is the force media
1602 */
1603
1604static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1605{
1606 u16 phy_reg;
1607
1608 /* Select 10/100M phyxcer */
1609 db->cr6_data &= ~0x40000;
1610 update_cr6(db->cr6_data, db->ioaddr);
1611
1612 /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1613 if (db->chip_id == PCI_DM9009_ID) {
1614 phy_reg = phy_read(db->ioaddr,
1615 db->phy_addr, 18, db->chip_id) & ~0x1000;
1616
1617 phy_write(db->ioaddr,
1618 db->phy_addr, 18, phy_reg, db->chip_id);
1619 }
1620
1621 /* Phyxcer capability setting */
1622 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1623
1624 if (db->media_mode & DMFE_AUTO) {
1625 /* AUTO Mode */
1626 phy_reg |= db->PHY_reg4;
1627 } else {
1628 /* Force Mode */
1629 switch(db->media_mode) {
1630 case DMFE_10MHF: phy_reg |= 0x20; break;
1631 case DMFE_10MFD: phy_reg |= 0x40; break;
1632 case DMFE_100MHF: phy_reg |= 0x80; break;
1633 case DMFE_100MFD: phy_reg |= 0x100; break;
1634 }
1635 if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1636 }
1637
1638 /* Write new capability to Phyxcer Reg4 */
1639 if ( !(phy_reg & 0x01e0)) {
1640 phy_reg|=db->PHY_reg4;
1641 db->media_mode|=DMFE_AUTO;
1642 }
1643 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1644
1645 /* Restart Auto-Negotiation */
1646 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1647 phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1648 if ( !db->chip_type )
1649 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1650}
1651
1652
1653/*
1654 * Process op-mode
1655 * AUTO mode : PHY controller in Auto-negotiation Mode
1656 * Force mode: PHY controller in force mode with HUB
1657 * N-way force capability with SWITCH
1658 */
1659
1660static void dmfe_process_mode(struct dmfe_board_info *db)
1661{
1662 u16 phy_reg;
1663
1664 /* Full Duplex Mode Check */
1665 if (db->op_mode & 0x4)
1666 db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
1667 else
1668 db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
1669
1670 /* Transceiver Selection */
1671 if (db->op_mode & 0x10) /* 1M HomePNA */
1672 db->cr6_data |= 0x40000;/* External MII select */
1673 else
1674 db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */
1675
1676 update_cr6(db->cr6_data, db->ioaddr);
1677
1678 /* 10/100M phyxcer force mode need */
1679 if ( !(db->media_mode & 0x18)) {
1680 /* Forece Mode */
1681 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1682 if ( !(phy_reg & 0x1) ) {
1683 /* partner without N-Way capability */
1684 phy_reg = 0x0;
1685 switch(db->op_mode) {
1686 case DMFE_10MHF: phy_reg = 0x0; break;
1687 case DMFE_10MFD: phy_reg = 0x100; break;
1688 case DMFE_100MHF: phy_reg = 0x2000; break;
1689 case DMFE_100MFD: phy_reg = 0x2100; break;
1690 }
1691 phy_write(db->ioaddr,
1692 db->phy_addr, 0, phy_reg, db->chip_id);
1693 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1694 mdelay(20);
1695 phy_write(db->ioaddr,
1696 db->phy_addr, 0, phy_reg, db->chip_id);
1697 }
1698 }
1699}
1700
1701
1702/*
1703 * Write a word to Phy register
1704 */
1705
1706static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
1707 u16 phy_data, u32 chip_id)
1708{
1709 u16 i;
1710 unsigned long ioaddr;
1711
1712 if (chip_id == PCI_DM9132_ID) {
1713 ioaddr = iobase + 0x80 + offset * 4;
1714 outw(phy_data, ioaddr);
1715 } else {
1716 /* DM9102/DM9102A Chip */
1717 ioaddr = iobase + DCR9;
1718
1719 /* Send 33 synchronization clock to Phy controller */
1720 for (i = 0; i < 35; i++)
1721 phy_write_1bit(ioaddr, PHY_DATA_1);
1722
1723 /* Send start command(01) to Phy */
1724 phy_write_1bit(ioaddr, PHY_DATA_0);
1725 phy_write_1bit(ioaddr, PHY_DATA_1);
1726
1727 /* Send write command(01) to Phy */
1728 phy_write_1bit(ioaddr, PHY_DATA_0);
1729 phy_write_1bit(ioaddr, PHY_DATA_1);
1730
1731 /* Send Phy address */
1732 for (i = 0x10; i > 0; i = i >> 1)
1733 phy_write_1bit(ioaddr,
1734 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1735
1736 /* Send register address */
1737 for (i = 0x10; i > 0; i = i >> 1)
1738 phy_write_1bit(ioaddr,
1739 offset & i ? PHY_DATA_1 : PHY_DATA_0);
1740
1741 /* write transition */
1742 phy_write_1bit(ioaddr, PHY_DATA_1);
1743 phy_write_1bit(ioaddr, PHY_DATA_0);
1744
1745 /* Write a word data to PHY controller */
1746 for ( i = 0x8000; i > 0; i >>= 1)
1747 phy_write_1bit(ioaddr,
1748 phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1749 }
1750}
1751
1752
1753/*
1754 * Read a word data from phy register
1755 */
1756
static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
{
	int i;
	u16 phy_data;
	unsigned long ioaddr;

	if (chip_id == PCI_DM9132_ID) {
		/* DM9132 Chip */
		ioaddr = iobase + 0x80 + offset * 4;
		phy_data = inw(ioaddr);
	} else {
		/* DM9102/DM9102A Chip */
		ioaddr = iobase + DCR9;

		/* Send 35 preamble (synchronization) clocks to the PHY */
		for (i = 0; i < 35; i++)
			phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send start command (01) to the PHY */
		phy_write_1bit(ioaddr, PHY_DATA_0);
		phy_write_1bit(ioaddr, PHY_DATA_1);

		/* Send read command (10) to the PHY */
		phy_write_1bit(ioaddr, PHY_DATA_1);
		phy_write_1bit(ioaddr, PHY_DATA_0);

		/* Send PHY address */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Send register address */
		for (i = 0x10; i > 0; i = i >> 1)
			phy_write_1bit(ioaddr,
				       offset & i ? PHY_DATA_1 : PHY_DATA_0);

		/* Skip the turnaround bit */
		phy_read_1bit(ioaddr);

		/* Read the 16-bit data word */
		for (phy_data = 0, i = 0; i < 16; i++) {
			phy_data <<= 1;
			phy_data |= phy_read_1bit(ioaddr);
		}
	}

	return phy_data;
}


/*
 *	Write one bit to the PHY controller
 */

static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
{
	outl(phy_data, ioaddr);			/* MII Clock Low */
	udelay(1);
	outl(phy_data | MDCLKH, ioaddr);	/* MII Clock High */
	udelay(1);
	outl(phy_data, ioaddr);			/* MII Clock Low */
	udelay(1);
}
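
/*
 * Each call produces one full MDC cycle on CR9: the data pattern (one of
 * the PHY_DATA_* values) is driven with the clock low, MDCLKH is raised
 * and then dropped again, with about 1us of settling time around each edge.
 */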


/*
 *	Read one bit of PHY data from the PHY controller
 */

static u16 phy_read_1bit(unsigned long ioaddr)
{
	u16 phy_data;

	outl(0x50000, ioaddr);
	udelay(1);
	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
	outl(0x40000, ioaddr);
	udelay(1);

	return phy_data;
}
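
/*
 * The 0x40000/0x50000 patterns clock MDC low and high while (apparently)
 * selecting the read direction on the management interface, so the PHY can
 * drive the data line; the bit itself is sampled from bit 19 of CR9 while
 * the clock is high.
 */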


/*
 *	Parse the SROM and set the media mode
 */

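/*
 * SROM layout assumed below for version 4.01: byte 18 holds the version
 * code, the 16-bit word at offset 34 lists the media modes the NIC
 * supports, and bytes 40/43 carry the special-function bits (flow control,
 * TX pause, VLAN) that are merged into CR15 together with the SF_mode
 * module parameter.
 */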
static void dmfe_parse_srom(struct dmfe_board_info * db)
{
	char * srom = db->srom;
	int dmfe_mode, tmp_reg;

	DMFE_DBUG(0, "dmfe_parse_srom() ", 0);

	/* Init CR15 */
	db->cr15_data = CR15_DEFAULT;

	/* Check SROM Version */
	if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
		/* SROM V4.01 */
		/* Get the media modes supported by the NIC */
		db->NIC_capability = le16_to_cpup((__le16 *)srom + 34/2);
		db->PHY_reg4 = 0;
		for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
			switch( db->NIC_capability & tmp_reg ) {
			case 0x1: db->PHY_reg4 |= 0x0020; break;
			case 0x2: db->PHY_reg4 |= 0x0040; break;
			case 0x4: db->PHY_reg4 |= 0x0080; break;
			case 0x8: db->PHY_reg4 |= 0x0100; break;
			}
		}

		/* Check whether a media mode is forced */
		dmfe_mode = le32_to_cpup((__le32 *)srom + 34/4) &
			le32_to_cpup((__le32 *)srom + 36/4);
		switch(dmfe_mode) {
		case 0x4: dmfe_media_mode = DMFE_100MHF; break;	/* 100MHF */
		case 0x2: dmfe_media_mode = DMFE_10MFD; break;	/* 10MFD */
		case 0x8: dmfe_media_mode = DMFE_100MFD; break;	/* 100MFD */
		case 0x100:
		case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
		}

		/* Special function settings */
		/* VLAN function */
		if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
			db->cr15_data |= 0x40;

		/* Flow Control */
		if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
			db->cr15_data |= 0x400;

		/* TX pause packet */
		if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
			db->cr15_data |= 0x9800;
	}

	/* Parse HPNA parameters */
	db->HPNA_command = 1;

	/* Accept remote command or not */
	if (HPNA_rx_cmd == 0)
		db->HPNA_command |= 0x8000;

	/* Issue remote command & operation mode */
	if (HPNA_tx_cmd == 1)
		switch(HPNA_mode) {	/* Issue Remote Command */
		case 0: db->HPNA_command |= 0x0904; break;
		case 1: db->HPNA_command |= 0x0a00; break;
		case 2: db->HPNA_command |= 0x0506; break;
		case 3: db->HPNA_command |= 0x0602; break;
		}
	else
		switch(HPNA_mode) {	/* Don't Issue */
		case 0: db->HPNA_command |= 0x0004; break;
		case 1: db->HPNA_command |= 0x0000; break;
		case 2: db->HPNA_command |= 0x0006; break;
		case 3: db->HPNA_command |= 0x0002; break;
		}

	/* Check whether a DM9801 or DM9802 HomePNA PHY is present */
	db->HPNA_present = 0;
	update_cr6(db->cr6_data|0x40000, db->ioaddr);
	tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
	if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
		/* DM9801 or DM9802 present */
		db->HPNA_timer = 8;
		if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
			/* DM9801 HomeRun */
			db->HPNA_present = 1;
			dmfe_program_DM9801(db, tmp_reg);
		} else {
			/* DM9802 LongRun */
			db->HPNA_present = 2;
			dmfe_program_DM9802(db);
		}
	}

}


/*
 *	Init HomeRun DM9801
 */

static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
{
	uint reg17, reg25;

	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
	switch(HPNA_rev) {
	case 0xb900: /* DM9801 E3 */
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
		reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		break;
	case 0xb901: /* DM9801 E4 */
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
		break;
	case 0xb902: /* DM9801 E5 */
	case 0xb903: /* DM9801 E6 */
	default:
		db->HPNA_command |= 0x1000;
		reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
		reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
		reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
		reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
		break;
	}
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
	phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
}
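
/*
 * Whatever the silicon revision, the result is the same: PHY register 16
 * gets the assembled HPNA_command, and registers 17/25 are reloaded with a
 * noise-floor threshold derived from their current contents plus the
 * HPNA_NoiseFloor module parameter (with revision-specific offsets).
 */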


/*
 *	Init LongRun DM9802
 */

static void dmfe_program_DM9802(struct dmfe_board_info * db)
{
	uint phy_reg;

	if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
	phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
	phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
	phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
}


/*
 * Check the remote HPNA power and speed status. If they do not match
 * our settings, issue the command again.
 */

static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
{
	uint phy_reg;

	/* Get remote device status */
	phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
	switch(phy_reg) {
	case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
	case 0x20: phy_reg = 0x0900;break; /* LP/HS */
	case 0x40: phy_reg = 0x0600;break; /* HP/LS */
	case 0x60: phy_reg = 0x0500;break; /* HP/HS */
	}

	/* Check whether the remote device status matches our setting */
	if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
		phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
			  db->chip_id);
		db->HPNA_timer = 8;
	} else
		db->HPNA_timer = 600;	/* Match, re-check every 10 minutes */
}
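
/*
 * HPNA_timer counts ticks of the driver's periodic link timer (roughly one
 * second per tick), so 600 matches the "every 10 minutes" re-check above,
 * while 8 schedules a quick retry after the command is re-issued.
 */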



static struct pci_device_id dmfe_pci_tbl[] = {
	{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
	{ 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);


static struct pci_driver dmfe_driver = {
	.name		= "dmfe",
	.id_table	= dmfe_pci_tbl,
	.probe		= dmfe_init_one,
	.remove		= __devexit_p(dmfe_remove_one),
};

MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(debug, int, 0);
module_param(mode, byte, 0);
module_param(cr6set, int, 0);
module_param(chkmode, byte, 0);
module_param(HPNA_mode, byte, 0);
module_param(HPNA_rx_cmd, byte, 0);
module_param(HPNA_tx_cmd, byte, 0);
module_param(HPNA_NoiseFloor, byte, 0);
module_param(SF_mode, byte, 0);
MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
		"Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");

MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
		"(bit 0: VLAN, bit 1: Flow Control, bit 2: TX pause packet)");

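/*
 * Illustrative usage: loading the module with a forced 100Mb/s full-duplex
 * link (bit 0 + bit 2 of "mode", per the description above) and the VLAN
 * special function enabled would look like
 *
 *	modprobe dmfe mode=5 SF_mode=1
 *
 * Leaving "mode" unset keeps the default autonegotiation behaviour.
 */
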
/* Description:
 *	When the user loads the module with insmod/modprobe, the kernel
 *	invokes this routine to initialize and register the driver.
 */

static int __init dmfe_init_module(void)
{
	int rc;

	printk(version);
	printed_version = 1;

	DMFE_DBUG(0, "init_module() ", debug);

	if (debug)
		dmfe_debug = debug;	/* set debug flag */
	if (cr6set)
		dmfe_cr6_user_set = cr6set;

	switch(mode) {
	case DMFE_10MHF:
	case DMFE_100MHF:
	case DMFE_10MFD:
	case DMFE_100MFD:
	case DMFE_1M_HPNA:
		dmfe_media_mode = mode;
		break;
	default:
		dmfe_media_mode = DMFE_AUTO;
		break;
	}

	if (HPNA_mode > 4)
		HPNA_mode = 0;		/* Default: LP/HS */
	if (HPNA_rx_cmd > 1)
		HPNA_rx_cmd = 0;	/* Default: Ignore remote cmd */
	if (HPNA_tx_cmd > 1)
		HPNA_tx_cmd = 0;	/* Default: Don't issue remote cmd */
	if (HPNA_NoiseFloor > 15)
		HPNA_NoiseFloor = 0;

	rc = pci_register_driver(&dmfe_driver);
	if (rc < 0)
		return rc;

	return 0;
}


/*
 * Description:
 *	When the user removes the module with rmmod, the kernel invokes
 *	this routine to unregister all registered services.
 */

static void __exit dmfe_cleanup_module(void)
{
	DMFE_DBUG(0, "dmfe_cleanup_module() ", debug);
	pci_unregister_driver(&dmfe_driver);
}

module_init(dmfe_init_module);
module_exit(dmfe_cleanup_module);