/*
    A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
    ethernet driver for Linux.
    Copyright (C) 1997  Sten Wang

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License
    as published by the Free Software Foundation; either version 2
    of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    DAVICOM Web-Site: www.davicom.com.tw

    Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
    Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>

    (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.

    Marcelo Tosatti <marcelo@conectiva.com.br> :
    Made it compile in 2.3 (device to net_device)

    Alan Cox <alan@redhat.com> :
    Cleaned up for kernel merge.
    Removed the back compatibility support
    Reformatted, fixing spelling etc as I went
    Removed IRQ 0-15 assumption

    Jeff Garzik <jgarzik@pobox.com> :
    Updated to use new PCI driver API.
    Resource usage cleanups.
    Report driver version to user.

    Tobias Ringstrom <tori@unhappy.mine.nu> :
    Cleaned up and added SMP safety.  Thanks go to Jeff Garzik,
    Andrew Morton and Frank Davis for the SMP safety fixes.

    Vojtech Pavlik <vojtech@suse.cz> :
    Cleaned up pointer arithmetic.
    Fixed a lot of 64bit issues.
    Cleaned up printk()s a bit.
    Fixed some obvious big endian problems.

    Tobias Ringstrom <tori@unhappy.mine.nu> :
    Use time_after for jiffies calculation.  Added ethtool
    support.  Updated PCI resource allocation.  Do not
    forget to unmap PCI mapped skbs.

    Alan Cox <alan@redhat.com>
    Added new PCI identifiers provided by Clear Zhang at ALi
    for their 1563 ethernet device.

    TODO

    Check on 64 bit boxes.
    Check and fix on big endian boxes.

    Test and make sure PCI latency is now correct for all cases.
*/

#define DRV_NAME	"dmfe"
#define DRV_VERSION	"1.36.4"
#define DRV_RELDATE	"2002-01-17"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/irq.h>


/* Board/System/Debug information/definition ---------------- */
#define PCI_DM9132_ID   0x91321282      /* Davicom DM9132 ID */
#define PCI_DM9102_ID   0x91021282      /* Davicom DM9102 ID */
#define PCI_DM9100_ID   0x91001282      /* Davicom DM9100 ID */
#define PCI_DM9009_ID   0x90091282      /* Davicom DM9009 ID */

#define DM9102_IO_SIZE  0x80
#define DM9102A_IO_SIZE 0x100
#define TX_MAX_SEND_CNT 0x1             /* Maximum tx packets per poll */
#define TX_DESC_CNT     0x10            /* Allocated Tx descriptors */
#define RX_DESC_CNT     0x20            /* Allocated Rx descriptors */
#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)      /* Max TX packet count */
#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)      /* TX wakeup count */
#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
#define TX_BUF_ALLOC    0x600
#define RX_ALLOC_SIZE   0x620
#define DM910X_RESET    1
#define CR0_DEFAULT     0x00E00000      /* TX & RX burst mode */
#define CR6_DEFAULT     0x00080000      /* HD */
#define CR7_DEFAULT     0x180c1
#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
#define MAX_PACKET_SIZE 1514
#define DMFE_MAX_MULTICAST 14
#define RX_COPY_SIZE    100
#define MAX_CHECK_PACKET 0x8000
#define DM9801_NOISE_FLOOR 8
#define DM9802_NOISE_FLOOR 5

#define DMFE_WOL_LINKCHANGE     0x20000000
#define DMFE_WOL_SAMPLEPACKET   0x10000000
#define DMFE_WOL_MAGICPACKET    0x08000000


#define DMFE_10MHF      0
#define DMFE_100MHF     1
#define DMFE_10MFD      4
#define DMFE_100MFD     5
#define DMFE_AUTO       8
#define DMFE_1M_HPNA    0x10

#define DMFE_TXTH_72    0x400000        /* TX TH 72 byte */
#define DMFE_TXTH_96    0x404000        /* TX TH 96 byte */
#define DMFE_TXTH_128   0x0000          /* TX TH 128 byte */
#define DMFE_TXTH_256   0x4000          /* TX TH 256 byte */
#define DMFE_TXTH_512   0x8000          /* TX TH 512 byte */
#define DMFE_TXTH_1K    0xC000          /* TX TH 1K byte */

#define DMFE_TIMER_WUT  (jiffies + HZ * 1)      /* timer wakeup time : 1 second */
#define DMFE_TX_TIMEOUT ((3*HZ)/2)      /* tx packet time-out time 1.5 s */
#define DMFE_TX_KICK    (HZ/2)          /* tx packet Kick-out time 0.5 s */

#define DMFE_DBUG(dbug_now, msg, value)                 \
    do {                                                \
        if (dmfe_debug || (dbug_now))                   \
            printk(KERN_ERR DRV_NAME ": %s %lx\n",      \
                (msg), (long) (value));                 \
    } while (0)
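/* Editor's usage note (sketch): DMFE_DBUG(0, "dmfe_open", 0) only prints when
 * the driver-wide dmfe_debug flag is set, while a non-zero first argument
 * forces the message out unconditionally (as the bus-error path below does). */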

#define SHOW_MEDIA_TYPE(mode) \
    printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \
        (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");


/* CR9 definition: SROM/MII */
#define CR9_SROM_READ   0x4800
#define CR9_SRCS        0x1
#define CR9_SRCLK       0x2
#define CR9_CRDOUT      0x8
#define SROM_DATA_0     0x0
#define SROM_DATA_1     0x4
#define PHY_DATA_1      0x20000
#define PHY_DATA_0      0x00000
#define MDCLKH          0x10000

#define PHY_POWER_DOWN  0x800

#define SROM_V41_CODE   0x14

#define SROM_CLK_WRITE(data, ioaddr) \
    outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
    udelay(5); \
    outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
    udelay(5); \
    outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
    udelay(5);

#define __CHK_IO_SIZE(pci_id, dev_rev) \
    (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
        DM9102A_IO_SIZE: DM9102_IO_SIZE)

#define CHK_IO_SIZE(pci_dev) \
    (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
        (pci_dev)->revision))
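/* Editor's note: per the macros above, a plain DM9102 decodes a 0x80-byte I/O
 * window, while DM9102A and DM9132 (any revision >= 0x30) decode 0x100 bytes;
 * CHK_IO_SIZE() picks the expected size from the PCI ID and revision so the
 * probe routine can sanity-check the BAR length. */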

/* Sten Check */
#define DEVICE net_device

/* Structure/enum declaration ------------------------------- */
struct tx_desc {
    __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
    char *tx_buf_ptr;                  /* Data for us */
    struct tx_desc *next_tx_desc;
} __attribute__(( aligned(32) ));

struct rx_desc {
    __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
    struct sk_buff *rx_skb_ptr;        /* Data for us */
    struct rx_desc *next_rx_desc;
} __attribute__(( aligned(32) ));
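/* Editor's note: in both descriptor structures the first four __le32 words
 * mirror the DM910x (tulip-style) hardware layout and are fetched by the chip
 * over DMA; the remaining members are driver-only bookkeeping that the
 * hardware never reads.  The aligned(32) attribute keeps each descriptor on a
 * 32-byte boundary, which the extra slack in the pool allocation assumes. */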

struct dmfe_board_info {
    u32 chip_id;                /* Chip vendor/Device ID */
    u8 chip_revision;           /* Chip revision */
    struct DEVICE *next_dev;    /* next device */
    struct pci_dev *pdev;       /* PCI device */
    spinlock_t lock;

    long ioaddr;                /* I/O base address */
    u32 cr0_data;
    u32 cr5_data;
    u32 cr6_data;
    u32 cr7_data;
    u32 cr15_data;

    /* pointer for memory physical address */
    dma_addr_t buf_pool_dma_ptr;    /* Tx buffer pool memory */
    dma_addr_t buf_pool_dma_start;  /* Tx buffer pool align dword */
    dma_addr_t desc_pool_dma_ptr;   /* descriptor pool memory */
    dma_addr_t first_tx_desc_dma;
    dma_addr_t first_rx_desc_dma;

    /* descriptor pointer */
    unsigned char *buf_pool_ptr;    /* Tx buffer pool memory */
    unsigned char *buf_pool_start;  /* Tx buffer pool align dword */
    unsigned char *desc_pool_ptr;   /* descriptor pool memory */
    struct tx_desc *first_tx_desc;
    struct tx_desc *tx_insert_ptr;
    struct tx_desc *tx_remove_ptr;
    struct rx_desc *first_rx_desc;
    struct rx_desc *rx_insert_ptr;
    struct rx_desc *rx_ready_ptr;   /* packet come pointer */
    unsigned long tx_packet_cnt;    /* transmitted packet count */
    unsigned long tx_queue_cnt;     /* wait to send packet count */
    unsigned long rx_avail_cnt;     /* available rx descriptor count */
    unsigned long interval_rx_cnt;  /* rx packet count a callback time */

    u16 HPNA_command;           /* For HPNA register 16 */
    u16 HPNA_timer;             /* For HPNA remote device check */
    u16 dbug_cnt;
    u16 NIC_capability;         /* NIC media capability */
    u16 PHY_reg4;               /* Saved Phyxcer register 4 value */

    u8 HPNA_present;            /* 0:none, 1:DM9801, 2:DM9802 */
    u8 chip_type;               /* Keep DM9102A chip type */
    u8 media_mode;              /* user specify media mode */
    u8 op_mode;                 /* real work media mode */
    u8 phy_addr;
    u8 wait_reset;              /* Hardware failed, need to reset */
    u8 dm910x_chk_mode;         /* Operating mode check */
    u8 first_in_callback;       /* Flag to record state */
    u8 wol_mode;                /* user WOL settings */
    struct timer_list timer;

    /* System defined statistic counter */
    struct net_device_stats stats;

    /* Driver defined statistic counter */
    unsigned long tx_fifo_underrun;
    unsigned long tx_loss_carrier;
    unsigned long tx_no_carrier;
    unsigned long tx_late_collision;
    unsigned long tx_excessive_collision;
    unsigned long tx_jabber_timeout;
    unsigned long reset_count;
    unsigned long reset_cr8;
    unsigned long reset_fatal;
    unsigned long reset_TXtimeout;

    /* NIC SROM data */
    unsigned char srom[128];
};

enum dmfe_offsets {
    DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
    DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
    DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
    DCR15 = 0x78
};

enum dmfe_CR6_bits {
    CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
    CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
    CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
};

/* Global variable declaration ----------------------------- */
static int __devinitdata printed_version;
static char version[] __devinitdata =
    KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
    DRV_VERSION " (" DRV_RELDATE ")\n";

static int dmfe_debug;
static unsigned char dmfe_media_mode = DMFE_AUTO;
static u32 dmfe_cr6_user_set;

/* For module input parameter */
static int debug;
static u32 cr6set;
static unsigned char mode = 8;
static u8 chkmode = 1;
static u8 HPNA_mode;            /* Default: Low Power/High Speed */
static u8 HPNA_rx_cmd;          /* Default: Disable Rx remote command */
static u8 HPNA_tx_cmd;          /* Default: Don't issue remote command */
static u8 HPNA_NoiseFloor;      /* Default: HPNA NoiseFloor */
static u8 SF_mode;              /* Special Function: 1:VLAN, 2:RX Flow Control
                                   4: TX pause packet */
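/* Editor's note (assumed, based on the matching dmfe_* globals above): the
 * module parameters are copied into the dmfe_* working variables at load
 * time, e.g. "modprobe dmfe mode=1" would force 100Mb half duplex via the
 * DMFE_* media codes, while the default mode=8 keeps auto-negotiation. */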


/* function declaration ------------------------------------- */
static int dmfe_open(struct DEVICE *);
static int dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
static int dmfe_stop(struct DEVICE *);
static struct net_device_stats * dmfe_get_stats(struct DEVICE *);
static void dmfe_set_filter_mode(struct DEVICE *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(long ,int);
static irqreturn_t dmfe_interrupt(int , void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_dmfe (struct net_device *dev);
#endif
static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
static void allocate_rx_buffer(struct dmfe_board_info *);
static void update_cr6(u32, unsigned long);
static void send_filter_frame(struct DEVICE * ,int);
static void dm9132_id_table(struct DEVICE * ,int);
static u16 phy_read(unsigned long, u8, u8, u32);
static void phy_write(unsigned long, u8, u8, u16, u32);
static void phy_write_1bit(unsigned long, u32);
static u16 phy_read_1bit(unsigned long);
static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
static void dmfe_dynamic_reset(struct DEVICE *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
static void dmfe_init_dm910x(struct DEVICE *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
static void dmfe_set_phyxcer(struct dmfe_board_info *);

/* DM910X network board routine ---------------------------- */

/*
 * Search DM910X board, allocate space and register it
 */

static int __devinit dmfe_init_one (struct pci_dev *pdev,
    const struct pci_device_id *ent)
{
    struct dmfe_board_info *db;     /* board information structure */
    struct net_device *dev;
    u32 pci_pmr;
    int i, err;
    DECLARE_MAC_BUF(mac);

    DMFE_DBUG(0, "dmfe_init_one()", 0);

    if (!printed_version++)
        printk(version);

    /* Init network device */
    dev = alloc_etherdev(sizeof(*db));
    if (dev == NULL)
        return -ENOMEM;
    SET_NETDEV_DEV(dev, &pdev->dev);

    if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
        printk(KERN_WARNING DRV_NAME
            ": 32-bit PCI DMA not available.\n");
        err = -ENODEV;
        goto err_out_free;
    }

    /* Enable Master/IO access, Disable memory access */
    err = pci_enable_device(pdev);
    if (err)
        goto err_out_free;

    if (!pci_resource_start(pdev, 0)) {
        printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
        err = -ENODEV;
        goto err_out_disable;
    }

    if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
        printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
        err = -ENODEV;
        goto err_out_disable;
    }

#if 0   /* pci_{enable_device,set_master} sets minimum latency for us now */

    /* Set Latency Timer 80h */
    /* FIXME: setting values > 32 breaks some SiS 559x stuff.
       Need a PCI quirk.. */

    pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
#endif

    if (pci_request_regions(pdev, DRV_NAME)) {
        printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
        err = -ENODEV;
        goto err_out_disable;
    }

    /* Init system & device */
    db = netdev_priv(dev);

    /* Allocate Tx/Rx descriptor memory */
    db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
        DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);

    db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
        TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);

    db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
    db->first_tx_desc_dma = db->desc_pool_dma_ptr;
    db->buf_pool_start = db->buf_pool_ptr;
    db->buf_pool_dma_start = db->buf_pool_dma_ptr;

    db->chip_id = ent->driver_data;
    db->ioaddr = pci_resource_start(pdev, 0);
    db->chip_revision = pdev->revision;
    db->wol_mode = 0;

    db->pdev = pdev;

    dev->base_addr = db->ioaddr;
    dev->irq = pdev->irq;
    pci_set_drvdata(pdev, dev);
    dev->open = &dmfe_open;
    dev->hard_start_xmit = &dmfe_start_xmit;
    dev->stop = &dmfe_stop;
    dev->get_stats = &dmfe_get_stats;
    dev->set_multicast_list = &dmfe_set_filter_mode;
#ifdef CONFIG_NET_POLL_CONTROLLER
    dev->poll_controller = &poll_dmfe;
#endif
    dev->ethtool_ops = &netdev_ethtool_ops;
    netif_carrier_off(dev);
    spin_lock_init(&db->lock);

    pci_read_config_dword(pdev, 0x50, &pci_pmr);
    pci_pmr &= 0x70000;
    if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
        db->chip_type = 1;      /* DM9102A E3 */
    else
        db->chip_type = 0;

    /* read 64 word srom data */
    for (i = 0; i < 64; i++)
        ((__le16 *) db->srom)[i] =
            cpu_to_le16(read_srom_word(db->ioaddr, i));

    /* Set Node address */
    for (i = 0; i < 6; i++)
        dev->dev_addr[i] = db->srom[20 + i];

    err = register_netdev (dev);
    if (err)
        goto err_out_res;

    printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, "
        "%s, irq %d.\n",
        dev->name,
        ent->driver_data >> 16,
        pci_name(pdev),
        print_mac(mac, dev->dev_addr),
        dev->irq);

    pci_set_master(pdev);

    return 0;

err_out_res:
    pci_release_regions(pdev);
err_out_disable:
    pci_disable_device(pdev);
err_out_free:
    pci_set_drvdata(pdev, NULL);
    free_netdev(dev);

    return err;
}


static void __devexit dmfe_remove_one (struct pci_dev *pdev)
{
    struct net_device *dev = pci_get_drvdata(pdev);
    struct dmfe_board_info *db = netdev_priv(dev);

    DMFE_DBUG(0, "dmfe_remove_one()", 0);

    if (dev) {

        unregister_netdev(dev);

        pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
            DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
            db->desc_pool_dma_ptr);
        pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
            db->buf_pool_ptr, db->buf_pool_dma_ptr);
        pci_release_regions(pdev);
        free_netdev(dev);       /* free board information */

        pci_set_drvdata(pdev, NULL);
    }

    DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
}


/*
 * Open the interface.
 * The interface is opened whenever "ifconfig" activates it.
 */

static int dmfe_open(struct DEVICE *dev)
{
    int ret;
    struct dmfe_board_info *db = netdev_priv(dev);

    DMFE_DBUG(0, "dmfe_open", 0);

    ret = request_irq(dev->irq, &dmfe_interrupt,
        IRQF_SHARED, dev->name, dev);
    if (ret)
        return ret;

    /* system variable init */
    db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
    db->tx_packet_cnt = 0;
    db->tx_queue_cnt = 0;
    db->rx_avail_cnt = 0;
    db->wait_reset = 0;

    db->first_in_callback = 0;
    db->NIC_capability = 0xf;   /* All capability */
    db->PHY_reg4 = 0x1e0;

    /* CR6 operation mode decision */
    if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
        (db->chip_revision >= 0x30) ) {
        db->cr6_data |= DMFE_TXTH_256;
        db->cr0_data = CR0_DEFAULT;
        db->dm910x_chk_mode=4;          /* Enter the normal mode */
    } else {
        db->cr6_data |= CR6_SFT;        /* Store & Forward mode */
        db->cr0_data = 0;
        db->dm910x_chk_mode = 1;        /* Enter the check mode */
    }

    /* Initialize DM910X board */
    dmfe_init_dm910x(dev);

    /* Activate system interface */
    netif_wake_queue(dev);

    /* set and start a timer process */
    init_timer(&db->timer);
    db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
    db->timer.data = (unsigned long)dev;
    db->timer.function = &dmfe_timer;
    add_timer(&db->timer);

    return 0;
}


/* Initialize DM910X board
 * Reset DM910X board
 * Initialize TX/Rx descriptor chain structure
 * Send the set-up frame
 * Enable Tx/Rx machine
 */

static void dmfe_init_dm910x(struct DEVICE *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    unsigned long ioaddr = db->ioaddr;

    DMFE_DBUG(0, "dmfe_init_dm910x()", 0);

    /* Reset DM910x MAC controller */
    outl(DM910X_RESET, ioaddr + DCR0);  /* RESET MAC */
    udelay(100);
    outl(db->cr0_data, ioaddr + DCR0);
    udelay(5);

    /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
    db->phy_addr = 1;

    /* Parse SROM and media mode */
    dmfe_parse_srom(db);
    db->media_mode = dmfe_media_mode;

    /* RESET Phyxcer Chip by GPR port bit 7 */
    outl(0x180, ioaddr + DCR12);        /* Let bit 7 output port */
    if (db->chip_id == PCI_DM9009_ID) {
        outl(0x80, ioaddr + DCR12);     /* Issue RESET signal */
        mdelay(300);                    /* Delay 300 ms */
    }
    outl(0x0, ioaddr + DCR12);  /* Clear RESET signal */

    /* Process Phyxcer Media Mode */
    if ( !(db->media_mode & 0x10) )     /* Force 1M mode */
        dmfe_set_phyxcer(db);

    /* Media Mode Process */
    if ( !(db->media_mode & DMFE_AUTO) )
        db->op_mode = db->media_mode;   /* Force Mode */

    /* Initialize Transmit/Receive descriptor and CR3/4 */
    dmfe_descriptor_init(db, ioaddr);

    /* Init CR6 to program DM910x operation */
    update_cr6(db->cr6_data, ioaddr);

    /* Send setup frame */
    if (db->chip_id == PCI_DM9132_ID)
        dm9132_id_table(dev, dev->mc_count);    /* DM9132 */
    else
        send_filter_frame(dev, dev->mc_count);  /* DM9102/DM9102A */

    /* Init CR7, interrupt active bit */
    db->cr7_data = CR7_DEFAULT;
    outl(db->cr7_data, ioaddr + DCR7);

    /* Init CR15, Tx jabber and Rx watchdog timer */
    outl(db->cr15_data, ioaddr + DCR15);

    /* Enable DM910X Tx/Rx function */
    db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
    update_cr6(db->cr6_data, ioaddr);
}


/*
 * Hardware start transmission.
 * Send a packet to media from the upper layer.
 */

static int dmfe_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    struct tx_desc *txptr;
    unsigned long flags;

    DMFE_DBUG(0, "dmfe_start_xmit", 0);

    /* Resource flag check */
    netif_stop_queue(dev);

    /* Too large packet check */
    if (skb->len > MAX_PACKET_SIZE) {
        printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
        dev_kfree_skb(skb);
        return 0;
    }

    spin_lock_irqsave(&db->lock, flags);

    /* No Tx resource check, it should never happen normally */
    if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
        spin_unlock_irqrestore(&db->lock, flags);
        printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
            db->tx_queue_cnt);
        return 1;
    }

    /* Disable NIC interrupt */
    outl(0, dev->base_addr + DCR7);

    /* transmit this packet */
    txptr = db->tx_insert_ptr;
    skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
    txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
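    /* Editor's note (tulip-style descriptor assumption): 0xe1000000 sets the
     * interrupt-on-completion, last-segment and first-segment bits plus the
     * chained-descriptor bit, so each skb occupies exactly one descriptor. */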

    /* Point to next transmit free descriptor */
    db->tx_insert_ptr = txptr->next_tx_desc;

    /* Transmit Packet Process */
    if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
        txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
        db->tx_packet_cnt++;                    /* Ready to send */
        outl(0x1, dev->base_addr + DCR1);       /* Issue Tx polling */
        dev->trans_start = jiffies;             /* saved time stamp */
    } else {
        db->tx_queue_cnt++;                     /* queue TX packet */
        outl(0x1, dev->base_addr + DCR1);       /* Issue Tx polling */
    }

    /* Tx resource check */
    if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
        netif_wake_queue(dev);

    /* Restore CR7 to enable interrupt */
    spin_unlock_irqrestore(&db->lock, flags);
    outl(db->cr7_data, dev->base_addr + DCR7);

    /* free this SKB */
    dev_kfree_skb(skb);

    return 0;
}


/*
 * Stop the interface.
 * The interface is stopped when it is brought down.
 */

static int dmfe_stop(struct DEVICE *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    unsigned long ioaddr = dev->base_addr;

    DMFE_DBUG(0, "dmfe_stop", 0);

    /* disable system */
    netif_stop_queue(dev);

    /* delete timer */
    del_timer_sync(&db->timer);

    /* Reset & stop DM910X board */
    outl(DM910X_RESET, ioaddr + DCR0);
    udelay(5);
    phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);

    /* free interrupt */
    free_irq(dev->irq, dev);

    /* free allocated rx buffer */
    dmfe_free_rxbuffer(db);

#if 0
    /* show statistic counter */
    printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
        " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
        db->tx_fifo_underrun, db->tx_excessive_collision,
        db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
        db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
        db->reset_fatal, db->reset_TXtimeout);
#endif

    return 0;
}


/*
 * DM9102 interrupt handler
 * receive the packet to upper layer, free the transmitted packet
 */

static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
    struct DEVICE *dev = dev_id;
    struct dmfe_board_info *db = netdev_priv(dev);
    unsigned long ioaddr = dev->base_addr;
    unsigned long flags;

    DMFE_DBUG(0, "dmfe_interrupt()", 0);

    spin_lock_irqsave(&db->lock, flags);

    /* Got DM910X status */
    db->cr5_data = inl(ioaddr + DCR5);
    outl(db->cr5_data, ioaddr + DCR5);
    if ( !(db->cr5_data & 0xc1) ) {
        spin_unlock_irqrestore(&db->lock, flags);
        return IRQ_HANDLED;
    }

    /* Disable all interrupt in CR7 to solve the interrupt edge problem */
    outl(0, ioaddr + DCR7);

    /* Check system status */
    if (db->cr5_data & 0x2000) {
        /* system bus error happened */
        DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
        db->reset_fatal++;
        db->wait_reset = 1;     /* Need to RESET */
        spin_unlock_irqrestore(&db->lock, flags);
        return IRQ_HANDLED;
    }

    /* Receive the incoming packet */
    if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
        dmfe_rx_packet(dev, db);

    /* reallocate rx descriptor buffer */
    if (db->rx_avail_cnt<RX_DESC_CNT)
        allocate_rx_buffer(db);

    /* Free the transmitted descriptor */
    if ( db->cr5_data & 0x01)
        dmfe_free_tx_pkt(dev, db);

    /* Mode Check */
    if (db->dm910x_chk_mode & 0x2) {
        db->dm910x_chk_mode = 0x4;
        db->cr6_data |= 0x100;
        update_cr6(db->cr6_data, db->ioaddr);
    }

    /* Restore CR7 to enable interrupt mask */
    outl(db->cr7_data, ioaddr + DCR7);

    spin_unlock_irqrestore(&db->lock, flags);
    return IRQ_HANDLED;
}


#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */

static void poll_dmfe (struct net_device *dev)
{
    /* disable_irq here is not very nice, but with the lockless
       interrupt handler we have no other choice. */
    disable_irq(dev->irq);
    dmfe_interrupt (dev->irq, dev);
    enable_irq(dev->irq);
}
#endif

/*
 * Free TX resource after TX complete
 */

static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
{
    struct tx_desc *txptr;
    unsigned long ioaddr = dev->base_addr;
    u32 tdes0;

    txptr = db->tx_remove_ptr;
    while(db->tx_packet_cnt) {
        tdes0 = le32_to_cpu(txptr->tdes0);
        /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
        if (tdes0 & 0x80000000)
            break;

        /* A packet has been transmitted */
        db->tx_packet_cnt--;
        db->stats.tx_packets++;

        /* Transmit statistic counter */
        if ( tdes0 != 0x7fffffff ) {
            /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
            db->stats.collisions += (tdes0 >> 3) & 0xf;
            db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
            if (tdes0 & TDES0_ERR_MASK) {
                db->stats.tx_errors++;

                if (tdes0 & 0x0002) {   /* UnderRun */
                    db->tx_fifo_underrun++;
                    if ( !(db->cr6_data & CR6_SFT) ) {
                        db->cr6_data = db->cr6_data | CR6_SFT;
                        update_cr6(db->cr6_data, db->ioaddr);
                    }
                }
                if (tdes0 & 0x0100)
                    db->tx_excessive_collision++;
                if (tdes0 & 0x0200)
                    db->tx_late_collision++;
                if (tdes0 & 0x0400)
                    db->tx_no_carrier++;
                if (tdes0 & 0x0800)
                    db->tx_loss_carrier++;
                if (tdes0 & 0x4000)
                    db->tx_jabber_timeout++;
            }
        }

        txptr = txptr->next_tx_desc;
    }/* End of while */

    /* Update TX remove pointer to next */
    db->tx_remove_ptr = txptr;

    /* Send the Tx packet in queue */
    if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
        txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
        db->tx_packet_cnt++;                    /* Ready to send */
        db->tx_queue_cnt--;
        outl(0x1, ioaddr + DCR1);               /* Issue Tx polling */
        dev->trans_start = jiffies;             /* saved time stamp */
    }

    /* Resource available check */
    if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
        netif_wake_queue(dev);  /* Activate upper layer, send again */
}


/*
 * Calculate the CRC value of the Rx packet
 * flag = 1 : return the reverse CRC (for the received packet CRC)
 *        0 : return the normal CRC (for Hash Table index)
 */

static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
{
    u32 crc = crc32(~0, Data, Len);
    if (flag) crc = ~crc;
    return crc;
}
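/* Editor's usage note: dmfe_rx_packet() compares cal_CRC(data, len, 1) against
 * the CRC appended to a received frame when the driver is in check mode, and
 * dm9132_id_table() takes the low 6 bits of cal_CRC(addr, 6, 0) as the
 * multicast hash-table index. */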


/*
 * Receive the incoming packet and pass it to the upper layer
 */

static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
{
    struct rx_desc *rxptr;
    struct sk_buff *skb, *newskb;
    int rxlen;
    u32 rdes0;

    rxptr = db->rx_ready_ptr;

    while(db->rx_avail_cnt) {
        rdes0 = le32_to_cpu(rxptr->rdes0);
        if (rdes0 & 0x80000000) /* packet owner check */
            break;

        db->rx_avail_cnt--;
        db->interval_rx_cnt++;

        pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
            RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);

        if ( (rdes0 & 0x300) != 0x300) {
            /* A packet without First/Last flag */
            /* reuse this SKB */
            DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
            dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
        } else {
            /* A packet with First/Last flag */
            rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;

            /* error summary bit check */
            if (rdes0 & 0x8000) {
                /* This is an error packet */
                //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
                db->stats.rx_errors++;
                if (rdes0 & 1)
                    db->stats.rx_fifo_errors++;
                if (rdes0 & 2)
                    db->stats.rx_crc_errors++;
                if (rdes0 & 0x80)
                    db->stats.rx_length_errors++;
            }

            if ( !(rdes0 & 0x8000) ||
                ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
                skb = rxptr->rx_skb_ptr;

                /* Check received packet CRC if needed */
                if ( (db->dm910x_chk_mode & 1) &&
                    (cal_CRC(skb->data, rxlen, 1) !=
                    (*(u32 *) (skb->data+rxlen) ))) {   /* FIXME (?) */
                    /* Found an error in the received packet */
                    dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
                    db->dm910x_chk_mode = 3;
                } else {
                    /* Good packet, send to upper layer */
                    /* Short packets use a new SKB */
                    if ((rxlen < RX_COPY_SIZE) &&
                        ((newskb = dev_alloc_skb(rxlen + 2))
                        != NULL)) {

                        skb = newskb;
                        /* size less than COPY_SIZE, allocate a rxlen SKB */
                        skb_reserve(skb, 2); /* 16byte align */
                        skb_copy_from_linear_data(rxptr->rx_skb_ptr,
                            skb_put(skb, rxlen),
                            rxlen);
                        dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
                    } else
                        skb_put(skb, rxlen);

                    skb->protocol = eth_type_trans(skb, dev);
                    netif_rx(skb);
                    dev->last_rx = jiffies;
                    db->stats.rx_packets++;
                    db->stats.rx_bytes += rxlen;
                }
            } else {
                /* Reuse SKB buffer when the packet has errors */
                DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
                dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
            }
        }

        rxptr = rxptr->next_rx_desc;
    }

    db->rx_ready_ptr = rxptr;
}


/*
 * Get statistics from driver.
 */

static struct net_device_stats * dmfe_get_stats(struct DEVICE *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);

    DMFE_DBUG(0, "dmfe_get_stats", 0);
    return &db->stats;
}


/*
 * Set DM910X multicast address
 */

static void dmfe_set_filter_mode(struct DEVICE * dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    unsigned long flags;

    DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
    spin_lock_irqsave(&db->lock, flags);

    if (dev->flags & IFF_PROMISC) {
        DMFE_DBUG(0, "Enable PROM Mode", 0);
        db->cr6_data |= CR6_PM | CR6_PBF;
        update_cr6(db->cr6_data, db->ioaddr);
        spin_unlock_irqrestore(&db->lock, flags);
        return;
    }

    if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
        DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
        db->cr6_data &= ~(CR6_PM | CR6_PBF);
        db->cr6_data |= CR6_PAM;
        spin_unlock_irqrestore(&db->lock, flags);
        return;
    }

    DMFE_DBUG(0, "Set multicast address", dev->mc_count);
    if (db->chip_id == PCI_DM9132_ID)
        dm9132_id_table(dev, dev->mc_count);    /* DM9132 */
    else
        send_filter_frame(dev, dev->mc_count);  /* DM9102/DM9102A */
    spin_unlock_irqrestore(&db->lock, flags);
}

/*
 * Ethtool interface
 */

static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
    struct ethtool_drvinfo *info)
{
    struct dmfe_board_info *np = netdev_priv(dev);

    strcpy(info->driver, DRV_NAME);
    strcpy(info->version, DRV_VERSION);
    if (np->pdev)
        strcpy(info->bus_info, pci_name(np->pdev));
    else
        sprintf(info->bus_info, "EISA 0x%lx %d",
            dev->base_addr, dev->irq);
}

static int dmfe_ethtool_set_wol(struct net_device *dev,
    struct ethtool_wolinfo *wolinfo)
{
    struct dmfe_board_info *db = netdev_priv(dev);

    if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
        WAKE_ARP | WAKE_MAGICSECURE))
        return -EOPNOTSUPP;

    db->wol_mode = wolinfo->wolopts;
    return 0;
}

static void dmfe_ethtool_get_wol(struct net_device *dev,
    struct ethtool_wolinfo *wolinfo)
{
    struct dmfe_board_info *db = netdev_priv(dev);

    wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
    wolinfo->wolopts = db->wol_mode;
    return;
}


static const struct ethtool_ops netdev_ethtool_ops = {
    .get_drvinfo = dmfe_ethtool_get_drvinfo,
    .get_link = ethtool_op_get_link,
    .set_wol = dmfe_ethtool_set_wol,
    .get_wol = dmfe_ethtool_get_wol,
};

/*
 * A periodic timer routine
 * Dynamic media sense, allocate Rx buffer...
 */

static void dmfe_timer(unsigned long data)
{
    u32 tmp_cr8;
    unsigned char tmp_cr12;
    struct DEVICE *dev = (struct DEVICE *) data;
    struct dmfe_board_info *db = netdev_priv(dev);
    unsigned long flags;

    int link_ok, link_ok_phy;

    DMFE_DBUG(0, "dmfe_timer()", 0);
    spin_lock_irqsave(&db->lock, flags);

    /* Media mode process when Link OK before enter this route */
    if (db->first_in_callback == 0) {
        db->first_in_callback = 1;
        if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
            db->cr6_data &= ~0x40000;
            update_cr6(db->cr6_data, db->ioaddr);
            phy_write(db->ioaddr,
                db->phy_addr, 0, 0x1000, db->chip_id);
            db->cr6_data |= 0x40000;
            update_cr6(db->cr6_data, db->ioaddr);
            db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
            add_timer(&db->timer);
            spin_unlock_irqrestore(&db->lock, flags);
            return;
        }
    }


    /* Operating Mode Check */
    if ( (db->dm910x_chk_mode & 0x1) &&
        (db->stats.rx_packets > MAX_CHECK_PACKET) )
        db->dm910x_chk_mode = 0x4;

    /* Dynamic reset DM910X : system error or transmit time-out */
    tmp_cr8 = inl(db->ioaddr + DCR8);
    if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
        db->reset_cr8++;
        db->wait_reset = 1;
    }
    db->interval_rx_cnt = 0;

    /* TX polling kick monitor */
    if ( db->tx_packet_cnt &&
        time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) {
        outl(0x1, dev->base_addr + DCR1);   /* Tx polling again */

        /* TX Timeout */
        if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
            db->reset_TXtimeout++;
            db->wait_reset = 1;
            printk(KERN_WARNING "%s: Tx timeout - resetting\n",
                dev->name);
        }
    }

    if (db->wait_reset) {
        DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
        db->reset_count++;
        dmfe_dynamic_reset(dev);
        db->first_in_callback = 0;
        db->timer.expires = DMFE_TIMER_WUT;
        add_timer(&db->timer);
        spin_unlock_irqrestore(&db->lock, flags);
        return;
    }

    /* Link status check, Dynamic media type change */
    if (db->chip_id == PCI_DM9132_ID)
        tmp_cr12 = inb(db->ioaddr + DCR9 + 3);  /* DM9132 */
    else
        tmp_cr12 = inb(db->ioaddr + DCR12);     /* DM9102/DM9102A */

    if ( ((db->chip_id == PCI_DM9102_ID) &&
        (db->chip_revision == 0x30)) ||
        ((db->chip_id == PCI_DM9132_ID) &&
        (db->chip_revision == 0x10)) ) {
        /* DM9102A Chip */
        if (tmp_cr12 & 2)
            link_ok = 0;
        else
            link_ok = 1;
    }
    else
        /* 0x43 is used instead of 0x3 because bit 6 should represent
           link status of external PHY */
        link_ok = (tmp_cr12 & 0x43) ? 1 : 0;


    /* If the chip reports that the link failed, it could be because the
       external PHY link status pin is not connected correctly to the chip.
       To be sure, ask the PHY too.
     */

    /* need a dummy read because of PHY's register latch */
    phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
    link_ok_phy = (phy_read (db->ioaddr,
        db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;

    if (link_ok_phy != link_ok) {
        DMFE_DBUG (0, "PHY and chip report different link status", 0);
        link_ok = link_ok | link_ok_phy;
    }

    if ( !link_ok && netif_carrier_ok(dev)) {
        /* Link Failed */
        DMFE_DBUG(0, "Link Failed", tmp_cr12);
        netif_carrier_off(dev);

        /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
        /* AUTO or force 1M Homerun/Longrun don't need */
        if ( !(db->media_mode & 0x38) )
            phy_write(db->ioaddr, db->phy_addr,
                0, 0x1000, db->chip_id);

        /* AUTO mode, if INT phyxcer link failed, select EXT device */
        if (db->media_mode & DMFE_AUTO) {
            /* 10/100M link failed, used 1M Home-Net */
            db->cr6_data|=0x00040000;   /* bit18=1, MII */
            db->cr6_data&=~0x00000200;  /* bit9=0, HD mode */
            update_cr6(db->cr6_data, db->ioaddr);
        }
    } else if (!netif_carrier_ok(dev)) {

        DMFE_DBUG(0, "Link link OK", tmp_cr12);

        /* Auto Sense Speed */
        if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
            netif_carrier_on(dev);
            SHOW_MEDIA_TYPE(db->op_mode);
        }

        dmfe_process_mode(db);
    }

    /* HPNA remote command check */
    if (db->HPNA_command & 0xf00) {
        db->HPNA_timer--;
        if (!db->HPNA_timer)
            dmfe_HPNA_remote_cmd_chk(db);
    }

    /* Timer active again */
    db->timer.expires = DMFE_TIMER_WUT;
    add_timer(&db->timer);
    spin_unlock_irqrestore(&db->lock, flags);
}


/*
 * Dynamic reset the DM910X board
 * Stop DM910X board
 * Free Tx/Rx allocated memory
 * Reset DM910X board
 * Re-initialize DM910X board
 */

static void dmfe_dynamic_reset(struct DEVICE *dev)
{
    struct dmfe_board_info *db = netdev_priv(dev);

    DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);

    /* Stop MAC controller */
    db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);     /* Disable Tx/Rx */
    update_cr6(db->cr6_data, dev->base_addr);
    outl(0, dev->base_addr + DCR7);             /* Disable Interrupt */
    outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);

    /* Disable upper layer interface */
    netif_stop_queue(dev);

    /* Free allocated Rx buffers */
    dmfe_free_rxbuffer(db);

    /* system variable init */
    db->tx_packet_cnt = 0;
    db->tx_queue_cnt = 0;
    db->rx_avail_cnt = 0;
    netif_carrier_off(dev);
    db->wait_reset = 0;

    /* Re-initialize DM910X board */
    dmfe_init_dm910x(dev);

    /* Restart upper layer interface */
    netif_wake_queue(dev);
}


/*
 * free all allocated rx buffer
 */

static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
{
    DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);

    /* free allocated rx buffer */
    while (db->rx_avail_cnt) {
        dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
        db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
        db->rx_avail_cnt--;
    }
}


/*
 * Reuse the SK buffer
 */

static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
{
    struct rx_desc *rxptr = db->rx_insert_ptr;

    if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
        rxptr->rx_skb_ptr = skb;
        rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
            skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
        wmb();
        rxptr->rdes0 = cpu_to_le32(0x80000000);
        db->rx_avail_cnt++;
        db->rx_insert_ptr = rxptr->next_rx_desc;
    } else
        DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
}


/*
 * Initialize transmit/Receive descriptor
 * Using Chain structure, and allocate Tx/Rx buffer
 */
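/* Editor's note on the memory layout: desc_pool_ptr holds all TX_DESC_CNT tx
 * descriptors followed immediately by the RX_DESC_CNT rx descriptors in one
 * coherent DMA block; the extra 0x20 bytes in that allocation presumably
 * leave slack for the 32-byte descriptor alignment.  Tx data buffers are
 * carved from the separate buf_pool allocation, TX_BUF_ALLOC bytes each. */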

static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
{
    struct tx_desc *tmp_tx;
    struct rx_desc *tmp_rx;
    unsigned char *tmp_buf;
    dma_addr_t tmp_tx_dma, tmp_rx_dma;
    dma_addr_t tmp_buf_dma;
    int i;

    DMFE_DBUG(0, "dmfe_descriptor_init()", 0);

    /* tx descriptor start pointer */
    db->tx_insert_ptr = db->first_tx_desc;
    db->tx_remove_ptr = db->first_tx_desc;
    outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */

    /* rx descriptor start pointer */
    db->first_rx_desc = (void *)db->first_tx_desc +
        sizeof(struct tx_desc) * TX_DESC_CNT;

    db->first_rx_desc_dma = db->first_tx_desc_dma +
        sizeof(struct tx_desc) * TX_DESC_CNT;
    db->rx_insert_ptr = db->first_rx_desc;
    db->rx_ready_ptr = db->first_rx_desc;
    outl(db->first_rx_desc_dma, ioaddr + DCR3);     /* RX DESC address */

    /* Init Transmit chain */
    tmp_buf = db->buf_pool_start;
    tmp_buf_dma = db->buf_pool_dma_start;
    tmp_tx_dma = db->first_tx_desc_dma;
    for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
        tmp_tx->tx_buf_ptr = tmp_buf;
        tmp_tx->tdes0 = cpu_to_le32(0);
        tmp_tx->tdes1 = cpu_to_le32(0x81000000);    /* IC, chain */
        tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
        tmp_tx_dma += sizeof(struct tx_desc);
        tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
        tmp_tx->next_tx_desc = tmp_tx + 1;
        tmp_buf = tmp_buf + TX_BUF_ALLOC;
        tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
    }
    (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
    tmp_tx->next_tx_desc = db->first_tx_desc;

    /* Init Receive descriptor chain */
    tmp_rx_dma=db->first_rx_desc_dma;
    for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
        tmp_rx->rdes0 = cpu_to_le32(0);
        tmp_rx->rdes1 = cpu_to_le32(0x01000600);
        tmp_rx_dma += sizeof(struct rx_desc);
        tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
        tmp_rx->next_rx_desc = tmp_rx + 1;
    }
    (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
    tmp_rx->next_rx_desc = db->first_rx_desc;

    /* pre-allocate Rx buffer */
    allocate_rx_buffer(db);
}


/*
 * Update CR6 value
 * First stop the DM910X, then write the new value and restart it
 */

static void update_cr6(u32 cr6_data, unsigned long ioaddr)
{
    u32 cr6_tmp;

    cr6_tmp = cr6_data & ~0x2002;       /* stop Tx/Rx */
    outl(cr6_tmp, ioaddr + DCR6);
    udelay(5);
    outl(cr6_data, ioaddr + DCR6);
    udelay(5);
}


/*
 * Send a setup frame for DM9132
 * This setup frame initializes the DM910X address filter mode
 */

static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
{
    struct dev_mc_list *mcptr;
    u16 * addrptr;
    unsigned long ioaddr = dev->base_addr+0xc0;     /* ID Table */
    u32 hash_val;
    u16 i, hash_table[4];

    DMFE_DBUG(0, "dm9132_id_table()", 0);

    /* Node address */
    addrptr = (u16 *) dev->dev_addr;
    outw(addrptr[0], ioaddr);
    ioaddr += 4;
    outw(addrptr[1], ioaddr);
    ioaddr += 4;
    outw(addrptr[2], ioaddr);
    ioaddr += 4;

    /* Clear Hash Table */
    for (i = 0; i < 4; i++)
        hash_table[i] = 0x0;

    /* broadcast address */
    hash_table[3] = 0x8000;

    /* the multicast address in Hash Table : 64 bits */
    for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
        hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
        hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
    }

    /* Write the hash table to MAC MD table */
    for (i = 0; i < 4; i++, ioaddr += 4)
        outw(hash_table[i], ioaddr);
}


/*
 * Send a setup frame for DM9102/DM9102A
 * This setup frame initializes the DM910X address filter mode
 */
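/* Editor's note: the setup frame below is the tulip-style 192-byte perfect
 * filter: 16 entries of three 16-bit address words, each word stored in the
 * low half of a 32-bit slot.  Entry 0 is the node address, entry 1 the
 * broadcast address, then up to 14 multicast addresses, with unused entries
 * padded with the broadcast pattern. */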

static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
{
    struct dmfe_board_info *db = netdev_priv(dev);
    struct dev_mc_list *mcptr;
    struct tx_desc *txptr;
    u16 * addrptr;
    u32 * suptr;
    int i;

    DMFE_DBUG(0, "send_filter_frame()", 0);

    txptr = db->tx_insert_ptr;
    suptr = (u32 *) txptr->tx_buf_ptr;

    /* Node address */
    addrptr = (u16 *) dev->dev_addr;
    *suptr++ = addrptr[0];
    *suptr++ = addrptr[1];
    *suptr++ = addrptr[2];

    /* broadcast address */
    *suptr++ = 0xffff;
    *suptr++ = 0xffff;
    *suptr++ = 0xffff;

    /* fit the multicast address */
    for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
        addrptr = (u16 *) mcptr->dmi_addr;
        *suptr++ = addrptr[0];
        *suptr++ = addrptr[1];
        *suptr++ = addrptr[2];
    }

    for (; i<14; i++) {
        *suptr++ = 0xffff;
        *suptr++ = 0xffff;
        *suptr++ = 0xffff;
    }

    /* prepare the setup frame */
    db->tx_insert_ptr = txptr->next_tx_desc;
    txptr->tdes1 = cpu_to_le32(0x890000c0);

    /* Resource Check and Send the setup packet */
    if (!db->tx_packet_cnt) {
        /* Resource Empty */
        db->tx_packet_cnt++;
        txptr->tdes0 = cpu_to_le32(0x80000000);
        update_cr6(db->cr6_data | 0x2000, dev->base_addr);
        outl(0x1, dev->base_addr + DCR1);   /* Issue Tx polling */
        update_cr6(db->cr6_data, dev->base_addr);
        dev->trans_start = jiffies;
    } else
        db->tx_queue_cnt++;     /* Put in TX queue */
}


/*
 * Allocate rx buffers,
 * allocating as many Rx buffers as possible
 */

static void allocate_rx_buffer(struct dmfe_board_info *db)
{
    struct rx_desc *rxptr;
    struct sk_buff *skb;

    rxptr = db->rx_insert_ptr;

    while(db->rx_avail_cnt < RX_DESC_CNT) {
        if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
            break;
        rxptr->rx_skb_ptr = skb; /* FIXME (?) */
        rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
            RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
        wmb();
        rxptr->rdes0 = cpu_to_le32(0x80000000);
        rxptr = rxptr->next_rx_desc;
        db->rx_avail_cnt++;
    }

    db->rx_insert_ptr = rxptr;
}


/*
 * Read one word data from the serial ROM
 */
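/* Editor's note: this bit-bangs a 93C46-style serial EEPROM through CR9: a
 * read command (start bits 1-1-0), a 6-bit word offset, then 16 data bits
 * clocked out MSB first.  SROM_CLK_WRITE() toggles one clock cycle with the
 * data line held at the given level. */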

static u16 read_srom_word(long ioaddr, int offset)
{
    int i;
    u16 srom_data = 0;
    long cr9_ioaddr = ioaddr + DCR9;

    outl(CR9_SROM_READ, cr9_ioaddr);
    outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

    /* Send the Read Command 110b */
    SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
    SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
    SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);

    /* Send the offset */
    for (i = 5; i >= 0; i--) {
        srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
        SROM_CLK_WRITE(srom_data, cr9_ioaddr);
    }

    outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);

    for (i = 16; i > 0; i--) {
        outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
        udelay(5);
        srom_data = (srom_data << 1) |
            ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
        outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
        udelay(5);
    }

    outl(CR9_SROM_READ, cr9_ioaddr);
    return srom_data;
}


/*
 * Auto sense the media mode
 */

static u8 dmfe_sense_speed(struct dmfe_board_info * db)
{
    u8 ErrFlag = 0;
    u16 phy_mode;

    /* CR6 bit18=0, select 10/100M */
    update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);

    phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
    phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
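    /* Editor's note: the PHY status register latches link-down events, so it
     * is read twice on purpose; the first read clears a stale indication and
     * the second reflects the current state (same trick as in dmfe_timer()). */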

    if ( (phy_mode & 0x24) == 0x24 ) {
        if (db->chip_id == PCI_DM9132_ID)   /* DM9132 */
            phy_mode = phy_read(db->ioaddr,
                db->phy_addr, 7, db->chip_id) & 0xf000;
        else                                /* DM9102/DM9102A */
            phy_mode = phy_read(db->ioaddr,
                db->phy_addr, 17, db->chip_id) & 0xf000;
        /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
        switch (phy_mode) {
        case 0x1000: db->op_mode = DMFE_10MHF; break;
        case 0x2000: db->op_mode = DMFE_10MFD; break;
        case 0x4000: db->op_mode = DMFE_100MHF; break;
        case 0x8000: db->op_mode = DMFE_100MFD; break;
        default: db->op_mode = DMFE_10MHF;
            ErrFlag = 1;
            break;
        }
    } else {
        db->op_mode = DMFE_10MHF;
        DMFE_DBUG(0, "Link Failed :", phy_mode);
        ErrFlag = 1;
    }

    return ErrFlag;
}

1651
1652/*
1653 * Set 10/100 phyxcer capability
1654 * AUTO mode : phyxcer register4 is NIC capability
1655 * Force mode: phyxcer register4 is the force media
1656 */
1657
1658static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1659{
1660 u16 phy_reg;
1661
1662 /* Select 10/100M phyxcer */
1663 db->cr6_data &= ~0x40000;
1664 update_cr6(db->cr6_data, db->ioaddr);
1665
1666 /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1667 if (db->chip_id == PCI_DM9009_ID) {
Maxim Levitskyf67ba792007-03-06 02:41:51 -08001668 phy_reg = phy_read(db->ioaddr,
1669 db->phy_addr, 18, db->chip_id) & ~0x1000;
1670
1671 phy_write(db->ioaddr,
1672 db->phy_addr, 18, phy_reg, db->chip_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001673 }
1674
1675 /* Phyxcer capability setting */
1676 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1677
1678 if (db->media_mode & DMFE_AUTO) {
1679 /* AUTO Mode */
1680 phy_reg |= db->PHY_reg4;
1681 } else {
1682 /* Force Mode */
1683 switch(db->media_mode) {
1684 case DMFE_10MHF: phy_reg |= 0x20; break;
1685 case DMFE_10MFD: phy_reg |= 0x40; break;
1686 case DMFE_100MHF: phy_reg |= 0x80; break;
1687 case DMFE_100MFD: phy_reg |= 0x100; break;
1688 }
1689 if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1690 }
1691
1692 /* Write new capability to Phyxcer Reg4 */
1693 if ( !(phy_reg & 0x01e0)) {
1694 phy_reg|=db->PHY_reg4;
1695 db->media_mode|=DMFE_AUTO;
1696 }
1697 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1698
1699 /* Restart Auto-Negotiation */
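	/*
	 * BMCR (register 0) value 0x1200 sets bit 12 (enable N-way) plus
	 * bit 9 (restart N-way); the chip_type/DM9102 case below writes
	 * the chip-specific value 0x1800 instead.
	 */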
1700 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1701 phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1702 if ( !db->chip_type )
1703 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1704}
1705
1706
1707/*
1708 * Process op-mode
1709 * AUTO mode : PHY controller in Auto-negotiation Mode
1710 * Force mode: PHY controller forced directly when linked to a HUB,
1711 *             or N-way forced to the selected capability with a SWITCH
1712 */
1713
1714static void dmfe_process_mode(struct dmfe_board_info *db)
1715{
1716 u16 phy_reg;
1717
1718 /* Full Duplex Mode Check */
1719 if (db->op_mode & 0x4)
1720 db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
1721 else
1722 db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
1723
1724	/* Transceiver Selection */
1725 if (db->op_mode & 0x10) /* 1M HomePNA */
1726 db->cr6_data |= 0x40000;/* External MII select */
1727 else
1728		db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */
1729
1730 update_cr6(db->cr6_data, db->ioaddr);
1731
1732	/* Force mode handling for the 10/100M phyxcer */
1733	if ( !(db->media_mode & 0x18)) {
1734		/* Force Mode */
1735		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1736		if ( !(phy_reg & 0x1) ) {
1737			/* partner without N-Way capability */
1738 phy_reg = 0x0;
1739 switch(db->op_mode) {
1740 case DMFE_10MHF: phy_reg = 0x0; break;
1741 case DMFE_10MFD: phy_reg = 0x100; break;
1742 case DMFE_100MHF: phy_reg = 0x2000; break;
1743 case DMFE_100MFD: phy_reg = 0x2100; break;
1744 }
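			/*
			 * No N-way partner: force the link by writing BMCR
			 * directly (bit 13 = 100Mb/s, bit 8 = full duplex);
			 * the write is repeated for the DM9102 after a delay.
			 */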
1745			phy_write(db->ioaddr,
1746				  db->phy_addr, 0, phy_reg, db->chip_id);
1747			if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1748				mdelay(20);
1749			phy_write(db->ioaddr,
1750				  db->phy_addr, 0, phy_reg, db->chip_id);
1751		}
1752 }
1753}
1754
1755
1756/*
1757 * Write a word to Phy register
1758 */
1759
1760static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
1761		      u16 phy_data, u32 chip_id)
1762{
1763 u16 i;
1764 unsigned long ioaddr;
1765
1766 if (chip_id == PCI_DM9132_ID) {
1767 ioaddr = iobase + 0x80 + offset * 4;
1768 outw(phy_data, ioaddr);
1769 } else {
1770 /* DM9102/DM9102A Chip */
1771 ioaddr = iobase + DCR9;
1772
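		/*
		 * Bit-bang an IEEE 802.3 clause-22 MII management frame on
		 * CR9: preamble, start (01), write opcode (01), 5-bit PHY
		 * address, 5-bit register address, turnaround, 16 data bits.
		 */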
1773		/* Send 35 synchronization clock pulses to the PHY controller */
1774 for (i = 0; i < 35; i++)
1775 phy_write_1bit(ioaddr, PHY_DATA_1);
1776
1777 /* Send start command(01) to Phy */
1778 phy_write_1bit(ioaddr, PHY_DATA_0);
1779 phy_write_1bit(ioaddr, PHY_DATA_1);
1780
1781 /* Send write command(01) to Phy */
1782 phy_write_1bit(ioaddr, PHY_DATA_0);
1783 phy_write_1bit(ioaddr, PHY_DATA_1);
1784
1785 /* Send Phy address */
1786 for (i = 0x10; i > 0; i = i >> 1)
1787			phy_write_1bit(ioaddr,
1788				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1789
1790 /* Send register address */
1791 for (i = 0x10; i > 0; i = i >> 1)
1792			phy_write_1bit(ioaddr,
1793				       offset & i ? PHY_DATA_1 : PHY_DATA_0);
1794
1795		/* Send the write turnaround bits (10) */
1796 phy_write_1bit(ioaddr, PHY_DATA_1);
1797 phy_write_1bit(ioaddr, PHY_DATA_0);
1798
1799 /* Write a word data to PHY controller */
1800 for ( i = 0x8000; i > 0; i >>= 1)
1801			phy_write_1bit(ioaddr,
1802				       phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1803	}
1804}
1805
1806
1807/*
1808 * Read a word data from phy register
1809 */
1810
1811static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1812{
1813 int i;
1814 u16 phy_data;
1815 unsigned long ioaddr;
1816
1817 if (chip_id == PCI_DM9132_ID) {
1818 /* DM9132 Chip */
1819 ioaddr = iobase + 0x80 + offset * 4;
1820 phy_data = inw(ioaddr);
1821 } else {
1822 /* DM9102/DM9102A Chip */
1823 ioaddr = iobase + DCR9;
1824
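		/*
		 * Same bit-banged MII frame as phy_write(), but with the
		 * read opcode (10); after the turnaround the PHY drives the
		 * data line and 16 bits are clocked in MSB-first.
		 */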
1825		/* Send 35 synchronization clock pulses to the PHY controller */
1826 for (i = 0; i < 35; i++)
1827 phy_write_1bit(ioaddr, PHY_DATA_1);
1828
1829 /* Send start command(01) to Phy */
1830 phy_write_1bit(ioaddr, PHY_DATA_0);
1831 phy_write_1bit(ioaddr, PHY_DATA_1);
1832
1833 /* Send read command(10) to Phy */
1834 phy_write_1bit(ioaddr, PHY_DATA_1);
1835 phy_write_1bit(ioaddr, PHY_DATA_0);
1836
1837 /* Send Phy address */
1838 for (i = 0x10; i > 0; i = i >> 1)
1839			phy_write_1bit(ioaddr,
1840				       phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1841
1842 /* Send register address */
1843 for (i = 0x10; i > 0; i = i >> 1)
1844			phy_write_1bit(ioaddr,
1845				       offset & i ? PHY_DATA_1 : PHY_DATA_0);
1846
1847 /* Skip transition state */
1848 phy_read_1bit(ioaddr);
1849
1850 /* read 16bit data */
1851 for (phy_data = 0, i = 0; i < 16; i++) {
1852 phy_data <<= 1;
1853 phy_data |= phy_read_1bit(ioaddr);
1854 }
1855 }
1856
1857 return phy_data;
1858}
1859
1860
1861/*
1862 * Write one bit data to Phy Controller
1863 */
1864
1865static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
1866{
1867 outl(phy_data, ioaddr); /* MII Clock Low */
1868 udelay(1);
1869 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
1870 udelay(1);
1871 outl(phy_data, ioaddr); /* MII Clock Low */
1872 udelay(1);
1873}
1874
1875
1876/*
1877 * Read one bit phy data from PHY controller
1878 */
1879
1880static u16 phy_read_1bit(unsigned long ioaddr)
1881{
1882 u16 phy_data;
1883
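	/*
	 * CR9 MII bit usage here: bit 16 is the management clock (MDC),
	 * bit 18 selects read (input) mode, and the PHY's data-out line is
	 * sampled on bit 19.  0x50000 raises the clock in read mode, the
	 * bit is sampled, then 0x40000 drops the clock again.
	 */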
1884 outl(0x50000, ioaddr);
1885 udelay(1);
1886 phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
1887 outl(0x40000, ioaddr);
1888 udelay(1);
1889
1890 return phy_data;
1891}
1892
1893
1894/*
1895 * Parse the SROM and set the media mode
1896 */
1897
1898static void dmfe_parse_srom(struct dmfe_board_info * db)
1899{
1900 char * srom = db->srom;
1901 int dmfe_mode, tmp_reg;
1902
1903 DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1904
1905 /* Init CR15 */
1906 db->cr15_data = CR15_DEFAULT;
1907
1908 /* Check SROM Version */
1909 if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1910 /* SROM V4.01 */
1911 /* Get NIC support media mode */
1912		db->NIC_capability = le16_to_cpup((__le16 *)srom + 34/2);
1913		db->PHY_reg4 = 0;
1914 for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1915 switch( db->NIC_capability & tmp_reg ) {
1916 case 0x1: db->PHY_reg4 |= 0x0020; break;
1917 case 0x2: db->PHY_reg4 |= 0x0040; break;
1918 case 0x4: db->PHY_reg4 |= 0x0080; break;
1919 case 0x8: db->PHY_reg4 |= 0x0100; break;
1920 }
1921 }
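		/*
		 * Each SROM capability bit (10M half/full, 100M half/full)
		 * is translated into the matching advertisement bit of MII
		 * register 4, collected in PHY_reg4 for dmfe_set_phyxcer().
		 */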
1922
1923		/* Check whether the media mode is forced */
1924		dmfe_mode = le32_to_cpup((__le32 *)srom + 34/4) &
1925				le32_to_cpup((__le32 *)srom + 36/4);
1926		switch(dmfe_mode) {
1927 case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
1928 case 0x2: dmfe_media_mode = DMFE_10MFD; break; /* 10MFD */
1929 case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
1930 case 0x100:
1931 case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1932 }
1933
1934 /* Special Function setting */
1935 /* VLAN function */
1936 if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1937 db->cr15_data |= 0x40;
1938
1939 /* Flow Control */
1940 if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1941 db->cr15_data |= 0x400;
1942
1943 /* TX pause packet */
1944 if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1945 db->cr15_data |= 0x9800;
1946 }
1947
1948 /* Parse HPNA parameter */
1949 db->HPNA_command = 1;
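	/*
	 * HPNA_command is later written to DM9801/DM9802 register 16:
	 * bit 15 (set when HPNA_rx_cmd is 0) blocks remote commands, and
	 * the fields set below select the remote command and the local
	 * power/speed mode.
	 */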
1950
1951 /* Accept remote command or not */
1952 if (HPNA_rx_cmd == 0)
1953 db->HPNA_command |= 0x8000;
1954
1955 /* Issue remote command & operation mode */
1956 if (HPNA_tx_cmd == 1)
1957 switch(HPNA_mode) { /* Issue Remote Command */
1958 case 0: db->HPNA_command |= 0x0904; break;
1959 case 1: db->HPNA_command |= 0x0a00; break;
1960 case 2: db->HPNA_command |= 0x0506; break;
1961 case 3: db->HPNA_command |= 0x0602; break;
1962 }
1963 else
1964 switch(HPNA_mode) { /* Don't Issue */
1965 case 0: db->HPNA_command |= 0x0004; break;
1966 case 1: db->HPNA_command |= 0x0000; break;
1967 case 2: db->HPNA_command |= 0x0006; break;
1968 case 3: db->HPNA_command |= 0x0002; break;
1969 }
1970
1971	/* Check whether a DM9801 or DM9802 is present */
1972 db->HPNA_present = 0;
1973 update_cr6(db->cr6_data|0x40000, db->ioaddr);
1974 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1975 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1976 /* DM9801 or DM9802 present */
1977 db->HPNA_timer = 8;
1978 if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1979 /* DM9801 HomeRun */
1980 db->HPNA_present = 1;
1981 dmfe_program_DM9801(db, tmp_reg);
1982 } else {
1983 /* DM9802 LongRun */
1984 db->HPNA_present = 2;
1985 dmfe_program_DM9802(db);
1986 }
1987 }
1988
1989}
1990
1991
1992/*
1993 * Init HomeRun DM9801
1994 */
1995
1996static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
1997{
1998 uint reg17, reg25;
1999
2000 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
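	/*
	 * Fold the noise-floor setting into DM9801 registers 17 and 25;
	 * the required offsets differ per chip revision (E3-E6), which is
	 * what the cases below adjust for.
	 */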
2001 switch(HPNA_rev) {
2002 case 0xb900: /* DM9801 E3 */
2003 db->HPNA_command |= 0x1000;
2004 reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
2005 reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
2006 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2007 break;
2008 case 0xb901: /* DM9801 E4 */
2009 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2010 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
2011 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2012 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
2013 break;
2014 case 0xb902: /* DM9801 E5 */
2015 case 0xb903: /* DM9801 E6 */
2016 default:
2017 db->HPNA_command |= 0x1000;
2018 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2019 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
2020 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2021 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
2022 break;
2023 }
2024 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2025 phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
2026 phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2027}
2028
2029
2030/*
2031 * Init HomeRun DM9802
2032 */
2033
2034static void dmfe_program_DM9802(struct dmfe_board_info * db)
2035{
2036 uint phy_reg;
2037
2038 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2039 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2040 phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2041 phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2042 phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2043}
2044
2045
2046/*
2047 * Check remote HPNA power and speed status. If not correct,
2048 * issue command again.
2049*/
2050
2051static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2052{
2053 uint phy_reg;
2054
2055	/* Get the remote device status */
2056 phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2057 switch(phy_reg) {
2058 case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2059 case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2060 case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2061 case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2062 }
2063
2064	/* Check whether the remote device status matches our setting */
2065 if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2066		phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2067			  db->chip_id);
2068		db->HPNA_timer=8;
2069 } else
2070 db->HPNA_timer=600; /* Match, every 10 minutes, check */
2071}
2072
2073
2074
2075static struct pci_device_id dmfe_pci_tbl[] = {
2076 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2077 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2078 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
2079 { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
2080 { 0, }
2081};
2082MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2083
2084
2085#ifdef CONFIG_PM
2086static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2087{
2088 struct net_device *dev = pci_get_drvdata(pci_dev);
2089 struct dmfe_board_info *db = netdev_priv(dev);
2090	u32 tmp;
2091
2092 /* Disable upper layer interface */
2093 netif_device_detach(dev);
2094
2095 /* Disable Tx/Rx */
2096 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2097 update_cr6(db->cr6_data, dev->base_addr);
2098
2099 /* Disable Interrupt */
2100 outl(0, dev->base_addr + DCR7);
2101 outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5);
2102
2103	/* Free RX buffers */
2104 dmfe_free_rxbuffer(db);
2105
2106	/* Enable WOL */
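	/*
	 * The DMFE_WOL_* bits live in PCI config dword 0x40; arming the
	 * link-change and/or magic-packet bits here is what allows the
	 * chip to raise a wake event while suspended.
	 */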
2107 pci_read_config_dword(pci_dev, 0x40, &tmp);
2108 tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);
2109
2110 if (db->wol_mode & WAKE_PHY)
2111 tmp |= DMFE_WOL_LINKCHANGE;
2112 if (db->wol_mode & WAKE_MAGIC)
2113 tmp |= DMFE_WOL_MAGICPACKET;
2114
2115 pci_write_config_dword(pci_dev, 0x40, tmp);
2116
2117 pci_enable_wake(pci_dev, PCI_D3hot, 1);
2118 pci_enable_wake(pci_dev, PCI_D3cold, 1);
2119
2120	/* Power down the device */
2121 pci_set_power_state(pci_dev, pci_choose_state (pci_dev,state));
2122 pci_save_state(pci_dev);
2123
2124 return 0;
2125}
2126
2127static int dmfe_resume(struct pci_dev *pci_dev)
2128{
2129 struct net_device *dev = pci_get_drvdata(pci_dev);
2130	u32 tmp;
2131
2132 pci_restore_state(pci_dev);
2133 pci_set_power_state(pci_dev, PCI_D0);
2134
2135	/* Re-initialize DM910X board */
2136 dmfe_init_dm910x(dev);
2137
2138	/* Disable WOL */
2139 pci_read_config_dword(pci_dev, 0x40, &tmp);
2140
2141 tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
2142 pci_write_config_dword(pci_dev, 0x40, tmp);
2143
2144 pci_enable_wake(pci_dev, PCI_D3hot, 0);
2145 pci_enable_wake(pci_dev, PCI_D3cold, 0);
2146
2147	/* Restart upper layer interface */
2148 netif_device_attach(dev);
2149
2150 return 0;
2151}
2152#else
2153#define dmfe_suspend NULL
2154#define dmfe_resume NULL
2155#endif
2156
2157static struct pci_driver dmfe_driver = {
2158 .name = "dmfe",
2159 .id_table = dmfe_pci_tbl,
2160 .probe = dmfe_init_one,
2161 .remove = __devexit_p(dmfe_remove_one),
2162	.suspend = dmfe_suspend,
2163	.resume = dmfe_resume
2164};
2165
2166MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2167MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2168MODULE_LICENSE("GPL");
2169MODULE_VERSION(DRV_VERSION);
2170
2171module_param(debug, int, 0);
2172module_param(mode, byte, 0);
2173module_param(cr6set, int, 0);
2174module_param(chkmode, byte, 0);
2175module_param(HPNA_mode, byte, 0);
2176module_param(HPNA_rx_cmd, byte, 0);
2177module_param(HPNA_tx_cmd, byte, 0);
2178module_param(HPNA_NoiseFloor, byte, 0);
2179module_param(SF_mode, byte, 0);
2180MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2181MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2182 "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2183
2184MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2185 "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
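/*
 * Example (values assumed from the DMFE_* encoding earlier in this file,
 * where 100Mb/s full duplex is 5 and SF_mode bit 1 enables flow control):
 *
 *	modprobe dmfe mode=5 SF_mode=2
 *
 * forces 100Mb/s full duplex and turns on flow control at load time.
 */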
2186
2187/* Description:
2188 * When the user loads the module with insmod, the system invokes the
2189 * module init routine to initialize and register the driver.
2190 */
2191
2192static int __init dmfe_init_module(void)
2193{
2194 int rc;
2195
2196 printk(version);
2197 printed_version = 1;
2198
2199 DMFE_DBUG(0, "init_module() ", debug);
2200
2201 if (debug)
2202 dmfe_debug = debug; /* set debug flag */
2203 if (cr6set)
2204 dmfe_cr6_user_set = cr6set;
2205
2206 switch(mode) {
2207 case DMFE_10MHF:
2208 case DMFE_100MHF:
2209 case DMFE_10MFD:
2210 case DMFE_100MFD:
2211 case DMFE_1M_HPNA:
2212 dmfe_media_mode = mode;
2213 break;
2214 default:dmfe_media_mode = DMFE_AUTO;
2215 break;
2216 }
2217
2218 if (HPNA_mode > 4)
2219 HPNA_mode = 0; /* Default: LP/HS */
2220 if (HPNA_rx_cmd > 1)
2221 HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */
2222 if (HPNA_tx_cmd > 1)
2223 HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */
2224 if (HPNA_NoiseFloor > 15)
2225 HPNA_NoiseFloor = 0;
2226
2227	rc = pci_register_driver(&dmfe_driver);
2228	if (rc < 0)
2229 return rc;
2230
2231 return 0;
2232}
2233
2234
2235/*
2236 * Description:
2237 * When the user removes the module with rmmod, the system invokes the
2238 * module exit routine to unregister all registered services.
2239 */
2240
2241static void __exit dmfe_cleanup_module(void)
2242{
2243 DMFE_DBUG(0, "dmfe_clean_module() ", debug);
2244 pci_unregister_driver(&dmfe_driver);
2245}
2246
2247module_init(dmfe_init_module);
2248module_exit(dmfe_cleanup_module);