/*
   sis190.c: Silicon Integrated Systems SiS190 ethernet driver

   Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
   Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
   Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>

   Based on r8169.c, tg3.c, 8139cp.c, skge.c and probably even epic100.c.

   This software may be used and distributed according to the terms of
   the GNU General Public License (GPL), incorporated herein by reference.
   Drivers based on or derived from this code fall under the GPL and must
   retain the authorship, copyright and license notice. This file is not
   a complete program and may only be used when the entire operating
   system is licensed under the GPL.

   See the file COPYING in this distribution for more information.

 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>

#define net_drv(p, arg...)	if (netif_msg_drv(p)) \
					printk(arg)
#define net_probe(p, arg...)	if (netif_msg_probe(p)) \
					printk(arg)
#define net_link(p, arg...)	if (netif_msg_link(p)) \
					printk(arg)
#define net_intr(p, arg...)	if (netif_msg_intr(p)) \
					printk(arg)
#define net_tx_err(p, arg...)	if (netif_msg_tx_err(p)) \
					printk(arg)

#ifdef CONFIG_SIS190_NAPI
#define NAPI_SUFFIX	"-NAPI"
#else
#define NAPI_SUFFIX	""
#endif

#define DRV_VERSION		"1.2" NAPI_SUFFIX
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX DRV_NAME ": "

#ifdef CONFIG_SIS190_NAPI
#define sis190_rx_skb			netif_receive_skb
#define sis190_rx_quota(count, quota)	min(count, quota)
#else
#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count
#endif

#define MAC_ADDR_LEN		6

#define NUM_TX_DESC		64
#define NUM_RX_DESC		64
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)

/* Enhanced PHY access register bit definitions */
#define EhnMIIread		0x0000
#define EhnMIIwrite		0x0020
#define EhnMIIdataShift		16
#define EhnMIIpmdShift		6	/* 7016 only */
#define EhnMIIregShift		11
#define EhnMIIreq		0x0010
#define EhnMIInotDone		0x0010

/* Write/read MMIO register */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

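/*
 * Reading IntrControl back after a write pushes any posted MMIO writes out
 * to the chip before the driver goes on (PCI write posting flush).
 */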
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)

enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	TxNextDescAddr		= 0x0c,	// unused
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	RxNextDescAddr		= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused
	PMControl		= 0x30,	// unused
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	TxMacControl		= 0x50,
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented	= 0x6c,
	RxWakeOnLan		= 0x70,
	// Undocumented	= 0x74,
	RxMPSControl		= 0x78,	// unused
};

enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* RxStatusDesc */
	RxRES			= 0x00200000,	// unused
	RxCRC			= 0x00080000,
	RxRUNT			= 0x00100000,	// unused
	RxRWT			= 0x00400000,	// unused

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,	// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,	// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,	// unused
	Cfg9346_Unlock		= 0xc0,	// unused

	/* RxMacControl */
	AcceptErr		= 0x20,	// unused
	AcceptRunt		= 0x10,	// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,	// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8,	/* DMA burst value (0-7) is shifted this many bits */

	/* StationControl */
	_1000bpsF		= 0x1c00,
	_1000bpsH		= 0x0c00,
	_100bpsF		= 0x1800,
	_100bpsH		= 0x0800,
	_10bpsF			= 0x1400,
	_10bpsH			= 0x0400,

	LinkStatus		= 0x02,	// unused
	FullDup			= 0x01,	// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};

struct TxDesc {
	u32 PSize;
	u32 status;
	u32 addr;
	u32 size;
};

struct RxDesc {
	u32 PSize;
	u32 status;
	u32 addr;
	u32 size;
};

enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000,
	INTbit		= 0x40000000,
	DEFbit		= 0x00200000,
	CRCbit		= 0x00020000,
	PADbit		= 0x00010000,
	/* _Desc.size */
	RingEnd		= (1 << 31),
	/* _Desc.PSize */
	RxSizeMask	= 0x0000ffff
};

struct sis190_private {
	void __iomem *mmio_addr;
	struct pci_dev *pci_dev;
	struct net_device_stats stats;
	spinlock_t lock;
	u32 rx_buf_sz;
	u32 cur_rx;
	u32 cur_tx;
	u32 dirty_rx;
	u32 dirty_tx;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;
	struct timer_list timer;
	u32 msg_enable;
	struct mii_if_info mii_if;
};

static const struct {
	const char *name;
	u8 version;		/* depends on docs */
	u32 RxConfigMask;	/* clear the bits supported by this chip */
} sis_chip_info[] = {
	{ DRV_NAME, 0x00, 0xff7e1880, },
};

static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);

static int rx_copybreak = 200;

static struct {
	u32 msg_enable;
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
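
/*
 * Both parameters can be set at module load time, e.g. (illustrative values):
 *   modprobe sis190 rx_copybreak=256 debug=5
 */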

static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static int multicast_filter_limit = 32;

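/*
 * PHY registers are reached through GMIIControl: the caller encodes the
 * request, the command is written out and the EhnMIInotDone bit is polled
 * until the chip reports completion.
 */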
static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
{
	unsigned int i;

	SIS_W32(GMIIControl, ctl);

	msleep(1);

	for (i = 0; i < 100; i++) {
		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
			break;
		msleep(1);
	}

	if (i > 99)
		printk(KERN_ERR PFX "PHY command failed !\n");
}

static void mdio_write(void __iomem *ioaddr, int reg, int val)
{
	u32 pmd = 1;

	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
		(((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift) |
		(((u32) val) << EhnMIIdataShift));
}

static int mdio_read(void __iomem *ioaddr, int reg)
{
	u32 pmd = 1;

	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
		(((u32) reg) << EhnMIIregShift) | (pmd << EhnMIIpmdShift));

	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
}

static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
{
	struct sis190_private *tp = netdev_priv(dev);

	mdio_write(tp->mmio_addr, reg, val);
}

static int __mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mdio_read(tp->mmio_addr, reg);
}

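/*
 * Read one 16 bit word from the EEPROM through ROMInterface: the
 * opcode/address is written, the busy bit (0x0080) is polled, and the data
 * comes back in the upper half of the register.
 */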
static int sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	unsigned int i;
	u16 data;
	u32 val;

	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	val = (0x0080 | (0x2 << 8) | (reg << 10));

	SIS_W32(ROMInterface, val);

	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & 0x0080))
			break;
		msleep(1);
	}

	data = (u16) ((SIS_R32(ROMInterface) & 0xffff0000) >> 16);

	return data;
}

static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}

static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}

static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}

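/*
 * Descriptor ownership: once OWNbit is set the descriptor belongs to the
 * chip, so the size/address fields are written first and a wmb() orders
 * them before the status update.
 */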
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32(rx_buf_sz | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}

static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}

static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = 0xdeadbeef;
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}

static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
			       struct RxDesc *desc, u32 rx_buf_sz)
{
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret = 0;

	skb = dev_alloc_skb(rx_buf_sz);
	if (!skb)
		goto err_out;

	*sk_buff = skb;

	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	sis190_map_to_asic(desc, mapping, rx_buf_sz);
out:
	return ret;

err_out:
	ret = -ENOMEM;
	sis190_make_unusable_by_asic(desc);
	goto out;
}

static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
			  u32 start, u32 end)
{
	u32 cur;

	for (cur = start; cur < end; cur++) {
		int ret, i = cur % NUM_RX_DESC;

		if (tp->Rx_skbuff[i])
			continue;

		ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
					  tp->RxDescRing + i, tp->rx_buf_sz);
		if (ret < 0)
			break;
	}
	return cur - start;
}

static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
				     struct RxDesc *desc, int rx_buf_sz)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
		if (skb) {
			skb_reserve(skb, NET_IP_ALIGN);
			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
			*sk_buff = skb;
			sis190_give_to_asic(desc, rx_buf_sz);
			ret = 0;
		}
	}
	return ret;
}

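/*
 * Walk the Rx ring starting at cur_rx, hand completed frames to the stack
 * (copying small ones, see rx_copybreak), then refill the ring between
 * dirty_rx and cur_rx with fresh skbs.
 */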
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &tp->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		if (le32_to_cpu(desc->status) & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
		//	status);

		if (status & RxCRC) {
			net_intr(tp, KERN_INFO "%s: bad crc. status = %08x.\n",
				 dev->name, status);
			stats->rx_errors++;
			stats->rx_crc_errors++;
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		} else if (!(status & PADbit)) {
			net_intr(tp, KERN_INFO "%s: bad pad. status = %08x.\n",
				 dev->name, status);
			stats->rx_errors++;
			stats->rx_length_errors++;
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		} else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			int pkt_size = (status & RxSizeMask) - 4;
			void (*pci_action)(struct pci_dev *, dma_addr_t,
				size_t, int) = pci_dma_sync_single_for_device;

			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				net_intr(tp, KERN_INFO
					 "%s: (frag) status = %08x.\n",
					 dev->name, status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			pci_dma_sync_single_for_cpu(tp->pci_dev,
				le32_to_cpu(desc->addr), tp->rx_buf_sz,
				PCI_DMA_FROMDEVICE);

			if (sis190_try_rx_copy(&skb, pkt_size, desc,
					       tp->rx_buf_sz)) {
				pci_action = pci_unmap_single;
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			skb->dev = dev;
			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			dev->last_rx = jiffies;
			stats->rx_bytes += pkt_size;
			stats->rx_packets++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
	tp->dirty_rx += delta;

	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);

	return count;
}

static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				struct TxDesc *desc)
{
	unsigned int len;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);

	memset(desc, 0x00, sizeof(*desc));
}

static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		struct sk_buff *skb;

		if (le32_to_cpu(txd->status) & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		tp->stats.tx_packets++;
		tp->stats.tx_bytes += skb->len;

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}

/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	SIS_W32(IntrStatus, status);

	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev, NULL);
	enable_irq(pdev->irq);
}
#endif

static void sis190_free_rx_skb(struct sis190_private *tp,
			       struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	sis190_make_unusable_by_asic(desc);
}

static void sis190_rx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (!tp->Rx_skbuff[i])
			continue;
		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
	}
}

static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}

static int sis190_init_ring(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_init_ring_indexes(tp);

	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_rx_clear;

	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);

	return 0;

err_rx_clear:
	sis190_rx_clear(tp);
	return -ENOMEM;
}

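/*
 * Multicast filtering uses a 64 bit hash table: the top six bits of the
 * Ethernet CRC of each address select the bit to set in RxHashTable.
 */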
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		/* Unconditionally log net taps. */
		net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
			dev->name);
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}

static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	msleep(1);
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
	msleep(1);
}

static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	/*
	 * Default is 100Mbps.
	 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
	 */
	SIS_W16(StationControl, 0x1901);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);
	SIS_W32(RxWakeOnLan, 0x0);
	SIS_W32(0x74, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}

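/*
 * Deferred PHY work: wait for a pending BMCR reset to clear, keep resetting
 * the PHY until autonegotiation completes, then program StationControl from
 * the negotiated link partner abilities.
 */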
static void sis190_phy_task(void * data)
{
	struct net_device *dev = data;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u16 val;

	rtnl_lock();

	val = mdio_read(ioaddr, MII_BMCR);
	if (val & BMCR_RESET) {
		// FIXME: needlessly high ? -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read(ioaddr, MII_BMSR) & BMSR_ANEGCOMPLETE)) {
		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
			 dev->name);
		mdio_write(ioaddr, MII_BMCR, val | BMCR_RESET);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! */
		struct {
			int val;
			const char *msg;
			u16 ctl;
		} reg31[] = {
			{ LPA_1000XFULL | LPA_SLCT,
			  "1000 Mbps Full Duplex",
			  0x01 | _1000bpsF },
			{ LPA_1000XHALF | LPA_SLCT,
			  "1000 Mbps Half Duplex",
			  0x01 | _1000bpsH },
			{ LPA_100FULL,
			  "100 Mbps Full Duplex",
			  0x01 | _100bpsF },
			{ LPA_100HALF,
			  "100 Mbps Half Duplex",
			  0x01 | _100bpsH },
			{ LPA_10FULL,
			  "10 Mbps Full Duplex",
			  0x01 | _10bpsF },
			{ LPA_10HALF,
			  "10 Mbps Half Duplex",
			  0x01 | _10bpsH },
			{ 0, "unknown", 0x0000 }
		}, *p;

		val = mdio_read(ioaddr, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, MII_LPA);
		net_link(tp, KERN_INFO "%s: mii lpa = %04x.\n", dev->name, val);

		for (p = reg31; p->ctl; p++) {
			if ((val & p->val) == p->val)
				break;
		}
		if (p->ctl)
			SIS_W16(StationControl, p->ctl);
		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

	rtnl_unlock();
}

static void sis190_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct sis190_private *tp = netdev_priv(dev);

	if (likely(netif_running(dev)))
		schedule_work(&tp->phy_task);
}

static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}

static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;
	timer->function = sis190_phy_timer;
	add_timer(timer);
}

static void sis190_set_rxbufsize(struct sis190_private *tp,
				 struct net_device *dev)
{
	unsigned int mtu = dev->mtu;

	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
}

static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
		tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
		tp->tx_dma);
	goto out;
}

static void sis190_tx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = tp->Tx_skbuff[i];

		if (!skb)
			continue;

		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
		tp->Tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);

		tp->stats.tx_dropped++;
	}
	tp->cur_tx = tp->dirty_tx = 0;
}

static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	flush_scheduled_work();

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(dev->irq);

		if (!poll_locked) {
			netif_poll_disable(dev);
			poll_locked++;
		}

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}

static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	netif_poll_enable(dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}

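/*
 * Queue one skb: pad short frames, map the data, fill the descriptor and
 * hand it to the chip (OWNbit last, after a wmb()), then kick TxControl.
 * The queue is stopped when the ring fills up.
 */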
static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (!skb) {
			tp->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dev->trans_start = jiffies;

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}

static struct net_device_stats *sis190_get_stats(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return &tp->stats;
}

static void sis190_release_board(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct sis190_private *tp = netdev_priv(dev);

	iounmap(tp->mmio_addr);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}

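/*
 * Board bring-up: allocate the net_device, enable the PCI device, claim and
 * remap BAR 0, set up the 32 bit DMA mask and the MII accessors. On failure
 * the steps already taken are unwound and an ERR_PTR is returned.
 */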
static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
{
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
		rc = -ENOMEM;
		goto err_out_0;
	}

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);

	rc = pci_enable_device(pdev);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
		goto err_free_dev_1;
	}

	rc = -ENODEV;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}
	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc < 0) {
		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
			  pci_name(pdev));
		goto err_pci_disable_2;
	}

	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc < 0) {
		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
			  pci_name(pdev));
		goto err_free_res_3;
	}

	pci_set_master(pdev);

	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
	if (!ioaddr) {
		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
			  pci_name(pdev));
		rc = -EIO;
		goto err_free_res_3;
	}

	tp->pci_dev = pdev;
	tp->mmio_addr = ioaddr;

	tp->mii_if.dev = dev;
	tp->mii_if.mdio_read = __mdio_read;
	tp->mii_if.mdio_write = __mdio_write;
	// tp->mii_if.phy_id = XXX;
	tp->mii_if.phy_id_mask = 0x1f;
	tp->mii_if.reg_num_mask = 0x1f;

	sis190_irq_mask_and_ack(ioaddr);

	sis190_soft_reset(ioaddr);
out:
	return dev;

err_free_res_3:
	pci_release_regions(pdev);
err_pci_disable_2:
	pci_disable_device(pdev);
err_free_dev_1:
	free_netdev(dev);
err_out_0:
	dev = ERR_PTR(rc);
	goto out;
}

static void sis190_tx_timeout(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u8 tmp8;

	/* Disable Tx, if not already */
	tmp8 = SIS_R8(TxControl);
	if (tmp8 & CmdTxEnb)
		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);

	/* Disable interrupts by clearing the interrupt mask. */
	SIS_W32(IntrMask, 0x0000);

	/* Stop a shared interrupt from scavenging while we are. */
	spin_lock_irq(&tp->lock);
	sis190_tx_clear(tp);
	spin_unlock_irq(&tp->lock);

	/* ...and finally, reset everything. */
	sis190_hw_start(dev);

	netif_wake_queue(dev);
}

static void sis190_set_speed_auto(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	int val;

	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);

	val = mdio_read(ioaddr, MII_ADVERTISE);

	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
	// unchanged.
	mdio_write(ioaddr, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
		   ADVERTISE_100FULL | ADVERTISE_10FULL |
		   ADVERTISE_100HALF | ADVERTISE_10HALF);

	// Enable 1000 Full Mode.
	mdio_write(ioaddr, MII_CTRL1000, ADVERTISE_1000FULL);

	// Enable auto-negotiation and restart auto-negotiation.
	mdio_write(ioaddr, MII_BMCR,
		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
}

static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_ethtool_gset(&tp->mii_if, cmd);
}

static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_ethtool_sset(&tp->mii_if, cmd);
}

static void sis190_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct sis190_private *tp = netdev_priv(dev);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(tp->pci_dev));
}

static int sis190_get_regs_len(struct net_device *dev)
{
	return SIS190_REGS_SIZE;
}

static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			    void *p)
{
	struct sis190_private *tp = netdev_priv(dev);
	unsigned long flags;

	if (regs->len > SIS190_REGS_SIZE)
		regs->len = SIS190_REGS_SIZE;

	spin_lock_irqsave(&tp->lock, flags);
	memcpy_fromio(p, tp->mmio_addr, regs->len);
	spin_unlock_irqrestore(&tp->lock, flags);
}

static int sis190_nway_reset(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mii_nway_restart(&tp->mii_if);
}

static u32 sis190_get_msglevel(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	return tp->msg_enable;
}

static void sis190_set_msglevel(struct net_device *dev, u32 value)
{
	struct sis190_private *tp = netdev_priv(dev);

	tp->msg_enable = value;
}

static struct ethtool_ops sis190_ethtool_ops = {
	.get_settings	= sis190_get_settings,
	.set_settings	= sis190_set_settings,
	.get_drvinfo	= sis190_get_drvinfo,
	.get_regs_len	= sis190_get_regs_len,
	.get_regs	= sis190_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_msglevel	= sis190_get_msglevel,
	.set_msglevel	= sis190_set_msglevel,
	.nway_reset	= sis190_nway_reset,
};

static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct sis190_private *tp = netdev_priv(dev);

	return !netif_running(dev) ? -EINVAL :
		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
}

static int __devinit sis190_init_one(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct sis190_private *tp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int i, rc;

	if (!printed_version) {
		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
		printed_version = 1;
	}

	dev = sis190_init_board(pdev);
	if (IS_ERR(dev)) {
		rc = PTR_ERR(dev);
		goto out;
	}

	tp = netdev_priv(dev);
	ioaddr = tp->mmio_addr;

	/* Get MAC address */
	/* Read node address from the EEPROM */

	if (SIS_R32(ROMControl) & 0x4) {
		for (i = 0; i < 3; i++) {
			SIS_W16(RxMacAddr + 2*i,
				sis190_read_eeprom(ioaddr, 3 + i));
		}
	}

	for (i = 0; i < MAC_ADDR_LEN; i++)
		dev->dev_addr[i] = SIS_R8(RxMacAddr + i);

	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);

	dev->open = sis190_open;
	dev->stop = sis190_close;
	dev->do_ioctl = sis190_ioctl;
	dev->get_stats = sis190_get_stats;
	dev->tx_timeout = sis190_tx_timeout;
	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
	dev->hard_start_xmit = sis190_start_xmit;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = sis190_netpoll;
#endif
	dev->set_multicast_list = sis190_set_rx_mode;
	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) 0xdead;

	spin_lock_init(&tp->lock);
	rc = register_netdev(dev);
	if (rc < 0) {
		sis190_release_board(pdev);
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
		  "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
		  pci_name(pdev), sis_chip_info[ent->driver_data].name,
		  ioaddr, dev->irq,
		  dev->dev_addr[0], dev->dev_addr[1],
		  dev->dev_addr[2], dev->dev_addr[3],
		  dev->dev_addr[4], dev->dev_addr[5]);

	netif_carrier_off(dev);

	sis190_set_speed_auto(dev);
out:
	return rc;
}

static void __devexit sis190_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	unregister_netdev(dev);
	sis190_release_board(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver sis190_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= sis190_pci_tbl,
	.probe		= sis190_init_one,
	.remove		= __devexit_p(sis190_remove_one),
};

static int __init sis190_init_module(void)
{
	return pci_module_init(&sis190_pci_driver);
}

static void __exit sis190_cleanup_module(void)
{
	pci_unregister_driver(&sis190_pci_driver);
}

module_init(sis190_init_module);
module_exit(sis190_cleanup_module);