1/*
2 *
3 * Alchemy Au1x00 ethernet driver
4 *
5 * Copyright 2001,2002,2003 MontaVista Software Inc.
6 * Copyright 2002 TimeSys Corp.
7 * Added ethtool/mii-tool support,
8 * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
9 * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
10 * or riemer@riemer-nt.de: fixed the link beat detection with
11 * ioctls (SIOCGMIIPHY)
12 * Author: MontaVista Software, Inc.
13 * ppopov@mvista.com or source@mvista.com
14 *
15 * ########################################################################
16 *
17 * This program is free software; you can distribute it and/or modify it
18 * under the terms of the GNU General Public License (Version 2) as
19 * published by the Free Software Foundation.
20 *
21 * This program is distributed in the hope it will be useful, but WITHOUT
22 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
24 * for more details.
25 *
26 * You should have received a copy of the GNU General Public License along
27 * with this program; if not, write to the Free Software Foundation, Inc.,
28 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
29 *
30 * ########################################################################
31 *
32 *
33 */
34
35#include <linux/module.h>
36#include <linux/kernel.h>
37#include <linux/sched.h>
38#include <linux/string.h>
39#include <linux/timer.h>
40#include <linux/errno.h>
41#include <linux/in.h>
42#include <linux/ioport.h>
43#include <linux/bitops.h>
44#include <linux/slab.h>
45#include <linux/interrupt.h>
46#include <linux/pci.h>
47#include <linux/init.h>
48#include <linux/netdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/ethtool.h>
51#include <linux/mii.h>
52#include <linux/skbuff.h>
53#include <linux/delay.h>
54#include <asm/mipsregs.h>
55#include <asm/irq.h>
56#include <asm/io.h>
57#include <asm/processor.h>
58
59#include <asm/mach-au1x00/au1000.h>
60#include <asm/cpu.h>
61#include "au1000_eth.h"
62
63#ifdef AU1000_ETH_DEBUG
64static int au1000_debug = 5;
65#else
66static int au1000_debug = 3;
67#endif
68
69#define DRV_NAME "au1000eth"
70#define DRV_VERSION "1.5"
71#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
72#define DRV_DESC "Au1xxx on-chip Ethernet driver"
73
74MODULE_AUTHOR(DRV_AUTHOR);
75MODULE_DESCRIPTION(DRV_DESC);
76MODULE_LICENSE("GPL");
77
78// prototypes
79static void hard_stop(struct net_device *);
80static void enable_rx_tx(struct net_device *dev);
81static struct net_device * au1000_probe(u32 ioaddr, int irq, int port_num);
82static int au1000_init(struct net_device *);
83static int au1000_open(struct net_device *);
84static int au1000_close(struct net_device *);
85static int au1000_tx(struct sk_buff *, struct net_device *);
86static int au1000_rx(struct net_device *);
87static irqreturn_t au1000_interrupt(int, void *, struct pt_regs *);
88static void au1000_tx_timeout(struct net_device *);
89static int au1000_set_config(struct net_device *dev, struct ifmap *map);
90static void set_rx_mode(struct net_device *);
91static struct net_device_stats *au1000_get_stats(struct net_device *);
92static inline void update_tx_stats(struct net_device *, u32, u32);
93static inline void update_rx_stats(struct net_device *, u32);
94static void au1000_timer(unsigned long);
95static int au1000_ioctl(struct net_device *, struct ifreq *, int);
96static int mdio_read(struct net_device *, int, int);
97static void mdio_write(struct net_device *, int, int, u16);
98static void dump_mii(struct net_device *dev, int phy_id);
99
100// externs
101extern void ack_rise_edge_irq(unsigned int);
102extern int get_ethernet_addr(char *ethernet_addr);
103extern void str2eaddr(unsigned char *ea, unsigned char *str);
104extern char * __init prom_getcmdline(void);
105
106/*
107 * Theory of operation
108 *
109 * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
110 * There are four receive and four transmit descriptors. These
111 * descriptors are not in memory; rather, they are just a set of
112 * hardware registers.
113 *
114 * Since the Au1000 has a coherent data cache, the receive and
115 * transmit buffers are allocated from the KSEG0 segment. The
116 * hardware registers, however, are still mapped at KSEG1 to
117 * make sure there are no out-of-order writes, and that all writes
118 * complete immediately.
119 */
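/*
 * A note on the segments mentioned above (assuming the standard MIPS32
 * fixed mappings, nothing driver-specific): KSEG0 maps physical memory
 * at 0x80000000 and is accessed through the cache, while KSEG1 maps the
 * same memory uncached at 0xA0000000.  Both windows are unmapped, so the
 * CPHYSADDR() conversions used at probe time need no TLB entries for
 * either the MAC registers or the DMA buffers.
 */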
120
121/* These addresses are only used if yamon doesn't tell us what
122 * the mac address is, and the mac address is not passed on the
123 * command line.
124 */
125static unsigned char au1000_mac_addr[6] __devinitdata = {
126 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
127};
128
129#define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
130#define RUN_AT(x) (jiffies + (x))
131
132// For reading/writing 32-bit words from/to DMA memory
133#define cpu_to_dma32 cpu_to_be32
134#define dma32_to_cpu be32_to_cpu
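/*
 * These are plain aliases for the be32 helpers: the driver treats 32-bit
 * words exchanged with the DMA engine as big-endian, so they get
 * byte-swapped on a little-endian kernel and are left untouched on a
 * big-endian one.
 */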
135
136struct au1000_private *au_macs[NUM_ETH_INTERFACES];
137
138/* FIXME
139 * All of the PHY code really should be detached from the MAC
140 * code.
141 */
142
143/* Default advertise */
144#define GENMII_DEFAULT_ADVERTISE \
145 ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
146 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
147 ADVERTISED_Autoneg
148
149#define GENMII_DEFAULT_FEATURES \
150 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
151 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
152 SUPPORTED_Autoneg
153
154static char *phy_link[] =
155{ "unknown",
156 "10Base2", "10BaseT",
157 "AUI",
158 "100BaseT", "100BaseTX", "100BaseFX"
159};
160
161int bcm_5201_init(struct net_device *dev, int phy_addr)
162{
163 s16 data;
164
165 /* Stop auto-negotiation */
166 data = mdio_read(dev, phy_addr, MII_CONTROL);
167 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
168
169 /* Set advertisement to 10/100 and Half/Full duplex
170 * (full capabilities) */
171 data = mdio_read(dev, phy_addr, MII_ANADV);
172 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
173 mdio_write(dev, phy_addr, MII_ANADV, data);
174
175 /* Restart auto-negotiation */
176 data = mdio_read(dev, phy_addr, MII_CONTROL);
177 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
178 mdio_write(dev, phy_addr, MII_CONTROL, data);
179
180 if (au1000_debug > 4)
181 dump_mii(dev, phy_addr);
182 return 0;
183}
184
185int bcm_5201_reset(struct net_device *dev, int phy_addr)
186{
187 s16 mii_control, timeout;
188
189 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
190 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
191 mdelay(1);
192 for (timeout = 100; timeout > 0; --timeout) {
193 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
194 if ((mii_control & MII_CNTL_RESET) == 0)
195 break;
196 mdelay(1);
197 }
198 if (mii_control & MII_CNTL_RESET) {
199 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
200 return -1;
201 }
202 return 0;
203}
204
205int
206bcm_5201_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
207{
208 u16 mii_data;
209 struct au1000_private *aup;
210
211 if (!dev) {
212 printk(KERN_ERR "bcm_5201_status error: NULL dev\n");
213 return -1;
214 }
215 aup = (struct au1000_private *) dev->priv;
216
217 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
218 if (mii_data & MII_STAT_LINK) {
219 *link = 1;
220 mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
221 if (mii_data & MII_AUX_100) {
222 if (mii_data & MII_AUX_FDX) {
223 *speed = IF_PORT_100BASEFX;
224 dev->if_port = IF_PORT_100BASEFX;
225 }
226 else {
227 *speed = IF_PORT_100BASETX;
228 dev->if_port = IF_PORT_100BASETX;
229 }
230 }
231 else {
232 *speed = IF_PORT_10BASET;
233 dev->if_port = IF_PORT_10BASET;
234 }
235
236 }
237 else {
238 *link = 0;
239 *speed = 0;
240 dev->if_port = IF_PORT_UNKNOWN;
241 }
242 return 0;
243}
244
245int lsi_80227_init(struct net_device *dev, int phy_addr)
246{
247 if (au1000_debug > 4)
248 printk("lsi_80227_init\n");
249
250 /* restart auto-negotiation */
251 mdio_write(dev, phy_addr, MII_CONTROL,
252 MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO); // | MII_CNTL_FDX);
253 mdelay(1);
254
255 /* set up LEDs to correct display */
256#ifdef CONFIG_MIPS_MTX1
257 mdio_write(dev, phy_addr, 17, 0xff80);
258#else
259 mdio_write(dev, phy_addr, 17, 0xffc0);
260#endif
261
262 if (au1000_debug > 4)
263 dump_mii(dev, phy_addr);
264 return 0;
265}
266
267int lsi_80227_reset(struct net_device *dev, int phy_addr)
268{
269 s16 mii_control, timeout;
270
271 if (au1000_debug > 4) {
272 printk("lsi_80227_reset\n");
273 dump_mii(dev, phy_addr);
274 }
275
276 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
277 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
278 mdelay(1);
279 for (timeout = 100; timeout > 0; --timeout) {
280 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
281 if ((mii_control & MII_CNTL_RESET) == 0)
282 break;
283 mdelay(1);
284 }
285 if (mii_control & MII_CNTL_RESET) {
286 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
287 return -1;
288 }
289 return 0;
290}
291
292int
293lsi_80227_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
294{
295 u16 mii_data;
296 struct au1000_private *aup;
297
298 if (!dev) {
299 printk(KERN_ERR "lsi_80227_status error: NULL dev\n");
300 return -1;
301 }
302 aup = (struct au1000_private *) dev->priv;
303
304 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
305 if (mii_data & MII_STAT_LINK) {
306 *link = 1;
307 mii_data = mdio_read(dev, aup->phy_addr, MII_LSI_PHY_STAT);
308 if (mii_data & MII_LSI_PHY_STAT_SPD) {
309 if (mii_data & MII_LSI_PHY_STAT_FDX) {
310 *speed = IF_PORT_100BASEFX;
311 dev->if_port = IF_PORT_100BASEFX;
312 }
313 else {
314 *speed = IF_PORT_100BASETX;
315 dev->if_port = IF_PORT_100BASETX;
316 }
317 }
318 else {
319 *speed = IF_PORT_10BASET;
320 dev->if_port = IF_PORT_10BASET;
321 }
322
323 }
324 else {
325 *link = 0;
326 *speed = 0;
327 dev->if_port = IF_PORT_UNKNOWN;
328 }
329 return 0;
330}
331
332int am79c901_init(struct net_device *dev, int phy_addr)
333{
334 printk("am79c901_init\n");
335 return 0;
336}
337
338int am79c901_reset(struct net_device *dev, int phy_addr)
339{
340 printk("am79c901_reset\n");
341 return 0;
342}
343
344int
345am79c901_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
346{
347 return 0;
348}
349
350int am79c874_init(struct net_device *dev, int phy_addr)
351{
352 s16 data;
353
354 /* The 79c874 has bit assignments quite similar to the BCM5201 */
355 if (au1000_debug > 4)
356 printk("am79c874_init\n");
357
358 /* Stop auto-negotiation */
359 data = mdio_read(dev, phy_addr, MII_CONTROL);
360 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
361
362 /* Set advertisement to 10/100 and Half/Full duplex
363 * (full capabilities) */
364 data = mdio_read(dev, phy_addr, MII_ANADV);
365 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
366 mdio_write(dev, phy_addr, MII_ANADV, data);
367
368 /* Restart auto-negotiation */
369 data = mdio_read(dev, phy_addr, MII_CONTROL);
370 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
371
372 mdio_write(dev, phy_addr, MII_CONTROL, data);
373
374 if (au1000_debug > 4) dump_mii(dev, phy_addr);
375 return 0;
376}
377
378int am79c874_reset(struct net_device *dev, int phy_addr)
379{
380 s16 mii_control, timeout;
381
382 if (au1000_debug > 4)
383 printk("am79c874_reset\n");
384
385 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
386 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
387 mdelay(1);
388 for (timeout = 100; timeout > 0; --timeout) {
389 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
390 if ((mii_control & MII_CNTL_RESET) == 0)
391 break;
392 mdelay(1);
393 }
394 if (mii_control & MII_CNTL_RESET) {
395 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
396 return -1;
397 }
398 return 0;
399}
400
401int
402am79c874_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
403{
404 u16 mii_data;
405 struct au1000_private *aup;
406
407 // printk("am79c874_status\n");
408 if (!dev) {
409 printk(KERN_ERR "am79c874_status error: NULL dev\n");
410 return -1;
411 }
412
413 aup = (struct au1000_private *) dev->priv;
414 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
415
416 if (mii_data & MII_STAT_LINK) {
417 *link = 1;
418 mii_data = mdio_read(dev, aup->phy_addr, MII_AMD_PHY_STAT);
419 if (mii_data & MII_AMD_PHY_STAT_SPD) {
420 if (mii_data & MII_AMD_PHY_STAT_FDX) {
421 *speed = IF_PORT_100BASEFX;
422 dev->if_port = IF_PORT_100BASEFX;
423 }
424 else {
425 *speed = IF_PORT_100BASETX;
426 dev->if_port = IF_PORT_100BASETX;
427 }
428 }
429 else {
430 *speed = IF_PORT_10BASET;
431 dev->if_port = IF_PORT_10BASET;
432 }
433
434 }
435 else {
436 *link = 0;
437 *speed = 0;
438 dev->if_port = IF_PORT_UNKNOWN;
439 }
440 return 0;
441}
442
443int lxt971a_init(struct net_device *dev, int phy_addr)
444{
445 if (au1000_debug > 4)
446 printk("lxt971a_init\n");
447
448 /* restart auto-negotiation */
449 mdio_write(dev, phy_addr, MII_CONTROL,
450 MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO | MII_CNTL_FDX);
451
452 /* set up LEDs to correct display */
453 mdio_write(dev, phy_addr, 20, 0x0422);
454
455 if (au1000_debug > 4)
456 dump_mii(dev, phy_addr);
457 return 0;
458}
459
460int lxt971a_reset(struct net_device *dev, int phy_addr)
461{
462 s16 mii_control, timeout;
463
464 if (au1000_debug > 4) {
465 printk("lxt971a_reset\n");
466 dump_mii(dev, phy_addr);
467 }
468
469 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
470 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
471 mdelay(1);
472 for (timeout = 100; timeout > 0; --timeout) {
473 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
474 if ((mii_control & MII_CNTL_RESET) == 0)
475 break;
476 mdelay(1);
477 }
478 if (mii_control & MII_CNTL_RESET) {
479 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
480 return -1;
481 }
482 return 0;
483}
484
485int
486lxt971a_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
487{
488 u16 mii_data;
489 struct au1000_private *aup;
490
491 if (!dev) {
492 printk(KERN_ERR "lxt971a_status error: NULL dev\n");
493 return -1;
494 }
495 aup = (struct au1000_private *) dev->priv;
496
497 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
498 if (mii_data & MII_STAT_LINK) {
499 *link = 1;
500 mii_data = mdio_read(dev, aup->phy_addr, MII_INTEL_PHY_STAT);
501 if (mii_data & MII_INTEL_PHY_STAT_SPD) {
502 if (mii_data & MII_INTEL_PHY_STAT_FDX) {
503 *speed = IF_PORT_100BASEFX;
504 dev->if_port = IF_PORT_100BASEFX;
505 }
506 else {
507 *speed = IF_PORT_100BASETX;
508 dev->if_port = IF_PORT_100BASETX;
509 }
510 }
511 else {
512 *speed = IF_PORT_10BASET;
513 dev->if_port = IF_PORT_10BASET;
514 }
515
516 }
517 else {
518 *link = 0;
519 *speed = 0;
520 dev->if_port = IF_PORT_UNKNOWN;
521 }
522 return 0;
523}
524
525int ks8995m_init(struct net_device *dev, int phy_addr)
526{
527 s16 data;
528
529// printk("ks8995m_init\n");
530 /* Stop auto-negotiation */
531 data = mdio_read(dev, phy_addr, MII_CONTROL);
532 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
533
534 /* Set advertisement to 10/100 and Half/Full duplex
535 * (full capabilities) */
536 data = mdio_read(dev, phy_addr, MII_ANADV);
537 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
538 mdio_write(dev, phy_addr, MII_ANADV, data);
539
540 /* Restart auto-negotiation */
541 data = mdio_read(dev, phy_addr, MII_CONTROL);
542 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
543 mdio_write(dev, phy_addr, MII_CONTROL, data);
544
545 if (au1000_debug > 4) dump_mii(dev, phy_addr);
546
547 return 0;
548}
549
550int ks8995m_reset(struct net_device *dev, int phy_addr)
551{
552 s16 mii_control, timeout;
553
554// printk("ks8995m_reset\n");
555 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
556 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
557 mdelay(1);
558 for (timeout = 100; timeout > 0; --timeout) {
559 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
560 if ((mii_control & MII_CNTL_RESET) == 0)
561 break;
562 mdelay(1);
563 }
564 if (mii_control & MII_CNTL_RESET) {
565 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
566 return -1;
567 }
568 return 0;
569}
570
571int ks8995m_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
572{
573 u16 mii_data;
574 struct au1000_private *aup;
575
576 if (!dev) {
577 printk(KERN_ERR "ks8995m_status error: NULL dev\n");
578 return -1;
579 }
580 aup = (struct au1000_private *) dev->priv;
581
582 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
583 if (mii_data & MII_STAT_LINK) {
584 *link = 1;
585 mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
586 if (mii_data & MII_AUX_100) {
587 if (mii_data & MII_AUX_FDX) {
588 *speed = IF_PORT_100BASEFX;
589 dev->if_port = IF_PORT_100BASEFX;
590 }
591 else {
592 *speed = IF_PORT_100BASETX;
593 dev->if_port = IF_PORT_100BASETX;
594 }
595 }
596 else {
597 *speed = IF_PORT_10BASET;
598 dev->if_port = IF_PORT_10BASET;
599 }
600
601 }
602 else {
603 *link = 0;
604 *speed = 0;
605 dev->if_port = IF_PORT_UNKNOWN;
606 }
607 return 0;
608}
609
610int
611smsc_83C185_init (struct net_device *dev, int phy_addr)
612{
613 s16 data;
614
615 if (au1000_debug > 4)
616 printk("smsc_83C185_init\n");
617
618 /* Stop auto-negotiation */
619 data = mdio_read(dev, phy_addr, MII_CONTROL);
620 mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);
621
622 /* Set advertisement to 10/100 and Half/Full duplex
623 * (full capabilities) */
624 data = mdio_read(dev, phy_addr, MII_ANADV);
625 data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
626 mdio_write(dev, phy_addr, MII_ANADV, data);
627
628 /* Restart auto-negotiation */
629 data = mdio_read(dev, phy_addr, MII_CONTROL);
630 data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
631
632 mdio_write(dev, phy_addr, MII_CONTROL, data);
633
634 if (au1000_debug > 4) dump_mii(dev, phy_addr);
635 return 0;
636}
637
638int
639smsc_83C185_reset (struct net_device *dev, int phy_addr)
640{
641 s16 mii_control, timeout;
642
643 if (au1000_debug > 4)
644 printk("smsc_83C185_reset\n");
645
646 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
647 mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
648 mdelay(1);
649 for (timeout = 100; timeout > 0; --timeout) {
650 mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
651 if ((mii_control & MII_CNTL_RESET) == 0)
652 break;
653 mdelay(1);
654 }
655 if (mii_control & MII_CNTL_RESET) {
656 printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
657 return -1;
658 }
659 return 0;
660}
661
662int
663smsc_83C185_status (struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
664{
665 u16 mii_data;
666 struct au1000_private *aup;
667
668 if (!dev) {
669 printk(KERN_ERR "smsc_83C185_status error: NULL dev\n");
670 return -1;
671 }
672
673 aup = (struct au1000_private *) dev->priv;
674 mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
675
676 if (mii_data & MII_STAT_LINK) {
677 *link = 1;
678 mii_data = mdio_read(dev, aup->phy_addr, 0x1f);
679 if (mii_data & (1<<3)) {
680 if (mii_data & (1<<4)) {
681 *speed = IF_PORT_100BASEFX;
682 dev->if_port = IF_PORT_100BASEFX;
683 }
684 else {
685 *speed = IF_PORT_100BASETX;
686 dev->if_port = IF_PORT_100BASETX;
687 }
688 }
689 else {
690 *speed = IF_PORT_10BASET;
691 dev->if_port = IF_PORT_10BASET;
692 }
693 }
694 else {
695 *link = 0;
696 *speed = 0;
697 dev->if_port = IF_PORT_UNKNOWN;
698 }
699 return 0;
700}
701
702
703#ifdef CONFIG_MIPS_BOSPORUS
704int stub_init(struct net_device *dev, int phy_addr)
705{
706 //printk("PHY stub_init\n");
707 return 0;
708}
709
710int stub_reset(struct net_device *dev, int phy_addr)
711{
712 //printk("PHY stub_reset\n");
713 return 0;
714}
715
716int
717stub_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
718{
719 //printk("PHY stub_status\n");
720 *link = 1;
721 /* hmmm, revisit */
722 *speed = IF_PORT_100BASEFX;
723 dev->if_port = IF_PORT_100BASEFX;
724 return 0;
725}
726#endif
727
728struct phy_ops bcm_5201_ops = {
729 bcm_5201_init,
730 bcm_5201_reset,
731 bcm_5201_status,
732};
733
734struct phy_ops am79c874_ops = {
735 am79c874_init,
736 am79c874_reset,
737 am79c874_status,
738};
739
740struct phy_ops am79c901_ops = {
741 am79c901_init,
742 am79c901_reset,
743 am79c901_status,
744};
745
746struct phy_ops lsi_80227_ops = {
747 lsi_80227_init,
748 lsi_80227_reset,
749 lsi_80227_status,
750};
751
752struct phy_ops lxt971a_ops = {
753 lxt971a_init,
754 lxt971a_reset,
755 lxt971a_status,
756};
757
758struct phy_ops ks8995m_ops = {
759 ks8995m_init,
760 ks8995m_reset,
761 ks8995m_status,
762};
763
764struct phy_ops smsc_83C185_ops = {
765 smsc_83C185_init,
766 smsc_83C185_reset,
767 smsc_83C185_status,
768};
769
770#ifdef CONFIG_MIPS_BOSPORUS
771struct phy_ops stub_ops = {
772 stub_init,
773 stub_reset,
774 stub_status,
775};
776#endif
777
778static struct mii_chip_info {
779 const char * name;
780 u16 phy_id0;
781 u16 phy_id1;
782 struct phy_ops *phy_ops;
783 int dual_phy;
784} mii_chip_table[] = {
785 {"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
786 {"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
787 {"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
788 {"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
789 {"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
790 {"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
791 {"Intel LXT971A Dual Speed PHY",0x0013,0x78e2, &lxt971a_ops,0},
792 {"Kendin KS8995M 10/100 BaseT PHY",0x0022,0x1450, &ks8995m_ops,0},
793 {"SMSC LAN83C185 10/100 BaseT PHY",0x0007,0xc0a3, &smsc_83C185_ops,0},
794#ifdef CONFIG_MIPS_BOSPORUS
795 {"Stub", 0x1234, 0x5678, &stub_ops },
796#endif
797 {0,},
798};
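/*
 * The phy_id0/phy_id1 pairs above are matched against the two standard
 * PHY identifier registers (MII registers 2 and 3, read as MII_PHY_ID0
 * and MII_PHY_ID1 in mii_probe() below).  Register 2 carries the upper
 * OUI bits and register 3 the remaining OUI bits plus the model and
 * revision numbers, which is why both words are needed to pick out a
 * specific part.
 */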
799
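/*
 * MDIO access helpers.  Both directions start by waiting for
 * MAC_MII_BUSY to clear.  A read then issues the command through the
 * mii_control register and waits for BUSY to clear again before the
 * mii_data register is valid; a write loads mii_data first and then
 * issues the command.  The busy-waits poll once per millisecond and give
 * up after roughly 20ms.
 */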
800static int mdio_read(struct net_device *dev, int phy_id, int reg)
801{
802 struct au1000_private *aup = (struct au1000_private *) dev->priv;
803 volatile u32 *mii_control_reg;
804 volatile u32 *mii_data_reg;
805 u32 timedout = 20;
806 u32 mii_control;
807
808 #ifdef CONFIG_BCM5222_DUAL_PHY
809 /* First time we probe, it's for the mac0 phy.
810 * Since we haven't determined yet that we have a dual phy,
811 * aup->mii->mii_control_reg won't be setup and we'll
812 * default to the else statement.
813 * By the time we probe for the mac1 phy, the mii_control_reg
814 * will be setup to be the address of the mac0 phy control since
815 * both phys are controlled through mac0.
816 */
817 if (aup->mii && aup->mii->mii_control_reg) {
818 mii_control_reg = aup->mii->mii_control_reg;
819 mii_data_reg = aup->mii->mii_data_reg;
820 }
821 else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
822 /* assume both phys are controlled through mac0 */
823 mii_control_reg = au_macs[0]->mii->mii_control_reg;
824 mii_data_reg = au_macs[0]->mii->mii_data_reg;
825 }
826 else
827 #endif
828 {
829 /* default control and data reg addresses */
830 mii_control_reg = &aup->mac->mii_control;
831 mii_data_reg = &aup->mac->mii_data;
832 }
833
834 while (*mii_control_reg & MAC_MII_BUSY) {
835 mdelay(1);
836 if (--timedout == 0) {
837 printk(KERN_ERR "%s: read_MII busy timeout!!\n",
838 dev->name);
839 return -1;
840 }
841 }
842
843 mii_control = MAC_SET_MII_SELECT_REG(reg) |
844 MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_READ;
845
846 *mii_control_reg = mii_control;
847
848 timedout = 20;
849 while (*mii_control_reg & MAC_MII_BUSY) {
850 mdelay(1);
851 if (--timedout == 0) {
852 printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
853 dev->name);
854 return -1;
855 }
856 }
857 return (int)*mii_data_reg;
858}
859
860static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
861{
862 struct au1000_private *aup = (struct au1000_private *) dev->priv;
863 volatile u32 *mii_control_reg;
864 volatile u32 *mii_data_reg;
865 u32 timedout = 20;
866 u32 mii_control;
867
868 #ifdef CONFIG_BCM5222_DUAL_PHY
869 if (aup->mii && aup->mii->mii_control_reg) {
870 mii_control_reg = aup->mii->mii_control_reg;
871 mii_data_reg = aup->mii->mii_data_reg;
872 }
873 else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
874 /* assume both phys are controlled through mac0 */
875 mii_control_reg = au_macs[0]->mii->mii_control_reg;
876 mii_data_reg = au_macs[0]->mii->mii_data_reg;
877 }
878 else
879 #endif
880 {
881 /* default control and data reg addresses */
882 mii_control_reg = &aup->mac->mii_control;
883 mii_data_reg = &aup->mac->mii_data;
884 }
885
886 while (*mii_control_reg & MAC_MII_BUSY) {
887 mdelay(1);
888 if (--timedout == 0) {
889 printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
890 dev->name);
891 return;
892 }
893 }
894
895 mii_control = MAC_SET_MII_SELECT_REG(reg) |
896 MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_WRITE;
897
898 *mii_data_reg = value;
899 *mii_control_reg = mii_control;
900}
901
902
903static void dump_mii(struct net_device *dev, int phy_id)
904{
905 int i, val;
906
907 for (i = 0; i < 7; i++) {
908 if ((val = mdio_read(dev, phy_id, i)) >= 0)
909 printk("%s: MII Reg %d=%x\n", dev->name, i, val);
910 }
911 for (i = 16; i < 25; i++) {
912 if ((val = mdio_read(dev, phy_id, i)) >= 0)
913 printk("%s: MII Reg %d=%x\n", dev->name, i, val);
914 }
915}
916
917static int mii_probe (struct net_device * dev)
918{
919 struct au1000_private *aup = (struct au1000_private *) dev->priv;
920 int phy_addr;
921#ifdef CONFIG_MIPS_BOSPORUS
922 int phy_found=0;
923#endif
924
925 /* search through all 32 possible mii phy addresses */
926 for (phy_addr = 0; phy_addr < 32; phy_addr++) {
927 u16 mii_status;
928 u16 phy_id0, phy_id1;
929 int i;
930
931 #ifdef CONFIG_BCM5222_DUAL_PHY
932 /* Mask the already found phy, try next one */
933 if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
934 if (au_macs[0]->phy_addr == phy_addr)
935 continue;
936 }
937 #endif
938
939 mii_status = mdio_read(dev, phy_addr, MII_STATUS);
940 if (mii_status == 0xffff || mii_status == 0x0000)
941 /* the mii is not accessible, try next one */
942 continue;
943
944 phy_id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
945 phy_id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);
946
947 /* search our mii table for the current mii */
948 for (i = 0; mii_chip_table[i].phy_id1; i++) {
949 if (phy_id0 == mii_chip_table[i].phy_id0 &&
950 phy_id1 == mii_chip_table[i].phy_id1) {
951 struct mii_phy * mii_phy = aup->mii;
952
953 printk(KERN_INFO "%s: %s at phy address %d\n",
954 dev->name, mii_chip_table[i].name,
955 phy_addr);
956#ifdef CONFIG_MIPS_BOSPORUS
957 phy_found = 1;
958#endif
959 mii_phy->chip_info = mii_chip_table+i;
960 aup->phy_addr = phy_addr;
961 aup->want_autoneg = 1;
962 aup->phy_ops = mii_chip_table[i].phy_ops;
963 aup->phy_ops->phy_init(dev,phy_addr);
964
965 // Check for dual-phy and then store required
966 // values and set indicators. We need to do
967 // this now since mdio_{read,write} need the
968 // control and data register addresses.
969 #ifdef CONFIG_BCM5222_DUAL_PHY
970 if ( mii_chip_table[i].dual_phy) {
971
972 /* assume both phys are controlled
973 * through MAC0. Board specific? */
974
975 /* sanity check */
976 if (!au_macs[0] || !au_macs[0]->mii)
977 return -1;
978 aup->mii->mii_control_reg = (u32 *)
979 &au_macs[0]->mac->mii_control;
980 aup->mii->mii_data_reg = (u32 *)
981 &au_macs[0]->mac->mii_data;
982 }
983 #endif
984 goto found;
985 }
986 }
987 }
988found:
989
990#ifdef CONFIG_MIPS_BOSPORUS
991 /* This is a workaround for the Micrel/Kendin 5 port switch.
992 The second MAC doesn't see a PHY connected... so we need to
993 trick it into thinking we have one.
994
995 If this kernel is run on another Au1500 development board
996 the stub will be found as well as the actual PHY. However,
997 the last found PHY will be used... usually at Addr 31 (Db1500).
998 */
999 if ( (!phy_found) )
1000 {
1001 u16 phy_id0, phy_id1;
1002 int i;
1003
1004 phy_id0 = 0x1234;
1005 phy_id1 = 0x5678;
1006
1007 /* search our mii table for the current mii */
1008 for (i = 0; mii_chip_table[i].phy_id1; i++) {
1009 if (phy_id0 == mii_chip_table[i].phy_id0 &&
1010 phy_id1 == mii_chip_table[i].phy_id1) {
1011 struct mii_phy * mii_phy;
1012
1013 printk(KERN_INFO "%s: %s at phy address %d\n",
1014 dev->name, mii_chip_table[i].name,
1015 phy_addr);
1016 mii_phy = kmalloc(sizeof(struct mii_phy),
1017 GFP_KERNEL);
1018 if (mii_phy) {
1019 mii_phy->chip_info = mii_chip_table+i;
1020 aup->phy_addr = phy_addr;
1021 mii_phy->next = aup->mii;
1022 aup->phy_ops =
1023 mii_chip_table[i].phy_ops;
1024 aup->mii = mii_phy;
1025 aup->phy_ops->phy_init(dev,phy_addr);
1026 } else {
1027 printk(KERN_ERR "%s: out of memory\n",
1028 dev->name);
1029 return -1;
1030 }
1031 mii_phy->chip_info = mii_chip_table+i;
1032 aup->phy_addr = phy_addr;
1033 aup->phy_ops = mii_chip_table[i].phy_ops;
1034 aup->phy_ops->phy_init(dev,phy_addr);
1035 break;
1036 }
1037 }
1038 }
1039 if (aup->mac_id == 0) {
1040 /* the Bosporus phy responds to addresses 0-5 but
1041 * 5 is the correct one.
1042 */
1043 aup->phy_addr = 5;
1044 }
1045#endif
1046
1047 if (aup->mii->chip_info == NULL) {
1048 printk(KERN_ERR "%s: Au1x No MII transceivers found!\n",
1049 dev->name);
1050 return -1;
1051 }
1052
1053 printk(KERN_INFO "%s: Using %s as default\n",
1054 dev->name, aup->mii->chip_info->name);
1055
1056 return 0;
1057}
1058
1059
1060/*
1061 * Buffer allocation/deallocation routines. The buffer descriptor returned
1062 * has the virtual and dma address of a buffer suitable for
1063 * both, receive and transmit operations.
1064 */
1065static db_dest_t *GetFreeDB(struct au1000_private *aup)
1066{
1067 db_dest_t *pDB;
1068 pDB = aup->pDBfree;
1069
1070 if (pDB) {
1071 aup->pDBfree = pDB->pnext;
1072 }
1073 return pDB;
1074}
1075
1076void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
1077{
1078 db_dest_t *pDBfree = aup->pDBfree;
1079 if (pDBfree)
1080 pDBfree->pnext = pDB;
1081 aup->pDBfree = pDB;
1082}
1083
1084static void enable_rx_tx(struct net_device *dev)
1085{
1086 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1087
1088 if (au1000_debug > 4)
1089 printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
1090
1091 aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
1092 au_sync_delay(10);
1093}
1094
1095static void hard_stop(struct net_device *dev)
1096{
1097 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1098
1099 if (au1000_debug > 4)
1100 printk(KERN_INFO "%s: hard stop\n", dev->name);
1101
1102 aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
1103 au_sync_delay(10);
1104}
1105
1106
1107static void reset_mac(struct net_device *dev)
1108{
1109 int i;
1110 u32 flags;
1111 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1112
1113 if (au1000_debug > 4)
1114 printk(KERN_INFO "%s: reset mac, aup %x\n",
1115 dev->name, (unsigned)aup);
1116
1117 spin_lock_irqsave(&aup->lock, flags);
1118 if (aup->timer.function == &au1000_timer) { /* check if timer was initialized */
1119 del_timer(&aup->timer);
1120 }
1121
1122 hard_stop(dev);
1123 #ifdef CONFIG_BCM5222_DUAL_PHY
1124 if (aup->mac_id != 0) {
1125 #endif
1126 /* If BCM5222, we can't leave MAC0 in reset because then
1127 * we can't access the dual phy for ETH1 */
1128 *aup->enable = MAC_EN_CLOCK_ENABLE;
1129 au_sync_delay(2);
1130 *aup->enable = 0;
1131 au_sync_delay(2);
1132 #ifdef CONFIG_BCM5222_DUAL_PHY
1133 }
1134 #endif
1135 aup->tx_full = 0;
1136 for (i = 0; i < NUM_RX_DMA; i++) {
1137 /* reset control bits */
1138 aup->rx_dma_ring[i]->buff_stat &= ~0xf;
1139 }
1140 for (i = 0; i < NUM_TX_DMA; i++) {
1141 /* reset control bits */
1142 aup->tx_dma_ring[i]->buff_stat &= ~0xf;
1143 }
1144 spin_unlock_irqrestore(&aup->lock, flags);
1145}
1146
1147
1148/*
1149 * Setup the receive and transmit "rings". These pointers are the addresses
1150 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
1151 * these are not descriptors sitting in memory.
1152 */
1153static void
1154setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
1155{
1156 int i;
1157
1158 for (i = 0; i < NUM_RX_DMA; i++) {
1159 aup->rx_dma_ring[i] =
1160 (volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
1161 }
1162 for (i = 0; i < NUM_TX_DMA; i++) {
1163 aup->tx_dma_ring[i] =
1164 (volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
1165 }
1166}
1167
1168static struct {
1169 int port;
1170 u32 base_addr;
1171 u32 macen_addr;
1172 int irq;
1173 struct net_device *dev;
1174} iflist[2];
1175
1176static int num_ifs;
1177
1178/*
1179 * Setup the base address and interrupt of the Au1xxx ethernet macs
1180 * based on cpu type and whether the interface is enabled in sys_pinfunc
1181 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
1182 */
1183static int __init au1000_init_module(void)
1184{
1185 struct cpuinfo_mips *c = &current_cpu_data;
1186 int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
1187 struct net_device *dev;
1188 int i, found_one = 0;
1189
1190 switch (c->cputype) {
1191#ifdef CONFIG_SOC_AU1000
1192 case CPU_AU1000:
1193 num_ifs = 2 - ni;
1194 iflist[0].base_addr = AU1000_ETH0_BASE;
1195 iflist[1].base_addr = AU1000_ETH1_BASE;
1196 iflist[0].macen_addr = AU1000_MAC0_ENABLE;
1197 iflist[1].macen_addr = AU1000_MAC1_ENABLE;
1198 iflist[0].irq = AU1000_MAC0_DMA_INT;
1199 iflist[1].irq = AU1000_MAC1_DMA_INT;
1200 break;
1201#endif
1202#ifdef CONFIG_SOC_AU1100
1203 case CPU_AU1100:
1204 num_ifs = 1 - ni;
1205 iflist[0].base_addr = AU1100_ETH0_BASE;
1206 iflist[0].macen_addr = AU1100_MAC0_ENABLE;
1207 iflist[0].irq = AU1100_MAC0_DMA_INT;
1208 break;
1209#endif
1210#ifdef CONFIG_SOC_AU1500
1211 case CPU_AU1500:
1212 num_ifs = 2 - ni;
1213 iflist[0].base_addr = AU1500_ETH0_BASE;
1214 iflist[1].base_addr = AU1500_ETH1_BASE;
1215 iflist[0].macen_addr = AU1500_MAC0_ENABLE;
1216 iflist[1].macen_addr = AU1500_MAC1_ENABLE;
1217 iflist[0].irq = AU1500_MAC0_DMA_INT;
1218 iflist[1].irq = AU1500_MAC1_DMA_INT;
1219 break;
1220#endif
1221#ifdef CONFIG_SOC_AU1550
1222 case CPU_AU1550:
1223 num_ifs = 2 - ni;
1224 iflist[0].base_addr = AU1550_ETH0_BASE;
1225 iflist[1].base_addr = AU1550_ETH1_BASE;
1226 iflist[0].macen_addr = AU1550_MAC0_ENABLE;
1227 iflist[1].macen_addr = AU1550_MAC1_ENABLE;
1228 iflist[0].irq = AU1550_MAC0_DMA_INT;
1229 iflist[1].irq = AU1550_MAC1_DMA_INT;
1230 break;
1231#endif
1232 default:
1233 num_ifs = 0;
1234 }
1235 for(i = 0; i < num_ifs; i++) {
1236 dev = au1000_probe(iflist[i].base_addr, iflist[i].irq, i);
1237 iflist[i].dev = dev;
1238 if (dev)
1239 found_one++;
1240 }
1241 if (!found_one)
1242 return -ENODEV;
1243 return 0;
1244}
1245
1246static int au1000_setup_aneg(struct net_device *dev, u32 advertise)
1247{
1248 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1249 u16 ctl, adv;
1250
1251 /* Setup standard advertise */
1252 adv = mdio_read(dev, aup->phy_addr, MII_ADVERTISE);
1253 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
1254 if (advertise & ADVERTISED_10baseT_Half)
1255 adv |= ADVERTISE_10HALF;
1256 if (advertise & ADVERTISED_10baseT_Full)
1257 adv |= ADVERTISE_10FULL;
1258 if (advertise & ADVERTISED_100baseT_Half)
1259 adv |= ADVERTISE_100HALF;
1260 if (advertise & ADVERTISED_100baseT_Full)
1261 adv |= ADVERTISE_100FULL;
1262 mdio_write(dev, aup->phy_addr, MII_ADVERTISE, adv);
1263
1264 /* Start/Restart aneg */
1265 ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
1266 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
1267 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
1268
1269 return 0;
1270}
1271
1272static int au1000_setup_forced(struct net_device *dev, int speed, int fd)
1273{
1274 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1275 u16 ctl;
1276
1277 ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
1278 ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);
1279
1280 /* First reset the PHY */
1281 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl | BMCR_RESET);
1282
1283 /* Select speed & duplex */
1284 switch (speed) {
1285 case SPEED_10:
1286 break;
1287 case SPEED_100:
1288 ctl |= BMCR_SPEED100;
1289 break;
1290 case SPEED_1000:
1291 default:
1292 return -EINVAL;
1293 }
1294 if (fd == DUPLEX_FULL)
1295 ctl |= BMCR_FULLDPLX;
1296 mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);
1297
1298 return 0;
1299}
1300
1301
1302static void
1303au1000_start_link(struct net_device *dev, struct ethtool_cmd *cmd)
1304{
1305 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1306 u32 advertise;
1307 int autoneg;
1308 int forced_speed;
1309 int forced_duplex;
1310
1311 /* Default advertise */
1312 advertise = GENMII_DEFAULT_ADVERTISE;
1313 autoneg = aup->want_autoneg;
1314 forced_speed = SPEED_100;
1315 forced_duplex = DUPLEX_FULL;
1316
1317 /* Setup link parameters */
1318 if (cmd) {
1319 if (cmd->autoneg == AUTONEG_ENABLE) {
1320 advertise = cmd->advertising;
1321 autoneg = 1;
1322 } else {
1323 autoneg = 0;
1324
1325 forced_speed = cmd->speed;
1326 forced_duplex = cmd->duplex;
1327 }
1328 }
1329
1330 /* Configure PHY & start aneg */
1331 aup->want_autoneg = autoneg;
1332 if (autoneg)
1333 au1000_setup_aneg(dev, advertise);
1334 else
1335 au1000_setup_forced(dev, forced_speed, forced_duplex);
1336 mod_timer(&aup->timer, jiffies + HZ);
1337}
1338
1339static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1340{
1341 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1342 u16 link, speed;
1343
1344 cmd->supported = GENMII_DEFAULT_FEATURES;
1345 cmd->advertising = GENMII_DEFAULT_ADVERTISE;
1346 cmd->port = PORT_MII;
1347 cmd->transceiver = XCVR_EXTERNAL;
1348 cmd->phy_address = aup->phy_addr;
1349 spin_lock_irq(&aup->lock);
1350 cmd->autoneg = aup->want_autoneg;
1351 aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
1352 if ((speed == IF_PORT_100BASETX) || (speed == IF_PORT_100BASEFX))
1353 cmd->speed = SPEED_100;
1354 else if (speed == IF_PORT_10BASET)
1355 cmd->speed = SPEED_10;
1356 if (link && (dev->if_port == IF_PORT_100BASEFX))
1357 cmd->duplex = DUPLEX_FULL;
1358 else
1359 cmd->duplex = DUPLEX_HALF;
1360 spin_unlock_irq(&aup->lock);
1361 return 0;
1362}
1363
1364static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1365{
1366 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1367 unsigned long features = GENMII_DEFAULT_FEATURES;
1368
1369 if (!capable(CAP_NET_ADMIN))
1370 return -EPERM;
1371
1372 if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1373 return -EINVAL;
1374 if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1375 return -EINVAL;
1376 if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1377 return -EINVAL;
1378 if (cmd->autoneg == AUTONEG_DISABLE)
1379 switch (cmd->speed) {
1380 case SPEED_10:
1381 if (cmd->duplex == DUPLEX_HALF &&
1382 (features & SUPPORTED_10baseT_Half) == 0)
1383 return -EINVAL;
1384 if (cmd->duplex == DUPLEX_FULL &&
1385 (features & SUPPORTED_10baseT_Full) == 0)
1386 return -EINVAL;
1387 break;
1388 case SPEED_100:
1389 if (cmd->duplex == DUPLEX_HALF &&
1390 (features & SUPPORTED_100baseT_Half) == 0)
1391 return -EINVAL;
1392 if (cmd->duplex == DUPLEX_FULL &&
1393 (features & SUPPORTED_100baseT_Full) == 0)
1394 return -EINVAL;
1395 break;
1396 default:
1397 return -EINVAL;
1398 }
1399 else if ((features & SUPPORTED_Autoneg) == 0)
1400 return -EINVAL;
1401
1402 spin_lock_irq(&aup->lock);
1403 au1000_start_link(dev, cmd);
1404 spin_unlock_irq(&aup->lock);
1405 return 0;
1406}
1407
1408static int au1000_nway_reset(struct net_device *dev)
1409{
1410 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1411
1412 if (!aup->want_autoneg)
1413 return -EINVAL;
1414 spin_lock_irq(&aup->lock);
1415 au1000_start_link(dev, NULL);
1416 spin_unlock_irq(&aup->lock);
1417 return 0;
1418}
1419
1420static void
1421au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1422{
1423 struct au1000_private *aup = (struct au1000_private *)dev->priv;
1424
1425 strcpy(info->driver, DRV_NAME);
1426 strcpy(info->version, DRV_VERSION);
1427 info->fw_version[0] = '\0';
1428 sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
1429 info->regdump_len = 0;
1430}
1431
1432static u32 au1000_get_link(struct net_device *dev)
1433{
1434 return netif_carrier_ok(dev);
1435}
1436
1437static struct ethtool_ops au1000_ethtool_ops = {
1438 .get_settings = au1000_get_settings,
1439 .set_settings = au1000_set_settings,
1440 .get_drvinfo = au1000_get_drvinfo,
1441 .nway_reset = au1000_nway_reset,
1442 .get_link = au1000_get_link
1443};
1444
1445static struct net_device *
1446au1000_probe(u32 ioaddr, int irq, int port_num)
1447{
1448 static unsigned version_printed = 0;
1449 struct au1000_private *aup = NULL;
1450 struct net_device *dev = NULL;
1451 db_dest_t *pDB, *pDBfree;
1452 char *pmac, *argptr;
1453 char ethaddr[6];
1454 int i, err;
1455
1456 if (!request_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE, "Au1x00 ENET"))
1457 return NULL;
1458
1459 if (version_printed++ == 0)
1460 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1461
1462 dev = alloc_etherdev(sizeof(struct au1000_private));
1463 if (!dev) {
1464 printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");
1465 return NULL;
1466 }
1467
1468 if ((err = register_netdev(dev))) {
1469 printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
1470 err);
1471 free_netdev(dev);
1472 return NULL;
1473 }
1474
1475 printk("%s: Au1x Ethernet found at 0x%x, irq %d\n",
1476 dev->name, ioaddr, irq);
1477
1478 aup = dev->priv;
1479
1480 /* Allocate the data buffers */
1481 /* Snooping works fine with eth on all au1xxx */
1482 aup->vaddr = (u32)dma_alloc_noncoherent(NULL,
1483 MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
1484 &aup->dma_addr,
1485 0);
1486 if (!aup->vaddr) {
1487 free_netdev(dev);
1488 release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
1489 return NULL;
1490 }
1491
1492 /* aup->mac is the base address of the MAC's registers */
1493 aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
1494 /* Setup some variables for quick register address access */
1495 if (ioaddr == iflist[0].base_addr)
1496 {
1497 /* check env variables first */
1498 if (!get_ethernet_addr(ethaddr)) {
1499 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
1500 } else {
1501 /* Check command line */
1502 argptr = prom_getcmdline();
1503 if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
1504 printk(KERN_INFO "%s: No mac address found\n",
1505 dev->name);
1506 /* use the hard coded mac addresses */
1507 } else {
1508 str2eaddr(ethaddr, pmac + strlen("ethaddr="));
1509 memcpy(au1000_mac_addr, ethaddr,
1510 sizeof(au1000_mac_addr));
1511 }
1512 }
1513 aup->enable = (volatile u32 *)
1514 ((unsigned long)iflist[0].macen_addr);
1515 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1516 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
1517 aup->mac_id = 0;
1518 au_macs[0] = aup;
1519 }
1520 else
1521 if (ioaddr == iflist[1].base_addr)
1522 {
1523 aup->enable = (volatile u32 *)
1524 ((unsigned long)iflist[1].macen_addr);
1525 memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
1526 dev->dev_addr[4] += 0x10;
1527 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
1528 aup->mac_id = 1;
1529 au_macs[1] = aup;
1530 }
1531 else
1532 {
1533 printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
1534 }
1535
1536 /* bring the device out of reset, otherwise probing the mii
1537 * will hang */
1538 *aup->enable = MAC_EN_CLOCK_ENABLE;
1539 au_sync_delay(2);
1540 *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
1541 MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
1542 au_sync_delay(2);
1543
1544 aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
1545 if (!aup->mii) {
1546 printk(KERN_ERR "%s: out of memory\n", dev->name);
1547 goto err_out;
1548 }
1549 aup->mii->mii_control_reg = 0;
1550 aup->mii->mii_data_reg = 0;
1551
1552 if (mii_probe(dev) != 0) {
1553 goto err_out;
1554 }
1555
1556 pDBfree = NULL;
1557 /* setup the data buffer descriptors and attach a buffer to each one */
1558 pDB = aup->db;
1559 for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1560 pDB->pnext = pDBfree;
1561 pDBfree = pDB;
1562 pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1563 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1564 pDB++;
1565 }
1566 aup->pDBfree = pDBfree;
1567
1568 for (i = 0; i < NUM_RX_DMA; i++) {
1569 pDB = GetFreeDB(aup);
1570 if (!pDB) {
1571 goto err_out;
1572 }
1573 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1574 aup->rx_db_inuse[i] = pDB;
1575 }
1576 for (i = 0; i < NUM_TX_DMA; i++) {
1577 pDB = GetFreeDB(aup);
1578 if (!pDB) {
1579 goto err_out;
1580 }
1581 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1582 aup->tx_dma_ring[i]->len = 0;
1583 aup->tx_db_inuse[i] = pDB;
1584 }
1585
1586 spin_lock_init(&aup->lock);
1587 dev->base_addr = ioaddr;
1588 dev->irq = irq;
1589 dev->open = au1000_open;
1590 dev->hard_start_xmit = au1000_tx;
1591 dev->stop = au1000_close;
1592 dev->get_stats = au1000_get_stats;
1593 dev->set_multicast_list = &set_rx_mode;
1594 dev->do_ioctl = &au1000_ioctl;
1595 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1596 dev->set_config = &au1000_set_config;
1597 dev->tx_timeout = au1000_tx_timeout;
1598 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1599
1600 /*
1601 * The boot code uses the ethernet controller, so reset it to start
1602 * fresh. au1000_init() expects that the device is in reset state.
1603 */
1604 reset_mac(dev);
1605
1606 return dev;
1607
1608err_out:
1609 /* here we should have a valid dev plus aup-> register addresses
1610 * so we can reset the mac properly.*/
1611 reset_mac(dev);
1612 if (aup->mii)
1613 kfree(aup->mii);
1614 for (i = 0; i < NUM_RX_DMA; i++) {
1615 if (aup->rx_db_inuse[i])
1616 ReleaseDB(aup, aup->rx_db_inuse[i]);
1617 }
1618 for (i = 0; i < NUM_TX_DMA; i++) {
1619 if (aup->tx_db_inuse[i])
1620 ReleaseDB(aup, aup->tx_db_inuse[i]);
1621 }
1622 dma_free_noncoherent(NULL,
1623 MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
1624 (void *)aup->vaddr,
1625 aup->dma_addr);
1626 unregister_netdev(dev);
1627 free_netdev(dev);
1628 release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
1629 return NULL;
1630}
1631
1632/*
1633 * Initialize the interface.
1634 *
1635 * When the device powers up, the clocks are disabled and the
1636 * mac is in reset state. When the interface is closed, we
1637 * do the same -- reset the device and disable the clocks to
1638 * conserve power. Thus, whenever au1000_init() is called,
1639 * the device should already be in reset state.
1640 */
1641static int au1000_init(struct net_device *dev)
1642{
1643 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1644 u32 flags;
1645 int i;
1646 u32 control;
1647 u16 link, speed;
1648
1649 if (au1000_debug > 4)
1650 printk("%s: au1000_init\n", dev->name);
1651
1652 spin_lock_irqsave(&aup->lock, flags);
1653
1654 /* bring the device out of reset */
1655 *aup->enable = MAC_EN_CLOCK_ENABLE;
1656 au_sync_delay(2);
1657 *aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
1658 MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
1659 au_sync_delay(20);
1660
1661 aup->mac->control = 0;
1662 aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
1663 aup->tx_tail = aup->tx_head;
1664 aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
1665
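/*
 * The 6-byte station address is split across two MAC registers: bytes 4
 * and 5 go into mac_addr_high, bytes 0-3 into mac_addr_low, with byte 0
 * ending up in the least significant position.  As a worked example,
 * the default address 00:50:c2:0c:30:00 from au1000_mac_addr above
 * would give mac_addr_high = 0x0030 and mac_addr_low = 0x0cc25000.
 */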
1666 aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
1667 aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
1668 dev->dev_addr[1]<<8 | dev->dev_addr[0];
1669
1670 for (i = 0; i < NUM_RX_DMA; i++) {
1671 aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
1672 }
1673 au_sync();
1674
1675 aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
1676 control = MAC_DISABLE_RX_OWN | MAC_RX_ENABLE | MAC_TX_ENABLE;
1677#ifndef CONFIG_CPU_LITTLE_ENDIAN
1678 control |= MAC_BIG_ENDIAN;
1679#endif
1680 if (link && (dev->if_port == IF_PORT_100BASEFX)) {
1681 control |= MAC_FULL_DUPLEX;
1682 }
1683
1684 aup->mac->control = control;
1685 aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
1686 au_sync();
1687
1688 spin_unlock_irqrestore(&aup->lock, flags);
1689 return 0;
1690}
1691
1692static void au1000_timer(unsigned long data)
1693{
1694 struct net_device *dev = (struct net_device *)data;
1695 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1696 unsigned char if_port;
1697 u16 link, speed;
1698
1699 if (!dev) {
1700 /* fatal error, don't restart the timer */
1701 printk(KERN_ERR "au1000_timer error: NULL dev\n");
1702 return;
1703 }
1704
1705 if_port = dev->if_port;
1706 if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
1707 if (link) {
1708 if (!netif_carrier_ok(dev)) {
1709 netif_carrier_on(dev);
1710 printk(KERN_INFO "%s: link up\n", dev->name);
1711 }
1712 }
1713 else {
1714 if (netif_carrier_ok(dev)) {
1715 netif_carrier_off(dev);
1716 dev->if_port = 0;
1717 printk(KERN_INFO "%s: link down\n", dev->name);
1718 }
1719 }
1720 }
1721
1722 if (link && (dev->if_port != if_port) &&
1723 (dev->if_port != IF_PORT_UNKNOWN)) {
1724 hard_stop(dev);
1725 if (dev->if_port == IF_PORT_100BASEFX) {
1726 printk(KERN_INFO "%s: going to full duplex\n",
1727 dev->name);
1728 aup->mac->control |= MAC_FULL_DUPLEX;
1729 au_sync_delay(1);
1730 }
1731 else {
1732 aup->mac->control &= ~MAC_FULL_DUPLEX;
1733 au_sync_delay(1);
1734 }
1735 enable_rx_tx(dev);
1736 }
1737
1738 aup->timer.expires = RUN_AT((1*HZ));
1739 aup->timer.data = (unsigned long)dev;
1740 aup->timer.function = &au1000_timer; /* timer handler */
1741 add_timer(&aup->timer);
1742
1743}
1744
1745static int au1000_open(struct net_device *dev)
1746{
1747 int retval;
1748 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1749
1750 if (au1000_debug > 4)
1751 printk("%s: open: dev=%p\n", dev->name, dev);
1752
1753 if ((retval = au1000_init(dev))) {
1754 printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
1755 free_irq(dev->irq, dev);
1756 return retval;
1757 }
1758 netif_start_queue(dev);
1759
1760 if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
1761 dev->name, dev))) {
1762 printk(KERN_ERR "%s: unable to get IRQ %d\n",
1763 dev->name, dev->irq);
1764 return retval;
1765 }
1766
1767 init_timer(&aup->timer); /* used in ioctl() */
1768 aup->timer.expires = RUN_AT((3*HZ));
1769 aup->timer.data = (unsigned long)dev;
1770 aup->timer.function = &au1000_timer; /* timer handler */
1771 add_timer(&aup->timer);
1772
1773 if (au1000_debug > 4)
1774 printk("%s: open: Initialization done.\n", dev->name);
1775
1776 return 0;
1777}
1778
1779static int au1000_close(struct net_device *dev)
1780{
1781 u32 flags;
1782 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1783
1784 if (au1000_debug > 4)
1785 printk("%s: close: dev=%p\n", dev->name, dev);
1786
1787 reset_mac(dev);
1788
1789 spin_lock_irqsave(&aup->lock, flags);
1790
1791 /* stop the device */
1792 netif_stop_queue(dev);
1793
1794 /* disable the interrupt */
1795 free_irq(dev->irq, dev);
1796 spin_unlock_irqrestore(&aup->lock, flags);
1797
1798 return 0;
1799}
1800
1801static void __exit au1000_cleanup_module(void)
1802{
1803 int i, j;
1804 struct net_device *dev;
1805 struct au1000_private *aup;
1806
1807 for (i = 0; i < num_ifs; i++) {
1808 dev = iflist[i].dev;
1809 if (dev) {
1810 aup = (struct au1000_private *) dev->priv;
1811 unregister_netdev(dev);
1812 if (aup->mii)
1813 kfree(aup->mii);
1814 for (j = 0; j < NUM_RX_DMA; j++) {
1815 if (aup->rx_db_inuse[j])
1816 ReleaseDB(aup, aup->rx_db_inuse[j]);
1817 }
1818 for (j = 0; j < NUM_TX_DMA; j++) {
1819 if (aup->tx_db_inuse[j])
1820 ReleaseDB(aup, aup->tx_db_inuse[j]);
1821 }
1822 dma_free_noncoherent(NULL,
1823 MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
1824 (void *)aup->vaddr,
1825 aup->dma_addr);
1826 free_netdev(dev);
1827 release_mem_region(CPHYSADDR(iflist[i].base_addr), MAC_IOSIZE);
1828 }
1829 }
1830}
1831
1832
1833static inline void
1834update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
1835{
1836 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1837 struct net_device_stats *ps = &aup->stats;
1838
1839 ps->tx_packets++;
1840 ps->tx_bytes += pkt_len;
1841
1842 if (status & TX_FRAME_ABORTED) {
1843 if (dev->if_port == IF_PORT_100BASEFX) {
1844 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
1845 /* any other tx errors are only valid
1846 * in half duplex mode */
1847 ps->tx_errors++;
1848 ps->tx_aborted_errors++;
1849 }
1850 }
1851 else {
1852 ps->tx_errors++;
1853 ps->tx_aborted_errors++;
1854 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
1855 ps->tx_carrier_errors++;
1856 }
1857 }
1858}
1859
1860
1861/*
1862 * Called from the interrupt service routine to acknowledge
1863 * the TX DONE bits. This is a must if the irq is setup as
1864 * edge triggered.
1865 */
1866static void au1000_tx_ack(struct net_device *dev)
1867{
1868 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1869 volatile tx_dma_t *ptxd;
1870
1871 ptxd = aup->tx_dma_ring[aup->tx_tail];
1872
1873 while (ptxd->buff_stat & TX_T_DONE) {
1874 update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff);
1875 ptxd->buff_stat &= ~TX_T_DONE;
1876 ptxd->len = 0;
1877 au_sync();
1878
1879 aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
1880 ptxd = aup->tx_dma_ring[aup->tx_tail];
1881
1882 if (aup->tx_full) {
1883 aup->tx_full = 0;
1884 netif_wake_queue(dev);
1885 }
1886 }
1887}
1888
1889
1890/*
1891 * Au1000 transmit routine.
1892 */
1893static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
1894{
1895 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1896 volatile tx_dma_t *ptxd;
1897 u32 buff_stat;
1898 db_dest_t *pDB;
1899 int i;
1900
1901 if (au1000_debug > 5)
1902 printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
1903 dev->name, (unsigned)aup, skb->len,
1904 skb->data, aup->tx_head);
1905
1906 ptxd = aup->tx_dma_ring[aup->tx_head];
1907 buff_stat = ptxd->buff_stat;
1908 if (buff_stat & TX_DMA_ENABLE) {
1909 /* We've wrapped around and the transmitter is still busy */
1910 netif_stop_queue(dev);
1911 aup->tx_full = 1;
1912 return 1;
1913 }
1914 else if (buff_stat & TX_T_DONE) {
1915 update_tx_stats(dev, ptxd->status, ptxd->len & 0x3ff);
1916 ptxd->len = 0;
1917 }
1918
1919 if (aup->tx_full) {
1920 aup->tx_full = 0;
1921 netif_wake_queue(dev);
1922 }
1923
1924 pDB = aup->tx_db_inuse[aup->tx_head];
1925 memcpy((void *)pDB->vaddr, skb->data, skb->len);
1926 if (skb->len < ETH_ZLEN) {
1927 for (i=skb->len; i<ETH_ZLEN; i++) {
1928 ((char *)pDB->vaddr)[i] = 0;
1929 }
1930 ptxd->len = ETH_ZLEN;
1931 }
1932 else
1933 ptxd->len = skb->len;
1934
1935 ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
1936 au_sync();
1937 dev_kfree_skb(skb);
1938 aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
1939 dev->trans_start = jiffies;
1940 return 0;
1941}
1942
1943
1944static inline void update_rx_stats(struct net_device *dev, u32 status)
1945{
1946 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1947 struct net_device_stats *ps = &aup->stats;
1948
1949 ps->rx_packets++;
1950 if (status & RX_MCAST_FRAME)
1951 ps->multicast++;
1952
1953 if (status & RX_ERROR) {
1954 ps->rx_errors++;
1955 if (status & RX_MISSED_FRAME)
1956 ps->rx_missed_errors++;
1957 if (status & (RX_OVERLEN | RX_LEN_ERROR))
1958 ps->rx_length_errors++;
1959 if (status & RX_CRC_ERROR)
1960 ps->rx_crc_errors++;
1961 if (status & RX_COLL)
1962 ps->collisions++;
1963 }
1964 else
1965 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
1966
1967}
1968
1969/*
1970 * Au1000 receive routine.
1971 */
1972static int au1000_rx(struct net_device *dev)
1973{
1974 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1975 struct sk_buff *skb;
1976 volatile rx_dma_t *prxd;
1977 u32 buff_stat, status;
1978 db_dest_t *pDB;
1979 u32 frmlen;
1980
1981 if (au1000_debug > 5)
1982 printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
1983
1984 prxd = aup->rx_dma_ring[aup->rx_head];
1985 buff_stat = prxd->buff_stat;
1986 while (buff_stat & RX_T_DONE) {
1987 status = prxd->status;
1988 pDB = aup->rx_db_inuse[aup->rx_head];
1989 update_rx_stats(dev, status);
1990 if (!(status & RX_ERROR)) {
1991
1992 /* good frame */
1993 frmlen = (status & RX_FRAME_LEN_MASK);
1994 frmlen -= 4; /* Remove FCS */
1995 skb = dev_alloc_skb(frmlen + 2);
1996 if (skb == NULL) {
1997 printk(KERN_ERR
1998 "%s: Memory squeeze, dropping packet.\n",
1999 dev->name);
2000 aup->stats.rx_dropped++;
2001 continue;
2002 }
2003 skb->dev = dev;
2004 skb_reserve(skb, 2); /* 16 byte IP header align */
2005 eth_copy_and_sum(skb,
2006 (unsigned char *)pDB->vaddr, frmlen, 0);
2007 skb_put(skb, frmlen);
2008 skb->protocol = eth_type_trans(skb, dev);
2009 netif_rx(skb); /* pass the packet to upper layers */
2010 }
2011 else {
2012 if (au1000_debug > 4) {
2013 if (status & RX_MISSED_FRAME)
2014 printk("rx miss\n");
2015 if (status & RX_WDOG_TIMER)
2016 printk("rx wdog\n");
2017 if (status & RX_RUNT)
2018 printk("rx runt\n");
2019 if (status & RX_OVERLEN)
2020 printk("rx overlen\n");
2021 if (status & RX_COLL)
2022 printk("rx coll\n");
2023 if (status & RX_MII_ERROR)
2024 printk("rx mii error\n");
2025 if (status & RX_CRC_ERROR)
2026 printk("rx crc error\n");
2027 if (status & RX_LEN_ERROR)
2028 printk("rx len error\n");
2029 if (status & RX_U_CNTRL_FRAME)
2030 printk("rx u control frame\n");
2033 }
2034 }
2035 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
2036 aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
2037 au_sync();
2038
2039 /* next descriptor */
2040 prxd = aup->rx_dma_ring[aup->rx_head];
2041 buff_stat = prxd->buff_stat;
2042 dev->last_rx = jiffies;
2043 }
2044 return 0;
2045}
2046
2047
2048/*
2049 * Au1000 interrupt service routine.
2050 */
2051static irqreturn_t au1000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2052{
2053 struct net_device *dev = (struct net_device *) dev_id;
2054
2055 if (dev == NULL) {
2056		printk(KERN_ERR "au1000_eth: isr: null dev ptr\n");
2057 return IRQ_RETVAL(1);
2058 }
2059
2060 /* Handle RX interrupts first to minimize chance of overrun */
2061
2062 au1000_rx(dev);
2063 au1000_tx_ack(dev);
2064 return IRQ_RETVAL(1);
2065}
2066
2067
2068/*
2069 * The Tx ring has been full longer than the watchdog timeout
2070 * value; the transmitter is presumably hung, so reset it and restart.
2071 */
2072static void au1000_tx_timeout(struct net_device *dev)
2073{
2074 printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
2075 reset_mac(dev);
2076 au1000_init(dev);
2077 dev->trans_start = jiffies;
2078 netif_wake_queue(dev);
2079}
2080
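/*
 * CRC-32 of the station address, computed bit-serially with the Ethernet
 * polynomial; used only to select the multicast hash bin in set_rx_mode()
 * below.
 */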
2081
2082static unsigned const ethernet_polynomial = 0x04c11db7U;
2083static inline u32 ether_crc(int length, unsigned char *data)
2084{
2085 int crc = -1;
2086
2087 while(--length >= 0) {
2088 unsigned char current_octet = *data++;
2089 int bit;
2090 for (bit = 0; bit < 8; bit++, current_octet >>= 1)
2091 crc = (crc << 1) ^
2092 ((crc < 0) ^ (current_octet & 1) ?
2093 ethernet_polynomial : 0);
2094 }
2095 return crc;
2096}
2097
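/*
 * Three receive filter modes: promiscuous (IFF_PROMISC), pass-all-multicast
 * (IFF_ALLMULTI or more addresses than MULTICAST_FILTER_LIMIT), or the
 * 64-bit hash filter, where the top six bits of each address's CRC select
 * the bit to set in multi_hash_high/multi_hash_low.
 */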
2098static void set_rx_mode(struct net_device *dev)
2099{
2100 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2101
2102 if (au1000_debug > 4)
2103 printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);
2104
2105 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2106 aup->mac->control |= MAC_PROMISCUOUS;
2107 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
2108 } else if ((dev->flags & IFF_ALLMULTI) ||
2109 dev->mc_count > MULTICAST_FILTER_LIMIT) {
2110 aup->mac->control |= MAC_PASS_ALL_MULTI;
2111 aup->mac->control &= ~MAC_PROMISCUOUS;
2112 printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
2113 } else {
2114 int i;
2115 struct dev_mc_list *mclist;
2116 u32 mc_filter[2]; /* Multicast hash filter */
2117
2118 mc_filter[1] = mc_filter[0] = 0;
2119 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2120 i++, mclist = mclist->next) {
2121 set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
2122 (long *)mc_filter);
2123 }
2124 aup->mac->multi_hash_high = mc_filter[1];
2125 aup->mac->multi_hash_low = mc_filter[0];
2126 aup->mac->control &= ~MAC_PROMISCUOUS;
2127 aup->mac->control |= MAC_HASH_MODE;
2128 }
2129}
2130
2131
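/*
 * MII ioctls.  The u16 array overlays the phy_id/reg_num/val_in/val_out
 * fields of struct mii_ioctl_data inside ifr_ifru, so data[0]..data[3]
 * correspond to those fields; the SIOCDEVPRIVATE cases are the legacy
 * equivalents of the SIOCxMIIxxx ioctls.
 */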
2132static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2133{
2134 struct au1000_private *aup = (struct au1000_private *)dev->priv;
2135 u16 *data = (u16 *)&rq->ifr_ifru;
2136
2137 switch(cmd) {
2138 case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
2139 case SIOCGMIIPHY:
2140 if (!netif_running(dev)) return -EINVAL;
2141 data[0] = aup->phy_addr;
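		/* Fall through: also read the requested register. */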
2142 case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
2143 case SIOCGMIIREG:
2144 data[3] = mdio_read(dev, data[0], data[1]);
2145 return 0;
2146 case SIOCDEVPRIVATE+2: /* Write the specified MII register */
2147 case SIOCSMIIREG:
2148 if (!capable(CAP_NET_ADMIN))
2149 return -EPERM;
2150		mdio_write(dev, data[0], data[1], data[2]);
2151 return 0;
2152 default:
2153 return -EOPNOTSUPP;
2154 }
2155
2156}
2157
2158
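/*
 * Select the media type by rewriting the PHY's MII control register:
 * auto-negotiation for IF_PORT_UNKNOWN, otherwise forced 10/100 Mbit
 * half/full duplex.  The carrier is dropped first; the link timer later
 * detects the renegotiated link and brings the interface back up.
 */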
2159static int au1000_set_config(struct net_device *dev, struct ifmap *map)
2160{
2161 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2162 u16 control;
2163
2164 if (au1000_debug > 4) {
2165 printk("%s: set_config called: dev->if_port %d map->port %x\n",
2166 dev->name, dev->if_port, map->port);
2167 }
2168
2169 switch(map->port){
2170 case IF_PORT_UNKNOWN: /* use auto here */
2171 printk(KERN_INFO "%s: config phy for aneg\n",
2172 dev->name);
2173 dev->if_port = map->port;
2174 /* Link Down: the timer will bring it up */
2175 netif_carrier_off(dev);
2176
2177 /* read current control */
2178 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2179 control &= ~(MII_CNTL_FDX | MII_CNTL_F100);
2180
2181 /* enable auto negotiation and reset the negotiation */
2182 mdio_write(dev, aup->phy_addr, MII_CONTROL,
2183 control | MII_CNTL_AUTO |
2184 MII_CNTL_RST_AUTO);
2185
2186 break;
2187
2188 case IF_PORT_10BASET: /* 10BaseT */
2189 printk(KERN_INFO "%s: config phy for 10BaseT\n",
2190 dev->name);
2191 dev->if_port = map->port;
2192
2193 /* Link Down: the timer will bring it up */
2194 netif_carrier_off(dev);
2195
2196 /* set Speed to 10Mbps, Half Duplex */
2197 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2198 control &= ~(MII_CNTL_F100 | MII_CNTL_AUTO |
2199 MII_CNTL_FDX);
2200
2201 /* disable auto negotiation and force 10M/HD mode*/
2202 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2203 break;
2204
2205 case IF_PORT_100BASET: /* 100BaseT */
2206 case IF_PORT_100BASETX: /* 100BaseTx */
2207 printk(KERN_INFO "%s: config phy for 100BaseTX\n",
2208 dev->name);
2209 dev->if_port = map->port;
2210
2211 /* Link Down: the timer will bring it up */
2212 netif_carrier_off(dev);
2213
2214 /* set Speed to 100Mbps, Half Duplex */
2215 /* disable auto negotiation and enable 100MBit Mode */
2216 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2217 control &= ~(MII_CNTL_AUTO | MII_CNTL_FDX);
2218 control |= MII_CNTL_F100;
2219 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2220 break;
2221
2222 case IF_PORT_100BASEFX: /* 100BaseFx */
2223 printk(KERN_INFO "%s: config phy for 100BaseFX\n",
2224 dev->name);
2225 dev->if_port = map->port;
2226
2227 /* Link Down: the timer will bring it up */
2228 netif_carrier_off(dev);
2229
2230 /* set Speed to 100Mbps, Full Duplex */
2231 /* disable auto negotiation and enable 100MBit Mode */
2232 control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
2233 control &= ~MII_CNTL_AUTO;
2234 control |= MII_CNTL_F100 | MII_CNTL_FDX;
2235 mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
2236 break;
2237 case IF_PORT_10BASE2: /* 10Base2 */
2238 case IF_PORT_AUI: /* AUI */
2239		/* These modes are not supported by this MAC. */
2240		printk(KERN_ERR "%s: 10Base2/AUI not supported\n",
2241			dev->name);
2242 return -EOPNOTSUPP;
2243 break;
2244
2245 default:
2246		printk(KERN_ERR "%s: Invalid media selected\n",
2247			dev->name);
2248 return -EINVAL;
2249 }
2250 return 0;
2251}
2252
2253static struct net_device_stats *au1000_get_stats(struct net_device *dev)
2254{
2255 struct au1000_private *aup = (struct au1000_private *) dev->priv;
2256
2257 if (au1000_debug > 4)
2258 printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev);
2259
2260 if (netif_device_present(dev)) {
2261 return &aup->stats;
2262 }
2263	return NULL;
2264}
2265
2266module_init(au1000_init_module);
2267module_exit(au1000_cleanup_module);