/*
 *
 * Alchemy Au1x00 ethernet driver
 *
 * Copyright 2001-2003, 2006 MontaVista Software Inc.
 * Copyright 2002 TimeSys Corp.
 * Added ethtool/mii-tool support,
 * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
 * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
 * or riemer@riemer-nt.de: fixed the link beat detection with
 * ioctls (SIOCGMIIPHY)
 * Author: MontaVista Software, Inc.
 *         ppopov@mvista.com or source@mvista.com
 *
 * ########################################################################
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 *
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <asm/mipsregs.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/processor.h>

#include <asm/mach-au1x00/au1000.h>
#include <asm/cpu.h>
#include "au1000_eth.h"

#ifdef AU1000_ETH_DEBUG
static int au1000_debug = 5;
#else
static int au1000_debug = 3;
#endif

#define DRV_NAME	"au1000_eth"
#define DRV_VERSION	"1.5"
#define DRV_AUTHOR	"Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC	"Au1xxx on-chip Ethernet driver"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

// prototypes
static void hard_stop(struct net_device *);
static void enable_rx_tx(struct net_device *dev);
static struct net_device * au1000_probe(int port_num);
static int au1000_init(struct net_device *);
static int au1000_open(struct net_device *);
static int au1000_close(struct net_device *);
static int au1000_tx(struct sk_buff *, struct net_device *);
static int au1000_rx(struct net_device *);
static irqreturn_t au1000_interrupt(int, void *, struct pt_regs *);
static void au1000_tx_timeout(struct net_device *);
static int au1000_set_config(struct net_device *dev, struct ifmap *map);
static void set_rx_mode(struct net_device *);
static struct net_device_stats *au1000_get_stats(struct net_device *);
static void au1000_timer(unsigned long);
static int au1000_ioctl(struct net_device *, struct ifreq *, int);
static int mdio_read(struct net_device *, int, int);
static void mdio_write(struct net_device *, int, int, u16);
static void dump_mii(struct net_device *dev, int phy_id);

// externs
extern void ack_rise_edge_irq(unsigned int);
extern int get_ethernet_addr(char *ethernet_addr);
extern void str2eaddr(unsigned char *ea, unsigned char *str);
extern char * __init prom_getcmdline(void);

/*
 * Theory of operation
 *
 * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
 * There are four receive and four transmit descriptors.  These
 * descriptors are not in memory; rather, they are just a set of
 * hardware registers.
 *
 * Since the Au1000 has a coherent data cache, the receive and
 * transmit buffers are allocated from the KSEG0 segment. The
 * hardware registers, however, are still mapped at KSEG1 to
 * make sure there are no out-of-order writes, and that all writes
 * complete immediately.
 */

/* These addresses are only used if yamon doesn't tell us what
 * the mac address is, and the mac address is not passed on the
 * command line.
 */
static unsigned char au1000_mac_addr[6] __devinitdata = {
	0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00
};

#define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))
#define RUN_AT(x) (jiffies + (x))

// For reading/writing 32-bit words from/to DMA memory
#define cpu_to_dma32 cpu_to_be32
#define dma32_to_cpu be32_to_cpu

struct au1000_private *au_macs[NUM_ETH_INTERFACES];

/* FIXME
 * All of the PHY code really should be detached from the MAC
 * code.
 */

/* Default advertise */
#define GENMII_DEFAULT_ADVERTISE \
	ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
	ADVERTISED_Autoneg

#define GENMII_DEFAULT_FEATURES \
	SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
	SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
	SUPPORTED_Autoneg

int bcm_5201_init(struct net_device *dev, int phy_addr)
{
	s16 data;

	/* Stop auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);

	/* Set advertisement to 10/100 and Half/Full duplex
	 * (full capabilities) */
	data = mdio_read(dev, phy_addr, MII_ANADV);
	data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
	mdio_write(dev, phy_addr, MII_ANADV, data);

	/* Restart auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
	mdio_write(dev, phy_addr, MII_CONTROL, data);

	if (au1000_debug > 4)
		dump_mii(dev, phy_addr);
	return 0;
}

int bcm_5201_reset(struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	mdelay(1);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
		mdelay(1);
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}

int
bcm_5201_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	if (!dev) {
		printk(KERN_ERR "bcm_5201_status error: NULL dev\n");
		return -1;
	}
	aup = (struct au1000_private *) dev->priv;

	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
		if (mii_data & MII_AUX_100) {
			if (mii_data & MII_AUX_FDX) {
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}

	}
	else {
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}

int lsi_80227_init(struct net_device *dev, int phy_addr)
{
	if (au1000_debug > 4)
		printk("lsi_80227_init\n");

	/* restart auto-negotiation */
	mdio_write(dev, phy_addr, MII_CONTROL,
		MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO); // | MII_CNTL_FDX);
	mdelay(1);

	/* set up LEDs to correct display */
#ifdef CONFIG_MIPS_MTX1
	mdio_write(dev, phy_addr, 17, 0xff80);
#else
	mdio_write(dev, phy_addr, 17, 0xffc0);
#endif

	if (au1000_debug > 4)
		dump_mii(dev, phy_addr);
	return 0;
}

int lsi_80227_reset(struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

	if (au1000_debug > 4) {
		printk("lsi_80227_reset\n");
		dump_mii(dev, phy_addr);
	}

	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	mdelay(1);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
		mdelay(1);
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}

int
lsi_80227_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	if (!dev) {
		printk(KERN_ERR "lsi_80227_status error: NULL dev\n");
		return -1;
	}
	aup = (struct au1000_private *) dev->priv;

	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, MII_LSI_PHY_STAT);
		if (mii_data & MII_LSI_PHY_STAT_SPD) {
			if (mii_data & MII_LSI_PHY_STAT_FDX) {
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}

	}
	else {
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}

int am79c901_init(struct net_device *dev, int phy_addr)
{
	printk("am79c901_init\n");
	return 0;
}

int am79c901_reset(struct net_device *dev, int phy_addr)
{
	printk("am79c901_reset\n");
	return 0;
}

int
am79c901_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	return 0;
}

int am79c874_init(struct net_device *dev, int phy_addr)
{
	s16 data;

	/* The 79c874 has bit assignments quite similar to the BCM5201 */
	if (au1000_debug > 4)
		printk("am79c874_init\n");

	/* Stop auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);

	/* Set advertisement to 10/100 and Half/Full duplex
	 * (full capabilities) */
	data = mdio_read(dev, phy_addr, MII_ANADV);
	data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
	mdio_write(dev, phy_addr, MII_ANADV, data);

	/* Restart auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;

	mdio_write(dev, phy_addr, MII_CONTROL, data);

	if (au1000_debug > 4) dump_mii(dev, phy_addr);
	return 0;
}

int am79c874_reset(struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

	if (au1000_debug > 4)
		printk("am79c874_reset\n");

	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	mdelay(1);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
		mdelay(1);
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}

int
am79c874_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	// printk("am79c874_status\n");
	if (!dev) {
		printk(KERN_ERR "am79c874_status error: NULL dev\n");
		return -1;
	}

	aup = (struct au1000_private *) dev->priv;
	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);

	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, MII_AMD_PHY_STAT);
		if (mii_data & MII_AMD_PHY_STAT_SPD) {
			if (mii_data & MII_AMD_PHY_STAT_FDX) {
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}

	}
	else {
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}

int lxt971a_init(struct net_device *dev, int phy_addr)
{
	if (au1000_debug > 4)
		printk("lxt971a_init\n");

	/* restart auto-negotiation */
	mdio_write(dev, phy_addr, MII_CONTROL,
		MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_RST_AUTO | MII_CNTL_FDX);

	/* set up LEDs to correct display */
	mdio_write(dev, phy_addr, 20, 0x0422);

	if (au1000_debug > 4)
		dump_mii(dev, phy_addr);
	return 0;
}

int lxt971a_reset(struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

	if (au1000_debug > 4) {
		printk("lxt971a_reset\n");
		dump_mii(dev, phy_addr);
	}

	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	mdelay(1);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
		mdelay(1);
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}

int
lxt971a_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	if (!dev) {
		printk(KERN_ERR "lxt971a_status error: NULL dev\n");
		return -1;
	}
	aup = (struct au1000_private *) dev->priv;

	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, MII_INTEL_PHY_STAT);
		if (mii_data & MII_INTEL_PHY_STAT_SPD) {
			if (mii_data & MII_INTEL_PHY_STAT_FDX) {
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}

	}
	else {
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}

int ks8995m_init(struct net_device *dev, int phy_addr)
{
	s16 data;

//	printk("ks8995m_init\n");
	/* Stop auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);

	/* Set advertisement to 10/100 and Half/Full duplex
	 * (full capabilities) */
	data = mdio_read(dev, phy_addr, MII_ANADV);
	data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
	mdio_write(dev, phy_addr, MII_ANADV, data);

	/* Restart auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;
	mdio_write(dev, phy_addr, MII_CONTROL, data);

	if (au1000_debug > 4) dump_mii(dev, phy_addr);

	return 0;
}

int ks8995m_reset(struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

//	printk("ks8995m_reset\n");
	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	mdelay(1);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
		mdelay(1);
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}

int ks8995m_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	if (!dev) {
		printk(KERN_ERR "ks8995m_status error: NULL dev\n");
		return -1;
	}
	aup = (struct au1000_private *) dev->priv;

	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);
	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, MII_AUX_CNTRL);
		if (mii_data & MII_AUX_100) {
			if (mii_data & MII_AUX_FDX) {
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}

	}
	else {
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}

int
smsc_83C185_init (struct net_device *dev, int phy_addr)
{
	s16 data;

	if (au1000_debug > 4)
		printk("smsc_83C185_init\n");

	/* Stop auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, data & ~MII_CNTL_AUTO);

	/* Set advertisement to 10/100 and Half/Full duplex
	 * (full capabilities) */
	data = mdio_read(dev, phy_addr, MII_ANADV);
	data |= MII_NWAY_TX | MII_NWAY_TX_FDX | MII_NWAY_T_FDX | MII_NWAY_T;
	mdio_write(dev, phy_addr, MII_ANADV, data);

	/* Restart auto-negotiation */
	data = mdio_read(dev, phy_addr, MII_CONTROL);
	data |= MII_CNTL_RST_AUTO | MII_CNTL_AUTO;

	mdio_write(dev, phy_addr, MII_CONTROL, data);

	if (au1000_debug > 4) dump_mii(dev, phy_addr);
	return 0;
}

int
smsc_83C185_reset (struct net_device *dev, int phy_addr)
{
	s16 mii_control, timeout;

	if (au1000_debug > 4)
		printk("smsc_83C185_reset\n");

	mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
	mdio_write(dev, phy_addr, MII_CONTROL, mii_control | MII_CNTL_RESET);
	mdelay(1);
	for (timeout = 100; timeout > 0; --timeout) {
		mii_control = mdio_read(dev, phy_addr, MII_CONTROL);
		if ((mii_control & MII_CNTL_RESET) == 0)
			break;
		mdelay(1);
	}
	if (mii_control & MII_CNTL_RESET) {
		printk(KERN_ERR "%s PHY reset timeout !\n", dev->name);
		return -1;
	}
	return 0;
}

int
smsc_83C185_status (struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	u16 mii_data;
	struct au1000_private *aup;

	if (!dev) {
		printk(KERN_ERR "smsc_83C185_status error: NULL dev\n");
		return -1;
	}

	aup = (struct au1000_private *) dev->priv;
	mii_data = mdio_read(dev, aup->phy_addr, MII_STATUS);

	if (mii_data & MII_STAT_LINK) {
		*link = 1;
		mii_data = mdio_read(dev, aup->phy_addr, 0x1f);
		if (mii_data & (1<<3)) {
			if (mii_data & (1<<4)) {
				*speed = IF_PORT_100BASEFX;
				dev->if_port = IF_PORT_100BASEFX;
			}
			else {
				*speed = IF_PORT_100BASETX;
				dev->if_port = IF_PORT_100BASETX;
			}
		}
		else {
			*speed = IF_PORT_10BASET;
			dev->if_port = IF_PORT_10BASET;
		}
	}
	else {
		*link = 0;
		*speed = 0;
		dev->if_port = IF_PORT_UNKNOWN;
	}
	return 0;
}

#ifdef CONFIG_MIPS_BOSPORUS
int stub_init(struct net_device *dev, int phy_addr)
{
	//printk("PHY stub_init\n");
	return 0;
}

int stub_reset(struct net_device *dev, int phy_addr)
{
	//printk("PHY stub_reset\n");
	return 0;
}

int
stub_status(struct net_device *dev, int phy_addr, u16 *link, u16 *speed)
{
	//printk("PHY stub_status\n");
	*link = 1;
	/* hmmm, revisit */
	*speed = IF_PORT_100BASEFX;
	dev->if_port = IF_PORT_100BASEFX;
	return 0;
}
#endif

struct phy_ops bcm_5201_ops = {
	bcm_5201_init,
	bcm_5201_reset,
	bcm_5201_status,
};

struct phy_ops am79c874_ops = {
	am79c874_init,
	am79c874_reset,
	am79c874_status,
};

struct phy_ops am79c901_ops = {
	am79c901_init,
	am79c901_reset,
	am79c901_status,
};

struct phy_ops lsi_80227_ops = {
	lsi_80227_init,
	lsi_80227_reset,
	lsi_80227_status,
};

struct phy_ops lxt971a_ops = {
	lxt971a_init,
	lxt971a_reset,
	lxt971a_status,
};

struct phy_ops ks8995m_ops = {
	ks8995m_init,
	ks8995m_reset,
	ks8995m_status,
};

struct phy_ops smsc_83C185_ops = {
	smsc_83C185_init,
	smsc_83C185_reset,
	smsc_83C185_status,
};

#ifdef CONFIG_MIPS_BOSPORUS
struct phy_ops stub_ops = {
	stub_init,
	stub_reset,
	stub_status,
};
#endif

static struct mii_chip_info {
	const char * name;
	u16 phy_id0;
	u16 phy_id1;
	struct phy_ops *phy_ops;
	int dual_phy;
} mii_chip_table[] = {
	{"Broadcom BCM5201 10/100 BaseT PHY",0x0040,0x6212, &bcm_5201_ops,0},
	{"Broadcom BCM5221 10/100 BaseT PHY",0x0040,0x61e4, &bcm_5201_ops,0},
	{"Broadcom BCM5222 10/100 BaseT PHY",0x0040,0x6322, &bcm_5201_ops,1},
	{"NS DP83847 PHY", 0x2000, 0x5c30, &bcm_5201_ops ,0},
	{"AMD 79C901 HomePNA PHY",0x0000,0x35c8, &am79c901_ops,0},
	{"AMD 79C874 10/100 BaseT PHY",0x0022,0x561b, &am79c874_ops,0},
	{"LSI 80227 10/100 BaseT PHY",0x0016,0xf840, &lsi_80227_ops,0},
	{"Intel LXT971A Dual Speed PHY",0x0013,0x78e2, &lxt971a_ops,0},
	{"Kendin KS8995M 10/100 BaseT PHY",0x0022,0x1450, &ks8995m_ops,0},
	{"SMSC LAN83C185 10/100 BaseT PHY",0x0007,0xc0a3, &smsc_83C185_ops,0},
#ifdef CONFIG_MIPS_BOSPORUS
	{"Stub", 0x1234, 0x5678, &stub_ops },
#endif
	{0,},
};

static int mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile u32 *mii_control_reg;
	volatile u32 *mii_data_reg;
	u32 timedout = 20;
	u32 mii_control;

	#ifdef CONFIG_BCM5222_DUAL_PHY
	/* First time we probe, it's for the mac0 phy.
	 * Since we haven't determined yet that we have a dual phy,
	 * aup->mii->mii_control_reg won't be setup and we'll
	 * default to the else statement.
	 * By the time we probe for the mac1 phy, the mii_control_reg
	 * will be setup to be the address of the mac0 phy control since
	 * both phys are controlled through mac0.
	 */
	if (aup->mii && aup->mii->mii_control_reg) {
		mii_control_reg = aup->mii->mii_control_reg;
		mii_data_reg = aup->mii->mii_data_reg;
	}
	else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
		/* assume both phys are controlled through mac0 */
		mii_control_reg = au_macs[0]->mii->mii_control_reg;
		mii_data_reg = au_macs[0]->mii->mii_data_reg;
	}
	else
	#endif
	{
		/* default control and data reg addresses */
		mii_control_reg = &aup->mac->mii_control;
		mii_data_reg = &aup->mac->mii_data;
	}

	while (*mii_control_reg & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			printk(KERN_ERR "%s: read_MII busy timeout!!\n",
					dev->name);
			return -1;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_READ;

	*mii_control_reg = mii_control;

	timedout = 20;
	while (*mii_control_reg & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
					dev->name);
			return -1;
		}
	}
	return (int)*mii_data_reg;
}

static void mdio_write(struct net_device *dev, int phy_id, int reg, u16 value)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	volatile u32 *mii_control_reg;
	volatile u32 *mii_data_reg;
	u32 timedout = 20;
	u32 mii_control;

	#ifdef CONFIG_BCM5222_DUAL_PHY
	if (aup->mii && aup->mii->mii_control_reg) {
		mii_control_reg = aup->mii->mii_control_reg;
		mii_data_reg = aup->mii->mii_data_reg;
	}
	else if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
		/* assume both phys are controlled through mac0 */
		mii_control_reg = au_macs[0]->mii->mii_control_reg;
		mii_data_reg = au_macs[0]->mii->mii_data_reg;
	}
	else
	#endif
	{
		/* default control and data reg addresses */
		mii_control_reg = &aup->mac->mii_control;
		mii_data_reg = &aup->mac->mii_data;
	}

	while (*mii_control_reg & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
					dev->name);
			return;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_id) | MAC_MII_WRITE;

	*mii_data_reg = value;
	*mii_control_reg = mii_control;
}


static void dump_mii(struct net_device *dev, int phy_id)
{
	int i, val;

	for (i = 0; i < 7; i++) {
		if ((val = mdio_read(dev, phy_id, i)) >= 0)
			printk("%s: MII Reg %d=%x\n", dev->name, i, val);
	}
	for (i = 16; i < 25; i++) {
		if ((val = mdio_read(dev, phy_id, i)) >= 0)
			printk("%s: MII Reg %d=%x\n", dev->name, i, val);
	}
}

static int mii_probe (struct net_device * dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	int phy_addr;
#ifdef CONFIG_MIPS_BOSPORUS
	int phy_found=0;
#endif

	/* search for total of 32 possible mii phy addresses */
	for (phy_addr = 0; phy_addr < 32; phy_addr++) {
		u16 mii_status;
		u16 phy_id0, phy_id1;
		int i;

		#ifdef CONFIG_BCM5222_DUAL_PHY
		/* Mask the already found phy, try next one */
		if (au_macs[0]->mii && au_macs[0]->mii->mii_control_reg) {
			if (au_macs[0]->phy_addr == phy_addr)
				continue;
		}
		#endif

		mii_status = mdio_read(dev, phy_addr, MII_STATUS);
		if (mii_status == 0xffff || mii_status == 0x0000)
			/* the mii is not accessible, try next one */
			continue;

		phy_id0 = mdio_read(dev, phy_addr, MII_PHY_ID0);
		phy_id1 = mdio_read(dev, phy_addr, MII_PHY_ID1);

		/* search our mii table for the current mii */
		for (i = 0; mii_chip_table[i].phy_id1; i++) {
			if (phy_id0 == mii_chip_table[i].phy_id0 &&
			    phy_id1 == mii_chip_table[i].phy_id1) {
				struct mii_phy * mii_phy = aup->mii;

				printk(KERN_INFO "%s: %s at phy address %d\n",
						dev->name, mii_chip_table[i].name,
						phy_addr);
#ifdef CONFIG_MIPS_BOSPORUS
				phy_found = 1;
#endif
				mii_phy->chip_info = mii_chip_table+i;
				aup->phy_addr = phy_addr;
				aup->want_autoneg = 1;
				aup->phy_ops = mii_chip_table[i].phy_ops;
				aup->phy_ops->phy_init(dev,phy_addr);

				// Check for dual-phy and then store required
				// values and set indicators. We need to do
				// this now since mdio_{read,write} need the
				// control and data register addresses.
				#ifdef CONFIG_BCM5222_DUAL_PHY
				if ( mii_chip_table[i].dual_phy) {

					/* assume both phys are controlled
					 * through MAC0. Board specific? */

					/* sanity check */
					if (!au_macs[0] || !au_macs[0]->mii)
						return -1;
					aup->mii->mii_control_reg = (u32 *)
						&au_macs[0]->mac->mii_control;
					aup->mii->mii_data_reg = (u32 *)
						&au_macs[0]->mac->mii_data;
				}
				#endif
				goto found;
			}
		}
	}
found:

#ifdef CONFIG_MIPS_BOSPORUS
	/* This is a workaround for the Micrel/Kendin 5 port switch.
	   The second MAC doesn't see a PHY connected... so we need to
	   trick it into thinking we have one.

	   If this kernel is run on another Au1500 development board,
	   the stub will be found as well as the actual PHY. However,
	   the last found PHY will be used... usually at Addr 31 (Db1500).
	*/
	if ( (!phy_found) )
	{
		u16 phy_id0, phy_id1;
		int i;

		phy_id0 = 0x1234;
		phy_id1 = 0x5678;

		/* search our mii table for the current mii */
		for (i = 0; mii_chip_table[i].phy_id1; i++) {
			if (phy_id0 == mii_chip_table[i].phy_id0 &&
			    phy_id1 == mii_chip_table[i].phy_id1) {
				struct mii_phy * mii_phy;

				printk(KERN_INFO "%s: %s at phy address %d\n",
						dev->name, mii_chip_table[i].name,
						phy_addr);
				mii_phy = kmalloc(sizeof(struct mii_phy),
						GFP_KERNEL);
				if (mii_phy) {
					mii_phy->chip_info = mii_chip_table+i;
					aup->phy_addr = phy_addr;
					mii_phy->next = aup->mii;
					aup->phy_ops =
						mii_chip_table[i].phy_ops;
					aup->mii = mii_phy;
					aup->phy_ops->phy_init(dev,phy_addr);
				} else {
					printk(KERN_ERR "%s: out of memory\n",
							dev->name);
					return -1;
				}
				mii_phy->chip_info = mii_chip_table+i;
				aup->phy_addr = phy_addr;
				aup->phy_ops = mii_chip_table[i].phy_ops;
				aup->phy_ops->phy_init(dev,phy_addr);
				break;
			}
		}
	}
	if (aup->mac_id == 0) {
		/* the Bosporus phy responds to addresses 0-5 but
		 * 5 is the correct one.
		 */
		aup->phy_addr = 5;
	}
#endif

	if (aup->mii->chip_info == NULL) {
		printk(KERN_ERR "%s: Au1x No known MII transceivers found!\n",
				dev->name);
		return -1;
	}

	printk(KERN_INFO "%s: Using %s as default\n",
			dev->name, aup->mii->chip_info->name);

	return 0;
}


/*
 * Buffer allocation/deallocation routines. The buffer descriptor returned
 * has the virtual and dma address of a buffer suitable for
 * both receive and transmit operations.
 */
static db_dest_t *GetFreeDB(struct au1000_private *aup)
{
	db_dest_t *pDB;
	pDB = aup->pDBfree;

	if (pDB) {
		aup->pDBfree = pDB->pnext;
	}
	return pDB;
}

void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
{
	db_dest_t *pDBfree = aup->pDBfree;
	if (pDBfree)
		pDBfree->pnext = pDB;
	aup->pDBfree = pDB;
}

static void enable_rx_tx(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);

	aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
	au_sync_delay(10);
}

static void hard_stop(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk(KERN_INFO "%s: hard stop\n", dev->name);

	aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
	au_sync_delay(10);
}


static void reset_mac(struct net_device *dev)
{
	int i;
	u32 flags;
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk(KERN_INFO "%s: reset mac, aup %x\n",
				dev->name, (unsigned)aup);

	spin_lock_irqsave(&aup->lock, flags);
	if (aup->timer.function == &au1000_timer) { /* check if timer initialized */
		del_timer(&aup->timer);
	}

	hard_stop(dev);
	#ifdef CONFIG_BCM5222_DUAL_PHY
	if (aup->mac_id != 0) {
	#endif
		/* If BCM5222, we can't leave MAC0 in reset because then
		 * we can't access the dual phy for ETH1 */
		*aup->enable = MAC_EN_CLOCK_ENABLE;
		au_sync_delay(2);
		*aup->enable = 0;
		au_sync_delay(2);
	#ifdef CONFIG_BCM5222_DUAL_PHY
	}
	#endif
	aup->tx_full = 0;
	for (i = 0; i < NUM_RX_DMA; i++) {
		/* reset control bits */
		aup->rx_dma_ring[i]->buff_stat &= ~0xf;
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		/* reset control bits */
		aup->tx_dma_ring[i]->buff_stat &= ~0xf;
	}
	spin_unlock_irqrestore(&aup->lock, flags);
}


/*
 * Setup the receive and transmit "rings".  These pointers are the addresses
 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
 * these are not descriptors sitting in memory.
 */
static void
setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
{
	int i;

	for (i = 0; i < NUM_RX_DMA; i++) {
		aup->rx_dma_ring[i] =
			(volatile rx_dma_t *) (rx_base + sizeof(rx_dma_t)*i);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		aup->tx_dma_ring[i] =
			(volatile tx_dma_t *) (tx_base + sizeof(tx_dma_t)*i);
	}
}

static struct {
	u32 base_addr;
	u32 macen_addr;
	int irq;
	struct net_device *dev;
} iflist[2] = {
#ifdef CONFIG_SOC_AU1000
	{AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT},
	{AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT}
#endif
#ifdef CONFIG_SOC_AU1100
	{AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT}
#endif
#ifdef CONFIG_SOC_AU1500
	{AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT},
	{AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT}
#endif
#ifdef CONFIG_SOC_AU1550
	{AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT},
	{AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT}
#endif
};

static int num_ifs;

/*
 * Setup the base address and interrupt of the Au1xxx ethernet macs
 * based on cpu type and whether the interface is enabled in sys_pinfunc
 * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
 */
static int __init au1000_init_module(void)
{
	int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
	struct net_device *dev;
	int i, found_one = 0;

	num_ifs = NUM_ETH_INTERFACES - ni;

	for(i = 0; i < num_ifs; i++) {
		dev = au1000_probe(i);
		iflist[i].dev = dev;
		if (dev)
			found_one++;
	}
	if (!found_one)
		return -ENODEV;
	return 0;
}

static int au1000_setup_aneg(struct net_device *dev, u32 advertise)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;
	u16 ctl, adv;

	/* Setup standard advertise */
	adv = mdio_read(dev, aup->phy_addr, MII_ADVERTISE);
	adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
	if (advertise & ADVERTISED_10baseT_Half)
		adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		adv |= ADVERTISE_100FULL;
	mdio_write(dev, aup->phy_addr, MII_ADVERTISE, adv);

	/* Start/Restart aneg */
	ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
	ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
	mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);

	return 0;
}

static int au1000_setup_forced(struct net_device *dev, int speed, int fd)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;
	u16 ctl;

	ctl = mdio_read(dev, aup->phy_addr, MII_BMCR);
	ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE);

	/* First reset the PHY */
	mdio_write(dev, aup->phy_addr, MII_BMCR, ctl | BMCR_RESET);

	/* Select speed & duplex */
	switch (speed) {
		case SPEED_10:
			break;
		case SPEED_100:
			ctl |= BMCR_SPEED100;
			break;
		case SPEED_1000:
		default:
			return -EINVAL;
	}
	if (fd == DUPLEX_FULL)
		ctl |= BMCR_FULLDPLX;
	mdio_write(dev, aup->phy_addr, MII_BMCR, ctl);

	return 0;
}


static void
au1000_start_link(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;
	u32 advertise;
	int autoneg;
	int forced_speed;
	int forced_duplex;

	/* Default advertise */
	advertise = GENMII_DEFAULT_ADVERTISE;
	autoneg = aup->want_autoneg;
	forced_speed = SPEED_100;
	forced_duplex = DUPLEX_FULL;

	/* Setup link parameters */
	if (cmd) {
		if (cmd->autoneg == AUTONEG_ENABLE) {
			advertise = cmd->advertising;
			autoneg = 1;
		} else {
			autoneg = 0;

			forced_speed = cmd->speed;
			forced_duplex = cmd->duplex;
		}
	}

	/* Configure PHY & start aneg */
	aup->want_autoneg = autoneg;
	if (autoneg)
		au1000_setup_aneg(dev, advertise);
	else
		au1000_setup_forced(dev, forced_speed, forced_duplex);
	mod_timer(&aup->timer, jiffies + HZ);
}

static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;
	u16 link, speed;

	cmd->supported = GENMII_DEFAULT_FEATURES;
	cmd->advertising = GENMII_DEFAULT_ADVERTISE;
	cmd->port = PORT_MII;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->phy_address = aup->phy_addr;
	spin_lock_irq(&aup->lock);
	cmd->autoneg = aup->want_autoneg;
	aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
	if ((speed == IF_PORT_100BASETX) || (speed == IF_PORT_100BASEFX))
		cmd->speed = SPEED_100;
	else if (speed == IF_PORT_10BASET)
		cmd->speed = SPEED_10;
	if (link && (dev->if_port == IF_PORT_100BASEFX))
		cmd->duplex = DUPLEX_FULL;
	else
		cmd->duplex = DUPLEX_HALF;
	spin_unlock_irq(&aup->lock);
	return 0;
}

static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;
	unsigned long features = GENMII_DEFAULT_FEATURES;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_DISABLE)
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF &&
			    (features & SUPPORTED_10baseT_Half) == 0)
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    (features & SUPPORTED_10baseT_Full) == 0)
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF &&
			    (features & SUPPORTED_100baseT_Half) == 0)
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL &&
			    (features & SUPPORTED_100baseT_Full) == 0)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	else if ((features & SUPPORTED_Autoneg) == 0)
		return -EINVAL;

	spin_lock_irq(&aup->lock);
	au1000_start_link(dev, cmd);
	spin_unlock_irq(&aup->lock);
	return 0;
}

static int au1000_nway_reset(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;

	if (!aup->want_autoneg)
		return -EINVAL;
	spin_lock_irq(&aup->lock);
	au1000_start_link(dev, NULL);
	spin_unlock_irq(&aup->lock);
	return 0;
}

static void
au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
	info->regdump_len = 0;
}

static u32 au1000_get_link(struct net_device *dev)
{
	return netif_carrier_ok(dev);
}

static struct ethtool_ops au1000_ethtool_ops = {
	.get_settings = au1000_get_settings,
	.set_settings = au1000_set_settings,
	.get_drvinfo = au1000_get_drvinfo,
	.nway_reset = au1000_nway_reset,
	.get_link = au1000_get_link
};

static struct net_device * au1000_probe(int port_num)
{
	static unsigned version_printed = 0;
	struct au1000_private *aup = NULL;
	struct net_device *dev = NULL;
	db_dest_t *pDB, *pDBfree;
	char *pmac, *argptr;
	char ethaddr[6];
	int irq, i, err;
	u32 base, macen;

	if (port_num >= NUM_ETH_INTERFACES)
		return NULL;

	base  = CPHYSADDR(iflist[port_num].base_addr );
	macen = CPHYSADDR(iflist[port_num].macen_addr);
	irq = iflist[port_num].irq;

	if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
	    !request_mem_region(macen, 4, "Au1x00 ENET"))
		return NULL;

	if (version_printed++ == 0)
		printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);

	dev = alloc_etherdev(sizeof(struct au1000_private));
	if (!dev) {
		printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
		return NULL;
	}

	if ((err = register_netdev(dev)) != 0) {
		printk(KERN_ERR "%s: Cannot register net device, error %d\n",
				DRV_NAME, err);
		free_netdev(dev);
		return NULL;
	}

	printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
		dev->name, base, irq);

	aup = dev->priv;

	/* Allocate the data buffers */
	/* Snooping works fine with eth on all au1xxx */
	aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
						(NUM_TX_BUFFS + NUM_RX_BUFFS),
						&aup->dma_addr, 0);
	if (!aup->vaddr) {
		free_netdev(dev);
		release_mem_region( base, MAC_IOSIZE);
		release_mem_region(macen, 4);
		return NULL;
	}

	/* aup->mac is the base address of the MAC's registers */
	aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;

	/* Setup some variables for quick register address access */
	aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
	aup->mac_id = port_num;
	au_macs[port_num] = aup;

	if (port_num == 0) {
		/* Check the environment variables first */
		if (get_ethernet_addr(ethaddr) == 0)
			memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
		else {
			/* Check command line */
			argptr = prom_getcmdline();
			if ((pmac = strstr(argptr, "ethaddr=")) == NULL)
				printk(KERN_INFO "%s: No MAC address found\n",
						dev->name);
				/* Use the hard coded MAC addresses */
			else {
				str2eaddr(ethaddr, pmac + strlen("ethaddr="));
				memcpy(au1000_mac_addr, ethaddr,
				       sizeof(au1000_mac_addr));
			}
		}

		setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
	} else if (port_num == 1)
		setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);

	/*
	 * Assign to the Ethernet ports two consecutive MAC addresses
	 * to match those that are printed on their stickers
	 */
	memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
	dev->dev_addr[5] += port_num;

	/* Bring the device out of reset, otherwise probing the MII will hang */
	*aup->enable = MAC_EN_CLOCK_ENABLE;
	au_sync_delay(2);
	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 |
		MAC_EN_CLOCK_ENABLE;
	au_sync_delay(2);

	aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
	if (!aup->mii) {
		printk(KERN_ERR "%s: out of memory\n", dev->name);
		goto err_out;
	}
	aup->mii->next = NULL;
	aup->mii->chip_info = NULL;
	aup->mii->status = 0;
	aup->mii->mii_control_reg = 0;
	aup->mii->mii_data_reg = 0;

	if (mii_probe(dev) != 0) {
		goto err_out;
	}

	pDBfree = NULL;
	/* setup the data buffer descriptors and attach a buffer to each one */
	pDB = aup->db;
	for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
		pDB->pnext = pDBfree;
		pDBfree = pDB;
		pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
		pDB++;
	}
	aup->pDBfree = pDBfree;

	for (i = 0; i < NUM_RX_DMA; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB) {
			goto err_out;
		}
		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->rx_db_inuse[i] = pDB;
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		pDB = GetFreeDB(aup);
		if (!pDB) {
			goto err_out;
		}
		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->tx_dma_ring[i]->len = 0;
		aup->tx_db_inuse[i] = pDB;
	}

	spin_lock_init(&aup->lock);
	dev->base_addr = base;
	dev->irq = irq;
	dev->open = au1000_open;
	dev->hard_start_xmit = au1000_tx;
	dev->stop = au1000_close;
	dev->get_stats = au1000_get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &au1000_ioctl;
	SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
	dev->set_config = &au1000_set_config;
	dev->tx_timeout = au1000_tx_timeout;
	dev->watchdog_timeo = ETH_TX_TIMEOUT;

	/*
	 * The boot code uses the ethernet controller, so reset it to start
	 * fresh.  au1000_init() expects that the device is in reset state.
	 */
	reset_mac(dev);

	return dev;

err_out:
	/* here we should have a valid dev plus aup-> register addresses
	 * so we can reset the mac properly.*/
	reset_mac(dev);
	kfree(aup->mii);
	for (i = 0; i < NUM_RX_DMA; i++) {
		if (aup->rx_db_inuse[i])
			ReleaseDB(aup, aup->rx_db_inuse[i]);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		if (aup->tx_db_inuse[i])
			ReleaseDB(aup, aup->tx_db_inuse[i]);
	}
	dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
			(void *)aup->vaddr, aup->dma_addr);
	unregister_netdev(dev);
	free_netdev(dev);
	release_mem_region( base, MAC_IOSIZE);
	release_mem_region(macen, 4);
	return NULL;
}

/*
 * Initialize the interface.
 *
 * When the device powers up, the clocks are disabled and the
 * mac is in reset state.  When the interface is closed, we
 * do the same -- reset the device and disable the clocks to
 * conserve power. Thus, whenever au1000_init() is called,
 * the device should already be in reset state.
 */
static int au1000_init(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	u32 flags;
	int i;
	u32 control;
	u16 link, speed;

	if (au1000_debug > 4)
		printk("%s: au1000_init\n", dev->name);

	spin_lock_irqsave(&aup->lock, flags);

	/* bring the device out of reset */
	*aup->enable = MAC_EN_CLOCK_ENABLE;
	au_sync_delay(2);
	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 |
		MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
	au_sync_delay(20);

	aup->mac->control = 0;
	aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
	aup->tx_tail = aup->tx_head;
	aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;

	aup->mac->mac_addr_high = dev->dev_addr[5]<<8 | dev->dev_addr[4];
	aup->mac->mac_addr_low = dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
		dev->dev_addr[1]<<8 | dev->dev_addr[0];

	for (i = 0; i < NUM_RX_DMA; i++) {
		aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
	}
	au_sync();

	aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed);
	control = MAC_DISABLE_RX_OWN | MAC_RX_ENABLE | MAC_TX_ENABLE;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
	control |= MAC_BIG_ENDIAN;
#endif
	if (link && (dev->if_port == IF_PORT_100BASEFX)) {
		control |= MAC_FULL_DUPLEX;
	}

	aup->mac->control = control;
	aup->mac->vlan1_tag = 0x8100; /* activate vlan support */
	au_sync();

	spin_unlock_irqrestore(&aup->lock, flags);
	return 0;
}

static void au1000_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	unsigned char if_port;
	u16 link, speed;

	if (!dev) {
		/* fatal error, don't restart the timer */
		printk(KERN_ERR "au1000_timer error: NULL dev\n");
		return;
	}

	if_port = dev->if_port;
	if (aup->phy_ops->phy_status(dev, aup->phy_addr, &link, &speed) == 0) {
		if (link) {
			if (!netif_carrier_ok(dev)) {
				netif_carrier_on(dev);
				printk(KERN_INFO "%s: link up\n", dev->name);
			}
		}
		else {
			if (netif_carrier_ok(dev)) {
				netif_carrier_off(dev);
				dev->if_port = 0;
				printk(KERN_INFO "%s: link down\n", dev->name);
			}
		}
	}

	if (link && (dev->if_port != if_port) &&
	    (dev->if_port != IF_PORT_UNKNOWN)) {
		hard_stop(dev);
		if (dev->if_port == IF_PORT_100BASEFX) {
			printk(KERN_INFO "%s: going to full duplex\n",
					dev->name);
			aup->mac->control |= MAC_FULL_DUPLEX;
			au_sync_delay(1);
		}
		else {
			aup->mac->control &= ~MAC_FULL_DUPLEX;
			au_sync_delay(1);
		}
		enable_rx_tx(dev);
	}

	aup->timer.expires = RUN_AT((1*HZ));
	aup->timer.data = (unsigned long)dev;
	aup->timer.function = &au1000_timer; /* timer handler */
	add_timer(&aup->timer);

}

static int au1000_open(struct net_device *dev)
{
	int retval;
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: open: dev=%p\n", dev->name, dev);

	if ((retval = au1000_init(dev))) {
		printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
		free_irq(dev->irq, dev);
		return retval;
	}
	netif_start_queue(dev);

	if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
					dev->name, dev))) {
		printk(KERN_ERR "%s: unable to get IRQ %d\n",
				dev->name, dev->irq);
		return retval;
	}

	init_timer(&aup->timer); /* used in ioctl() */
	aup->timer.expires = RUN_AT((3*HZ));
	aup->timer.data = (unsigned long)dev;
	aup->timer.function = &au1000_timer; /* timer handler */
	add_timer(&aup->timer);

	if (au1000_debug > 4)
		printk("%s: open: Initialization done.\n", dev->name);

	return 0;
}

static int au1000_close(struct net_device *dev)
{
	u32 flags;
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: close: dev=%p\n", dev->name, dev);

	reset_mac(dev);

	spin_lock_irqsave(&aup->lock, flags);

	/* stop the device */
	netif_stop_queue(dev);

	/* disable the interrupt */
	free_irq(dev->irq, dev);
	spin_unlock_irqrestore(&aup->lock, flags);

	return 0;
}

static void __exit au1000_cleanup_module(void)
{
	int i, j;
	struct net_device *dev;
	struct au1000_private *aup;

	for (i = 0; i < num_ifs; i++) {
		dev = iflist[i].dev;
		if (dev) {
			aup = (struct au1000_private *) dev->priv;
			unregister_netdev(dev);
			kfree(aup->mii);
			for (j = 0; j < NUM_RX_DMA; j++)
				if (aup->rx_db_inuse[j])
					ReleaseDB(aup, aup->rx_db_inuse[j]);
			for (j = 0; j < NUM_TX_DMA; j++)
				if (aup->tx_db_inuse[j])
					ReleaseDB(aup, aup->tx_db_inuse[j]);
			dma_free_noncoherent(NULL, MAX_BUF_SIZE *
					(NUM_TX_BUFFS + NUM_RX_BUFFS),
					(void *)aup->vaddr, aup->dma_addr);
			release_mem_region(dev->base_addr, MAC_IOSIZE);
			release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
			free_netdev(dev);
		}
	}
}

Sergei Shtylylovc2d3d4b2006-03-21 22:53:52 -08001794static void update_tx_stats(struct net_device *dev, u32 status)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795{
1796 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1797 struct net_device_stats *ps = &aup->stats;
1798
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799 if (status & TX_FRAME_ABORTED) {
1800 if (dev->if_port == IF_PORT_100BASEFX) {
1801 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
1802 /* any other tx errors are only valid
1803 * in half duplex mode */
1804 ps->tx_errors++;
1805 ps->tx_aborted_errors++;
1806 }
1807 }
1808 else {
1809 ps->tx_errors++;
1810 ps->tx_aborted_errors++;
1811 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
1812 ps->tx_carrier_errors++;
1813 }
1814 }
1815}
1816
1817
1818/*
1819 * Called from the interrupt service routine to acknowledge
1820 * the TX DONE bits. This is a must if the irq is setup as
1821 * edge triggered.
1822 */
1823static void au1000_tx_ack(struct net_device *dev)
1824{
1825 struct au1000_private *aup = (struct au1000_private *) dev->priv;
1826 volatile tx_dma_t *ptxd;
1827
1828 ptxd = aup->tx_dma_ring[aup->tx_tail];
1829
1830 while (ptxd->buff_stat & TX_T_DONE) {
Sergei Shtylylovc2d3d4b2006-03-21 22:53:52 -08001831 update_tx_stats(dev, ptxd->status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832 ptxd->buff_stat &= ~TX_T_DONE;
1833 ptxd->len = 0;
1834 au_sync();
1835
1836 aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
1837 ptxd = aup->tx_dma_ring[aup->tx_tail];
1838
1839 if (aup->tx_full) {
1840 aup->tx_full = 0;
1841 netif_wake_queue(dev);
1842 }
1843 }
1844}
1845
1846
1847/*
1848 * Au1000 transmit routine.
1849 */
static int au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct net_device_stats *ps = &aup->stats;
	volatile tx_dma_t *ptxd;
	u32 buff_stat;
	db_dest_t *pDB;
	int i;

	if (au1000_debug > 5)
		printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
				dev->name, (unsigned)aup, skb->len,
				skb->data, aup->tx_head);

	ptxd = aup->tx_dma_ring[aup->tx_head];
	buff_stat = ptxd->buff_stat;
	if (buff_stat & TX_DMA_ENABLE) {
		/* We've wrapped around and the transmitter is still busy */
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return 1;
	} else if (buff_stat & TX_T_DONE) {
		update_tx_stats(dev, ptxd->status);
		ptxd->len = 0;
	}

	if (aup->tx_full) {
		aup->tx_full = 0;
		netif_wake_queue(dev);
	}

	pDB = aup->tx_db_inuse[aup->tx_head];
	memcpy((void *)pDB->vaddr, skb->data, skb->len);
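	/*
	 * Frames shorter than the Ethernet minimum (ETH_ZLEN) are zero
	 * padded in the DMA buffer before being handed to the MAC.
	 */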
	if (skb->len < ETH_ZLEN) {
		for (i = skb->len; i < ETH_ZLEN; i++)
			((char *)pDB->vaddr)[i] = 0;
		ptxd->len = ETH_ZLEN;
	} else
		ptxd->len = skb->len;

	ps->tx_packets++;
	ps->tx_bytes += ptxd->len;

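	/*
	 * Hand the buffer to the MAC by setting TX_DMA_ENABLE in the
	 * descriptor.  The skb can be freed right away because its data
	 * was already copied into the DMA buffer above.
	 */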
	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
	au_sync();
	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
	dev->trans_start = jiffies;
	return 0;
}

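/*
 * Update the software RX counters from the status word of a completed
 * receive descriptor.  Good frames add their length to rx_bytes; error
 * frames bump the matching error counter instead.
 */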
static inline void update_rx_stats(struct net_device *dev, u32 status)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct net_device_stats *ps = &aup->stats;

	ps->rx_packets++;
	if (status & RX_MCAST_FRAME)
		ps->multicast++;

	if (status & RX_ERROR) {
		ps->rx_errors++;
		if (status & RX_MISSED_FRAME)
			ps->rx_missed_errors++;
		if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
			ps->rx_length_errors++;
		if (status & RX_CRC_ERROR)
			ps->rx_crc_errors++;
		if (status & RX_COLL)
			ps->collisions++;
	} else
		ps->rx_bytes += status & RX_FRAME_LEN_MASK;
}

/*
 * Au1000 receive routine.
 */
static int au1000_rx(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	struct sk_buff *skb;
	volatile rx_dma_t *prxd;
	u32 buff_stat, status;
	db_dest_t *pDB;
	u32 frmlen;

	if (au1000_debug > 5)
		printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);

	prxd = aup->rx_dma_ring[aup->rx_head];
	buff_stat = prxd->buff_stat;
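	/*
	 * The MAC sets RX_T_DONE in buff_stat when it is finished with a
	 * descriptor; keep processing until we reach a descriptor that is
	 * still owned by the hardware.
	 */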
	while (buff_stat & RX_T_DONE) {
		status = prxd->status;
		pDB = aup->rx_db_inuse[aup->rx_head];
		update_rx_stats(dev, status);
		if (!(status & RX_ERROR)) {
			/* good frame */
			frmlen = (status & RX_FRAME_LEN_MASK);
			frmlen -= 4;	/* Remove FCS */
			skb = dev_alloc_skb(frmlen + 2);
			if (skb == NULL) {
				printk(KERN_ERR
					"%s: Memory squeeze, dropping packet.\n",
					dev->name);
				aup->stats.rx_dropped++;
				continue;
			}
			skb->dev = dev;
			skb_reserve(skb, 2);	/* 16 byte IP header align */
			eth_copy_and_sum(skb,
					(unsigned char *)pDB->vaddr, frmlen, 0);
			skb_put(skb, frmlen);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);	/* pass the packet to upper layers */
		} else {
			if (au1000_debug > 4) {
				if (status & RX_MISSED_FRAME)
					printk("rx miss\n");
				if (status & RX_WDOG_TIMER)
					printk("rx wdog\n");
				if (status & RX_RUNT)
					printk("rx runt\n");
				if (status & RX_OVERLEN)
					printk("rx overlen\n");
				if (status & RX_COLL)
					printk("rx coll\n");
				if (status & RX_MII_ERROR)
					printk("rx mii error\n");
				if (status & RX_CRC_ERROR)
					printk("rx crc error\n");
				if (status & RX_LEN_ERROR)
					printk("rx len error\n");
				if (status & RX_U_CNTRL_FRAME)
					printk("rx u control frame\n");
			}
		}
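		/*
		 * Give the descriptor back to the MAC: point it at the same
		 * DMA buffer again and re-enable it, then advance to the
		 * next ring entry.
		 */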
		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
		au_sync();

		/* next descriptor */
		prxd = aup->rx_dma_ring[aup->rx_head];
		buff_stat = prxd->buff_stat;
		dev->last_rx = jiffies;
	}
	return 0;
}


/*
 * Au1000 interrupt service routine.
 */
static irqreturn_t au1000_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;

	if (dev == NULL) {
		printk(KERN_ERR "%s: isr: null dev ptr\n", DRV_NAME);
		return IRQ_RETVAL(1);
	}

	/* Handle RX interrupts first to minimize chance of overrun */

	au1000_rx(dev);
	au1000_tx_ack(dev);
	return IRQ_RETVAL(1);
}


/*
 * The Tx ring has been full longer than the watchdog timeout
 * value, so the transmitter is presumed hung: reset the MAC,
 * reinitialize it and restart the queue.
 */
static void au1000_tx_timeout(struct net_device *dev)
{
	printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
	reset_mac(dev);
	au1000_init(dev);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}


static unsigned const ethernet_polynomial = 0x04c11db7U;
static inline u32 ether_crc(int length, unsigned char *data)
{
	int crc = -1;

	while (--length >= 0) {
		unsigned char current_octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, current_octet >>= 1)
			crc = (crc << 1) ^
				((crc < 0) ^ (current_octet & 1) ?
				ethernet_polynomial : 0);
	}
	return crc;
}
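/*
 * The top six bits of this big-endian CRC-32 select one of the 64 bins in
 * the MAC's multicast hash filter (multi_hash_high/multi_hash_low); that is
 * how set_rx_mode() below uses it.
 */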

static void set_rx_mode(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags);

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		aup->mac->control |= MAC_PROMISCUOUS;
		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
	} else if ((dev->flags & IFF_ALLMULTI) ||
			dev->mc_count > MULTICAST_FILTER_LIMIT) {
		aup->mac->control |= MAC_PASS_ALL_MULTI;
		aup->mac->control &= ~MAC_PROMISCUOUS;
		printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
	} else {
		int i;
		struct dev_mc_list *mclist;
		u32 mc_filter[2];	/* Multicast hash filter */

		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
				i++, mclist = mclist->next) {
			set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26,
					(long *)mc_filter);
		}
		aup->mac->multi_hash_high = mc_filter[1];
		aup->mac->multi_hash_low = mc_filter[0];
		aup->mac->control &= ~MAC_PROMISCUOUS;
		aup->mac->control |= MAC_HASH_MODE;
	}
}


static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct au1000_private *aup = (struct au1000_private *)dev->priv;
	u16 *data = (u16 *)&rq->ifr_ifru;

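	/*
	 * The SIOCDEVPRIVATE+n values are the older, driver-private MII
	 * ioctl numbers; they are handled here alongside the standard
	 * SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG equivalents.
	 */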
	switch (cmd) {
	case SIOCDEVPRIVATE:	/* Get the address of the PHY in use. */
	case SIOCGMIIPHY:
		if (!netif_running(dev))
			return -EINVAL;
		data[0] = aup->phy_addr;
		/* fall through: also return the register contents */
	case SIOCDEVPRIVATE + 1:	/* Read the specified MII register. */
	case SIOCGMIIREG:
		data[3] = mdio_read(dev, data[0], data[1]);
		return 0;
	case SIOCDEVPRIVATE + 2:	/* Write the specified MII register. */
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		mdio_write(dev, data[0], data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
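
/*
 * Usage sketch (illustrative only, not part of the driver): from userspace
 * the MII registers handled above are reached through the standard ioctl
 * sequence, roughly:
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(sock, SIOCGMIIPHY, &ifr);		// fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(sock, SIOCGMIIREG, &ifr);		// result in mii->val_out
 *
 * Field names assume the generic struct mii_ioctl_data layout from
 * <linux/mii.h>, which matches the u16 array indexing used above;
 * mii-tool exercises the same path.
 */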


static int au1000_set_config(struct net_device *dev, struct ifmap *map)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;
	u16 control;

	if (au1000_debug > 4) {
		printk("%s: set_config called: dev->if_port %d map->port %x\n",
				dev->name, dev->if_port, map->port);
	}

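	/*
	 * map->port selects the media type.  Each case reprograms the PHY's
	 * MII control register for that mode and drops the carrier; the link
	 * poll timer brings the interface back up once the new link state is
	 * detected.
	 */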
	switch (map->port) {
	case IF_PORT_UNKNOWN:	/* use auto here */
		printk(KERN_INFO "%s: config phy for aneg\n",
			dev->name);
		dev->if_port = map->port;
		/* Link Down: the timer will bring it up */
		netif_carrier_off(dev);

		/* read current control */
		control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
		control &= ~(MII_CNTL_FDX | MII_CNTL_F100);

		/* enable auto negotiation and reset the negotiation */
		mdio_write(dev, aup->phy_addr, MII_CONTROL,
				control | MII_CNTL_AUTO | MII_CNTL_RST_AUTO);
		break;

	case IF_PORT_10BASET:	/* 10BaseT */
		printk(KERN_INFO "%s: config phy for 10BaseT\n",
			dev->name);
		dev->if_port = map->port;

		/* Link Down: the timer will bring it up */
		netif_carrier_off(dev);

		/* set Speed to 10Mbps, Half Duplex */
		control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
		control &= ~(MII_CNTL_F100 | MII_CNTL_AUTO | MII_CNTL_FDX);

		/* disable auto negotiation and force 10M/HD mode */
		mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
		break;

	case IF_PORT_100BASET:	/* 100BaseT */
	case IF_PORT_100BASETX:	/* 100BaseTx */
		printk(KERN_INFO "%s: config phy for 100BaseTX\n",
			dev->name);
		dev->if_port = map->port;

		/* Link Down: the timer will bring it up */
		netif_carrier_off(dev);

		/* set Speed to 100Mbps, Half Duplex */
		/* disable auto negotiation and enable 100MBit Mode */
		control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
		control &= ~(MII_CNTL_AUTO | MII_CNTL_FDX);
		control |= MII_CNTL_F100;
		mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
		break;

	case IF_PORT_100BASEFX:	/* 100BaseFx */
		printk(KERN_INFO "%s: config phy for 100BaseFX\n",
			dev->name);
		dev->if_port = map->port;

		/* Link Down: the timer will bring it up */
		netif_carrier_off(dev);

		/* set Speed to 100Mbps, Full Duplex */
		/* disable auto negotiation and enable 100MBit Mode */
		control = mdio_read(dev, aup->phy_addr, MII_CONTROL);
		control &= ~MII_CNTL_AUTO;
		control |= MII_CNTL_F100 | MII_CNTL_FDX;
		mdio_write(dev, aup->phy_addr, MII_CONTROL, control);
		break;

	case IF_PORT_10BASE2:	/* 10Base2 */
	case IF_PORT_AUI:	/* AUI */
		/* These modes are not supported */
		printk(KERN_ERR "%s: 10Base2/AUI not supported\n",
			dev->name);
		return -EOPNOTSUPP;

	default:
		printk(KERN_ERR "%s: Invalid media selected\n",
			dev->name);
		return -EINVAL;
	}
	return 0;
}

static struct net_device_stats *au1000_get_stats(struct net_device *dev)
{
	struct au1000_private *aup = (struct au1000_private *) dev->priv;

	if (au1000_debug > 4)
		printk("%s: au1000_get_stats: dev=%p\n", dev->name, dev);

	if (netif_device_present(dev))
		return &aup->stats;

	return NULL;
}

module_init(au1000_init_module);
module_exit(au1000_cleanup_module);