 1/*
2 * Fast Ethernet Controller (FCC) driver for Motorola MPC8260.
3 * Copyright (c) 2000 MontaVista Software, Inc. Dan Malek (dmalek@jlc.net)
4 *
5 * This version of the driver is a combination of the 8xx fec and
6 * 8260 SCC Ethernet drivers. This version has some additional
7 * configuration options, which should probably be moved out of
8 * here. This driver currently works for the EST SBC8260,
9 * SBS Diablo/BCM, Embedded Planet RPX6, TQM8260, and others.
10 *
 11 * Right now, I am very wasteful with the buffers. I allocate memory
12 * pages and then divide them into 2K frame buffers. This way I know I
13 * have buffers large enough to hold one frame within one buffer descriptor.
14 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
15 * will be much more memory efficient and will easily handle lots of
16 * small packets. Since this is a cache coherent processor and CPM,
17 * I could also preallocate SKB's and use them directly on the interface.
18 *
19 * 2004-12 Leo Li (leoli@freescale.com)
20 * - Rework the FCC clock configuration part, make it easier to configure.
21 *
22 */
23
 24#include <linux/kernel.h>
25#include <linux/sched.h>
26#include <linux/string.h>
27#include <linux/ptrace.h>
28#include <linux/errno.h>
29#include <linux/ioport.h>
30#include <linux/slab.h>
31#include <linux/interrupt.h>
32#include <linux/pci.h>
33#include <linux/init.h>
34#include <linux/delay.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/skbuff.h>
38#include <linux/spinlock.h>
39#include <linux/mii.h>
40#include <linux/workqueue.h>
41#include <linux/bitops.h>
42
43#include <asm/immap_cpm2.h>
44#include <asm/pgtable.h>
45#include <asm/mpc8260.h>
46#include <asm/irq.h>
47#include <asm/uaccess.h>
48#include <asm/signal.h>
49
50/* We can't use the PHY interrupt if we aren't using MDIO. */
51#if !defined(CONFIG_USE_MDIO)
52#undef PHY_INTERRUPT
53#endif
54
55/* If we have a PHY interrupt, we will advertise both full-duplex and half-
56 * duplex capabilities. If we don't have a PHY interrupt, then we will only
57 * advertise half-duplex capabilities.
58 */
59#define MII_ADVERTISE_HALF (ADVERTISE_100HALF | ADVERTISE_10HALF | \
60 ADVERTISE_CSMA)
61#define MII_ADVERTISE_ALL (ADVERTISE_100FULL | ADVERTISE_10FULL | \
62 MII_ADVERTISE_HALF)
63#ifdef PHY_INTERRUPT
64#define MII_ADVERTISE_DEFAULT MII_ADVERTISE_ALL
65#else
66#define MII_ADVERTISE_DEFAULT MII_ADVERTISE_HALF
67#endif
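/* Illustrative note (not from the original driver): with the standard
 * <linux/mii.h> bits (ADVERTISE_CSMA 0x0001, ADVERTISE_10HALF 0x0020,
 * ADVERTISE_10FULL 0x0040, ADVERTISE_100HALF 0x0080, ADVERTISE_100FULL
 * 0x0100), MII_ADVERTISE_HALF works out to 0x00a1 and MII_ADVERTISE_ALL to
 * 0x01e1, so without PHY_INTERRUPT only the half-duplex modes are offered
 * during auto-negotiation.
 */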
68#include <asm/cpm2.h>
69
70/* The transmitter timeout
71 */
72#define TX_TIMEOUT (2*HZ)
73
74#ifdef CONFIG_USE_MDIO
75/* Forward declarations of some structures to support different PHYs */
76
77typedef struct {
78 uint mii_data;
79 void (*funct)(uint mii_reg, struct net_device *dev);
80} phy_cmd_t;
81
82typedef struct {
83 uint id;
84 char *name;
85
86 const phy_cmd_t *config;
87 const phy_cmd_t *startup;
88 const phy_cmd_t *ack_int;
89 const phy_cmd_t *shutdown;
90} phy_info_t;
91
92/* values for phy_status */
93
94#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */
95#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */
96#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */
97#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */
98#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */
99#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */
100#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */
101
102#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */
103#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */
104#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */
105#define PHY_STAT_SPMASK 0xf000 /* mask for speed */
106#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */
107#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */
108#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */
109#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */
110#endif /* CONFIG_USE_MDIO */
111
112/* The number of Tx and Rx buffers. These are allocated from the page
 113 * pool. The code may assume these are powers of two, so it is best
 114 * to keep them that way.
 115 * We don't need to allocate pages for the transmitter; we just use
 116 * the skbuff directly.
117 */
118#define FCC_ENET_RX_PAGES 16
119#define FCC_ENET_RX_FRSIZE 2048
120#define FCC_ENET_RX_FRPPG (PAGE_SIZE / FCC_ENET_RX_FRSIZE)
121#define RX_RING_SIZE (FCC_ENET_RX_FRPPG * FCC_ENET_RX_PAGES)
122#define TX_RING_SIZE 16 /* Must be power of two */
123#define TX_RING_MOD_MASK 15 /* for this to work */
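/* Illustrative note (assumes the usual 4 KiB PAGE_SIZE on these parts):
 * FCC_ENET_RX_FRPPG = 4096 / 2048 = 2 frames per page, so RX_RING_SIZE =
 * 2 * 16 = 32 receive descriptors. Keeping the Tx ring a power of two lets
 * ring indices wrap with a cheap mask instead of a modulo:
 *
 *	next = (cur + 1) & TX_RING_MOD_MASK;
 *
 * so (15 + 1) & 15 wraps back to 0, which is how skb_cur and skb_dirty are
 * advanced below.
 */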
124
125/* The FCC stores dest/src/type, data, and checksum for receive packets.
 126 * The size includes support for VLAN tagging.
127 */
128#define PKT_MAXBUF_SIZE 1522
129#define PKT_MINBUF_SIZE 64
130
 131/* Maximum input DMA size. Should be a multiple of 4.
 132 * The size includes support for VLAN tagging.
133 */
134#define PKT_MAXDMA_SIZE 1524
135
136/* Maximum input buffer size. Must be a multiple of 32.
137*/
138#define PKT_MAXBLR_SIZE 1536
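/* Illustrative note on how these sizes relate (not in the original source):
 * 1522 is the classic 1518-byte maximum frame (including FCS) plus a 4-byte
 * VLAN tag, 1524 rounds that up to the next multiple of 4 for the DMA
 * engine, and 1536 (48 * 32) is the next multiple of 32 above 1524, which
 * satisfies the buffer-size restriction noted above.
 */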
139
140static int fcc_enet_open(struct net_device *dev);
141static int fcc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev);
142static int fcc_enet_rx(struct net_device *dev);
143static irqreturn_t fcc_enet_interrupt(int irq, void *dev_id, struct pt_regs *);
144static int fcc_enet_close(struct net_device *dev);
145static struct net_device_stats *fcc_enet_get_stats(struct net_device *dev);
146/* static void set_multicast_list(struct net_device *dev); */
147static void fcc_restart(struct net_device *dev, int duplex);
148static void fcc_stop(struct net_device *dev);
149static int fcc_enet_set_mac_address(struct net_device *dev, void *addr);
150
151/* These will be configurable for the FCC choice.
152 * Multiple ports can be configured. There is little choice among the
153 * I/O pins to the PHY, except the clocks. We will need some board
154 * dependent clock selection.
155 * Why in the hell did I put these inside #ifdef's? I dunno, maybe to
156 * help show what pins are used for each device.
157 */
158
 159/* Since the CLK setting varies greatly from board to board, this has been
 160 * changed to an easier scheme: just specify which CLK number to use.
 161 * Note that only limited choices can be made on each port.
162 */
163
 164/* FCC1 Clock Source Configuration. These are board specific.
 165 Only CLK9-CLK12 can be selected. */
166#ifdef CONFIG_SBC82xx
167#define F1_RXCLK 9
168#define F1_TXCLK 10
169#elif defined(CONFIG_ADS8272)
170#define F1_RXCLK 11
171#define F1_TXCLK 10
172#else
173#define F1_RXCLK 12
174#define F1_TXCLK 11
175#endif
176
 177/* FCC2 Clock Source Configuration. These are board specific.
 178 Only CLK13-CLK16 can be selected. */
179#ifdef CONFIG_ADS8272
180#define F2_RXCLK 15
181#define F2_TXCLK 16
182#else
183#define F2_RXCLK 13
184#define F2_TXCLK 14
185#endif
186
 187/* FCC3 Clock Source Configuration. These are board specific.
 188 Only CLK13-CLK16 can be selected. */
189#define F3_RXCLK 15
190#define F3_TXCLK 16
191
 192/* Macros that automatically generate the register configuration values */
193#define PC_CLK(x) ((uint)(1<<(x-1))) /* FCC CLK I/O ports */
194
195#define CMXFCR_RF1CS(x) ((uint)((x-5)<<27)) /* FCC1 Receive Clock Source */
196#define CMXFCR_TF1CS(x) ((uint)((x-5)<<24)) /* FCC1 Transmit Clock Source */
197#define CMXFCR_RF2CS(x) ((uint)((x-9)<<19)) /* FCC2 Receive Clock Source */
198#define CMXFCR_TF2CS(x) ((uint)((x-9)<<16)) /* FCC2 Transmit Clock Source */
199#define CMXFCR_RF3CS(x) ((uint)((x-9)<<11)) /* FCC3 Receive Clock Source */
200#define CMXFCR_TF3CS(x) ((uint)((x-9)<<8)) /* FCC3 Transmit Clock Source */
201
202#define PC_F1RXCLK PC_CLK(F1_RXCLK)
203#define PC_F1TXCLK PC_CLK(F1_TXCLK)
204#define CMX1_CLK_ROUTE (CMXFCR_RF1CS(F1_RXCLK) | CMXFCR_TF1CS(F1_TXCLK))
205#define CMX1_CLK_MASK ((uint)0xff000000)
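/* Worked example (illustrative only): with the default settings above,
 * F1_RXCLK = 12 and F1_TXCLK = 11, so
 *	PC_F1RXCLK     = 1 << 11               = 0x00000800
 *	PC_F1TXCLK     = 1 << 10               = 0x00000400
 *	CMX1_CLK_ROUTE = (7 << 27) | (6 << 24) = 0x3e000000
 * i.e. CLK12 feeds the FCC1 receiver and CLK11 its transmitter, all within
 * the 0xff000000 field cleared by CMX1_CLK_MASK.
 */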
206
207#define PC_F2RXCLK PC_CLK(F2_RXCLK)
208#define PC_F2TXCLK PC_CLK(F2_TXCLK)
209#define CMX2_CLK_ROUTE (CMXFCR_RF2CS(F2_RXCLK) | CMXFCR_TF2CS(F2_TXCLK))
210#define CMX2_CLK_MASK ((uint)0x00ff0000)
211
212#define PC_F3RXCLK PC_CLK(F3_RXCLK)
213#define PC_F3TXCLK PC_CLK(F3_TXCLK)
214#define CMX3_CLK_ROUTE (CMXFCR_RF3CS(F3_RXCLK) | CMXFCR_TF3CS(F3_TXCLK))
215#define CMX3_CLK_MASK ((uint)0x0000ff00)
216
217
218/* I/O Pin assignment for FCC1. I don't yet know the best way to do this,
219 * but there is little variation among the choices.
220 */
221#define PA1_COL ((uint)0x00000001)
222#define PA1_CRS ((uint)0x00000002)
223#define PA1_TXER ((uint)0x00000004)
224#define PA1_TXEN ((uint)0x00000008)
225#define PA1_RXDV ((uint)0x00000010)
226#define PA1_RXER ((uint)0x00000020)
227#define PA1_TXDAT ((uint)0x00003c00)
228#define PA1_RXDAT ((uint)0x0003c000)
229#define PA1_PSORA_BOUT (PA1_RXDAT | PA1_TXDAT)
230#define PA1_PSORA_BIN (PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \
231 PA1_RXDV | PA1_RXER)
232#define PA1_DIRA_BOUT (PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV)
233#define PA1_DIRA_BIN (PA1_TXDAT | PA1_TXEN | PA1_TXER)
234
235
236/* I/O Pin assignment for FCC2. I don't yet know the best way to do this,
237 * but there is little variation among the choices.
238 */
239#define PB2_TXER ((uint)0x00000001)
240#define PB2_RXDV ((uint)0x00000002)
241#define PB2_TXEN ((uint)0x00000004)
242#define PB2_RXER ((uint)0x00000008)
243#define PB2_COL ((uint)0x00000010)
244#define PB2_CRS ((uint)0x00000020)
245#define PB2_TXDAT ((uint)0x000003c0)
246#define PB2_RXDAT ((uint)0x00003c00)
247#define PB2_PSORB_BOUT (PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \
248 PB2_RXER | PB2_RXDV | PB2_TXER)
249#define PB2_PSORB_BIN (PB2_TXEN)
250#define PB2_DIRB_BOUT (PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV)
251#define PB2_DIRB_BIN (PB2_TXDAT | PB2_TXEN | PB2_TXER)
252
253
254/* I/O Pin assignment for FCC3. I don't yet know the best way to do this,
255 * but there is little variation among the choices.
256 */
257#define PB3_RXDV ((uint)0x00004000)
258#define PB3_RXER ((uint)0x00008000)
259#define PB3_TXER ((uint)0x00010000)
260#define PB3_TXEN ((uint)0x00020000)
261#define PB3_COL ((uint)0x00040000)
262#define PB3_CRS ((uint)0x00080000)
263#ifndef CONFIG_RPX8260
264#define PB3_TXDAT ((uint)0x0f000000)
265#define PC3_TXDAT ((uint)0x00000000)
266#else
267#define PB3_TXDAT ((uint)0x0f000000)
268#define PC3_TXDAT 0
269#endif
270#define PB3_RXDAT ((uint)0x00f00000)
271#define PB3_PSORB_BOUT (PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \
272 PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN)
273#define PB3_PSORB_BIN (0)
274#define PB3_DIRB_BOUT (PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV)
275#define PB3_DIRB_BIN (PB3_TXDAT | PB3_TXEN | PB3_TXER)
276
277#define PC3_PSORC_BOUT (PC3_TXDAT)
278#define PC3_PSORC_BIN (0)
279#define PC3_DIRC_BOUT (0)
280#define PC3_DIRC_BIN (PC3_TXDAT)
281
282
283/* MII status/control serial interface.
284*/
285#if defined(CONFIG_RPX8260)
286/* The EP8260 doesn't use Port C for MDIO */
287#define PC_MDIO ((uint)0x00000000)
288#define PC_MDCK ((uint)0x00000000)
289#elif defined(CONFIG_TQM8260)
290/* TQM8260 has MDIO and MDCK on PC30 and PC31 respectively */
291#define PC_MDIO ((uint)0x00000002)
292#define PC_MDCK ((uint)0x00000001)
293#elif defined(CONFIG_ADS8272)
294#define PC_MDIO ((uint)0x00002000)
295#define PC_MDCK ((uint)0x00001000)
296#elif defined(CONFIG_EST8260) || defined(CONFIG_ADS8260) || defined(CONFIG_PQ2FADS)
297#define PC_MDIO ((uint)0x00400000)
298#define PC_MDCK ((uint)0x00200000)
299#else
300#define PC_MDIO ((uint)0x00000004)
301#define PC_MDCK ((uint)0x00000020)
302#endif
303
304#if defined(CONFIG_USE_MDIO) && (!defined(PC_MDIO) || !defined(PC_MDCK))
305#error "Must define PC_MDIO and PC_MDCK if using MDIO"
306#endif
307
308/* PHY addresses */
309/* default to dynamic config of phy addresses */
310#define FCC1_PHY_ADDR 0
311#ifdef CONFIG_PQ2FADS
312#define FCC2_PHY_ADDR 0
313#else
314#define FCC2_PHY_ADDR 2
315#endif
316#define FCC3_PHY_ADDR 3
317
318/* A table of information for supporting FCCs. This does two things.
319 * First, we know how many FCCs we have and they are always externally
320 * numbered from zero. Second, it holds control register and I/O
321 * information that could be different among board designs.
322 */
323typedef struct fcc_info {
324 uint fc_fccnum;
325 uint fc_phyaddr;
326 uint fc_cpmblock;
327 uint fc_cpmpage;
328 uint fc_proff;
329 uint fc_interrupt;
330 uint fc_trxclocks;
331 uint fc_clockroute;
332 uint fc_clockmask;
333 uint fc_mdio;
334 uint fc_mdck;
335} fcc_info_t;
336
337static fcc_info_t fcc_ports[] = {
338#ifdef CONFIG_FCC1_ENET
339 { 0, FCC1_PHY_ADDR, CPM_CR_FCC1_SBLOCK, CPM_CR_FCC1_PAGE, PROFF_FCC1, SIU_INT_FCC1,
340 (PC_F1RXCLK | PC_F1TXCLK), CMX1_CLK_ROUTE, CMX1_CLK_MASK,
341 PC_MDIO, PC_MDCK },
342#endif
343#ifdef CONFIG_FCC2_ENET
344 { 1, FCC2_PHY_ADDR, CPM_CR_FCC2_SBLOCK, CPM_CR_FCC2_PAGE, PROFF_FCC2, SIU_INT_FCC2,
345 (PC_F2RXCLK | PC_F2TXCLK), CMX2_CLK_ROUTE, CMX2_CLK_MASK,
346 PC_MDIO, PC_MDCK },
347#endif
348#ifdef CONFIG_FCC3_ENET
349 { 2, FCC3_PHY_ADDR, CPM_CR_FCC3_SBLOCK, CPM_CR_FCC3_PAGE, PROFF_FCC3, SIU_INT_FCC3,
350 (PC_F3RXCLK | PC_F3TXCLK), CMX3_CLK_ROUTE, CMX3_CLK_MASK,
351 PC_MDIO, PC_MDCK },
352#endif
353};
354
355/* The FCC buffer descriptors track the ring buffers. The rx_bd_base and
356 * tx_bd_base always point to the base of the buffer descriptors. The
357 * cur_rx and cur_tx point to the currently available buffer.
358 * The dirty_tx tracks the current buffer that is being sent by the
359 * controller. The cur_tx and dirty_tx are equal under both completely
360 * empty and completely full conditions. The empty/ready indicator in
361 * the buffer descriptor determines the actual condition.
362 */
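/* Illustrative walk-through (not part of the original comment): on an idle
 * ring cur_tx == dirty_tx and tx_free == TX_RING_SIZE; queuing one frame
 * advances cur_tx by one BD and drops tx_free to TX_RING_SIZE - 1; if the
 * ring fills completely, cur_tx wraps until it again equals dirty_tx with
 * tx_free == 0. That is why the code below checks tx_free (and the READY
 * bit) rather than only comparing the two pointers.
 */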
363struct fcc_enet_private {
 364 /* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
365 struct sk_buff* tx_skbuff[TX_RING_SIZE];
366 ushort skb_cur;
367 ushort skb_dirty;
368
369 /* CPM dual port RAM relative addresses.
370 */
371 cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */
372 cbd_t *tx_bd_base;
373 cbd_t *cur_rx, *cur_tx; /* The next free ring entry */
374 cbd_t *dirty_tx; /* The ring entries to be free()ed. */
375 volatile fcc_t *fccp;
376 volatile fcc_enet_t *ep;
377 struct net_device_stats stats;
378 uint tx_free;
379 spinlock_t lock;
380
381#ifdef CONFIG_USE_MDIO
382 uint phy_id;
383 uint phy_id_done;
384 uint phy_status;
385 phy_info_t *phy;
386 struct work_struct phy_relink;
387 struct work_struct phy_display_config;
388
389 uint sequence_done;
390
391 uint phy_addr;
392#endif /* CONFIG_USE_MDIO */
393
394 int link;
395 int old_link;
396 int full_duplex;
397
398 fcc_info_t *fip;
399};
400
401static void init_fcc_shutdown(fcc_info_t *fip, struct fcc_enet_private *cep,
402 volatile cpm2_map_t *immap);
403static void init_fcc_startup(fcc_info_t *fip, struct net_device *dev);
404static void init_fcc_ioports(fcc_info_t *fip, volatile iop_cpm2_t *io,
405 volatile cpm2_map_t *immap);
406static void init_fcc_param(fcc_info_t *fip, struct net_device *dev,
407 volatile cpm2_map_t *immap);
408
409#ifdef CONFIG_USE_MDIO
410static int mii_queue(struct net_device *dev, int request, void (*func)(uint, struct net_device *));
411static uint mii_send_receive(fcc_info_t *fip, uint cmd);
412static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c);
413
414/* Make MII read/write commands for the FCC.
415*/
416#define mk_mii_read(REG) (0x60020000 | (((REG) & 0x1f) << 18))
417#define mk_mii_write(REG, VAL) (0x50020000 | (((REG) & 0x1f) << 18) | \
418 ((VAL) & 0xffff))
419#define mk_mii_end 0
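/* Worked example (illustrative only): MII_BMSR is register 1, so
 * mk_mii_read(MII_BMSR) = 0x60020000 | (1 << 18) = 0x60060000; mii_queue()
 * below then ORs in the PHY address at bit 23 (0x61860000 for a PHY at
 * address 3) before the frame is shifted out bit by bit by
 * mii_send_receive().
 */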
420#endif /* CONFIG_USE_MDIO */
421
422
423static int
424fcc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
425{
426 struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv;
427 volatile cbd_t *bdp;
428
429 /* Fill in a Tx ring entry */
430 bdp = cep->cur_tx;
431
432#ifndef final_version
433 if (!cep->tx_free || (bdp->cbd_sc & BD_ENET_TX_READY)) {
434 /* Ooops. All transmit buffers are full. Bail out.
435 * This should not happen, since the tx queue should be stopped.
436 */
437 printk("%s: tx queue full!.\n", dev->name);
438 return 1;
439 }
440#endif
441
442 /* Clear all of the status flags. */
443 bdp->cbd_sc &= ~BD_ENET_TX_STATS;
444
445 /* If the frame is short, tell CPM to pad it. */
446 if (skb->len <= ETH_ZLEN)
447 bdp->cbd_sc |= BD_ENET_TX_PAD;
448 else
449 bdp->cbd_sc &= ~BD_ENET_TX_PAD;
450
451 /* Set buffer length and buffer pointer. */
452 bdp->cbd_datlen = skb->len;
453 bdp->cbd_bufaddr = __pa(skb->data);
454
455 spin_lock_irq(&cep->lock);
456
457 /* Save skb pointer. */
458 cep->tx_skbuff[cep->skb_cur] = skb;
459
460 cep->stats.tx_bytes += skb->len;
461 cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;
462
 463 /* Send it on its way. Tell the CPM it's ready, interrupt when done,
 464 * it's the last BD of the frame, and to put the CRC on the end.
465 */
466 bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);
467
468#if 0
469 /* Errata says don't do this. */
470 cep->fccp->fcc_ftodr = 0x8000;
471#endif
472 dev->trans_start = jiffies;
473
474 /* If this was the last BD in the ring, start at the beginning again. */
475 if (bdp->cbd_sc & BD_ENET_TX_WRAP)
476 bdp = cep->tx_bd_base;
477 else
478 bdp++;
479
480 if (!--cep->tx_free)
481 netif_stop_queue(dev);
482
483 cep->cur_tx = (cbd_t *)bdp;
484
485 spin_unlock_irq(&cep->lock);
486
487 return 0;
488}
489
490
491static void
492fcc_enet_timeout(struct net_device *dev)
493{
494 struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv;
495
496 printk("%s: transmit timed out.\n", dev->name);
497 cep->stats.tx_errors++;
498#ifndef final_version
499 {
500 int i;
501 cbd_t *bdp;
502 printk(" Ring data dump: cur_tx %p tx_free %d cur_rx %p.\n",
503 cep->cur_tx, cep->tx_free,
504 cep->cur_rx);
505 bdp = cep->tx_bd_base;
506 printk(" Tx @base %p :\n", bdp);
507 for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
508 printk("%04x %04x %08x\n",
509 bdp->cbd_sc,
510 bdp->cbd_datlen,
511 bdp->cbd_bufaddr);
512 bdp = cep->rx_bd_base;
513 printk(" Rx @base %p :\n", bdp);
514 for (i = 0 ; i < RX_RING_SIZE; i++, bdp++)
515 printk("%04x %04x %08x\n",
516 bdp->cbd_sc,
517 bdp->cbd_datlen,
518 bdp->cbd_bufaddr);
519 }
520#endif
521 if (cep->tx_free)
522 netif_wake_queue(dev);
523}
524
525/* The interrupt handler. */
526static irqreturn_t
527fcc_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs)
528{
529 struct net_device *dev = dev_id;
530 volatile struct fcc_enet_private *cep;
531 volatile cbd_t *bdp;
532 ushort int_events;
533 int must_restart;
534
535 cep = (struct fcc_enet_private *)dev->priv;
536
537 /* Get the interrupt events that caused us to be here.
538 */
539 int_events = cep->fccp->fcc_fcce;
540 cep->fccp->fcc_fcce = (int_events & cep->fccp->fcc_fccm);
541 must_restart = 0;
542
543#ifdef PHY_INTERRUPT
544 /* We have to be careful here to make sure that we aren't
545 * interrupted by a PHY interrupt.
546 */
547 disable_irq_nosync(PHY_INTERRUPT);
548#endif
549
550 /* Handle receive event in its own function.
551 */
552 if (int_events & FCC_ENET_RXF)
553 fcc_enet_rx(dev_id);
554
555 /* Check for a transmit error. The manual is a little unclear
 556 * about this, so the debug code stays in until I get it figured out. It
557 * appears that if TXE is set, then TXB is not set. However,
558 * if carrier sense is lost during frame transmission, the TXE
559 * bit is set, "and continues the buffer transmission normally."
560 * I don't know if "normally" implies TXB is set when the buffer
561 * descriptor is closed.....trial and error :-).
562 */
563
564 /* Transmit OK, or non-fatal error. Update the buffer descriptors.
565 */
566 if (int_events & (FCC_ENET_TXE | FCC_ENET_TXB)) {
567 spin_lock(&cep->lock);
568 bdp = cep->dirty_tx;
569 while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) {
570 if (cep->tx_free == TX_RING_SIZE)
571 break;
572
573 if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */
574 cep->stats.tx_heartbeat_errors++;
575 if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */
576 cep->stats.tx_window_errors++;
577 if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */
578 cep->stats.tx_aborted_errors++;
579 if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */
580 cep->stats.tx_fifo_errors++;
581 if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */
582 cep->stats.tx_carrier_errors++;
583
584
 585 /* A missing heartbeat or lost carrier is not really a serious error.
586 * The others require a restart transmit command.
587 */
588 if (bdp->cbd_sc &
589 (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
590 must_restart = 1;
591 cep->stats.tx_errors++;
592 }
593
594 cep->stats.tx_packets++;
595
596 /* Deferred means some collisions occurred during transmit,
597 * but we eventually sent the packet OK.
598 */
599 if (bdp->cbd_sc & BD_ENET_TX_DEF)
600 cep->stats.collisions++;
601
602 /* Free the sk buffer associated with this last transmit. */
603 dev_kfree_skb_irq(cep->tx_skbuff[cep->skb_dirty]);
604 cep->tx_skbuff[cep->skb_dirty] = NULL;
605 cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;
606
607 /* Update pointer to next buffer descriptor to be transmitted. */
608 if (bdp->cbd_sc & BD_ENET_TX_WRAP)
609 bdp = cep->tx_bd_base;
610 else
611 bdp++;
612
613 /* I don't know if we can be held off from processing these
614 * interrupts for more than one frame time. I really hope
615 * not. In such a case, we would now want to check the
616 * currently available BD (cur_tx) and determine if any
617 * buffers between the dirty_tx and cur_tx have also been
618 * sent. We would want to process anything in between that
619 * does not have BD_ENET_TX_READY set.
620 */
621
622 /* Since we have freed up a buffer, the ring is no longer
623 * full.
624 */
625 if (!cep->tx_free++) {
626 if (netif_queue_stopped(dev)) {
627 netif_wake_queue(dev);
628 }
629 }
630
631 cep->dirty_tx = (cbd_t *)bdp;
632 }
633
634 if (must_restart) {
635 volatile cpm_cpm2_t *cp;
636
637 /* Some transmit errors cause the transmitter to shut
638 * down. We now issue a restart transmit. Since the
639 * errors close the BD and update the pointers, the restart
640 * _should_ pick up without having to reset any of our
 641 * pointers either. Also, to work around 8260 device erratum
642 * CPM37, we must disable and then re-enable the transmitter
643 * following a Late Collision, Underrun, or Retry Limit error.
644 */
645 cep->fccp->fcc_gfmr &= ~FCC_GFMR_ENT;
646 udelay(10); /* wait a few microseconds just on principle */
647 cep->fccp->fcc_gfmr |= FCC_GFMR_ENT;
648
649 cp = cpmp;
650 cp->cp_cpcr =
651 mk_cr_cmd(cep->fip->fc_cpmpage, cep->fip->fc_cpmblock,
652 0x0c, CPM_CR_RESTART_TX) | CPM_CR_FLG;
653 while (cp->cp_cpcr & CPM_CR_FLG);
654 }
655 spin_unlock(&cep->lock);
656 }
657
658 /* Check for receive busy, i.e. packets coming but no place to
659 * put them.
660 */
661 if (int_events & FCC_ENET_BSY) {
662 cep->fccp->fcc_fcce = FCC_ENET_BSY;
663 cep->stats.rx_dropped++;
664 }
665
666#ifdef PHY_INTERRUPT
667 enable_irq(PHY_INTERRUPT);
668#endif
669 return IRQ_HANDLED;
670}
671
672/* During a receive, the cur_rx points to the current incoming buffer.
673 * When we update through the ring, if the next incoming buffer has
674 * not been given to the system, we just set the empty indicator,
675 * effectively tossing the packet.
676 */
677static int
678fcc_enet_rx(struct net_device *dev)
679{
680 struct fcc_enet_private *cep;
681 volatile cbd_t *bdp;
682 struct sk_buff *skb;
683 ushort pkt_len;
684
685 cep = (struct fcc_enet_private *)dev->priv;
686
687 /* First, grab all of the stats for the incoming packet.
688 * These get messed up if we get called due to a busy condition.
689 */
690 bdp = cep->cur_rx;
691
692for (;;) {
693 if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
694 break;
695
696#ifndef final_version
697 /* Since we have allocated space to hold a complete frame, both
698 * the first and last indicators should be set.
699 */
700 if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
701 (BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
702 printk("CPM ENET: rcv is not first+last\n");
703#endif
704
705 /* Frame too long or too short. */
706 if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
707 cep->stats.rx_length_errors++;
708 if (bdp->cbd_sc & BD_ENET_RX_NO) /* Frame alignment */
709 cep->stats.rx_frame_errors++;
710 if (bdp->cbd_sc & BD_ENET_RX_CR) /* CRC Error */
711 cep->stats.rx_crc_errors++;
712 if (bdp->cbd_sc & BD_ENET_RX_OV) /* FIFO overrun */
713 cep->stats.rx_crc_errors++;
714 if (bdp->cbd_sc & BD_ENET_RX_CL) /* Late Collision */
715 cep->stats.rx_frame_errors++;
716
717 if (!(bdp->cbd_sc &
718 (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | BD_ENET_RX_CR
719 | BD_ENET_RX_OV | BD_ENET_RX_CL)))
720 {
721 /* Process the incoming frame. */
722 cep->stats.rx_packets++;
723
724 /* Remove the FCS from the packet length. */
725 pkt_len = bdp->cbd_datlen - 4;
726 cep->stats.rx_bytes += pkt_len;
727
728 /* This does 16 byte alignment, much more than we need. */
729 skb = dev_alloc_skb(pkt_len);
730
731 if (skb == NULL) {
732 printk("%s: Memory squeeze, dropping packet.\n", dev->name);
733 cep->stats.rx_dropped++;
734 }
735 else {
736 skb->dev = dev;
737 skb_put(skb,pkt_len); /* Make room */
738 eth_copy_and_sum(skb,
739 (unsigned char *)__va(bdp->cbd_bufaddr),
740 pkt_len, 0);
741 skb->protocol=eth_type_trans(skb,dev);
742 netif_rx(skb);
743 }
744 }
745
746 /* Clear the status flags for this buffer. */
747 bdp->cbd_sc &= ~BD_ENET_RX_STATS;
748
749 /* Mark the buffer empty. */
750 bdp->cbd_sc |= BD_ENET_RX_EMPTY;
751
752 /* Update BD pointer to next entry. */
753 if (bdp->cbd_sc & BD_ENET_RX_WRAP)
754 bdp = cep->rx_bd_base;
755 else
756 bdp++;
757
758 }
759 cep->cur_rx = (cbd_t *)bdp;
760
761 return 0;
762}
763
764static int
765fcc_enet_close(struct net_device *dev)
766{
767#ifdef CONFIG_USE_MDIO
768 struct fcc_enet_private *fep = dev->priv;
769#endif
770
771 netif_stop_queue(dev);
772 fcc_stop(dev);
773#ifdef CONFIG_USE_MDIO
774 if (fep->phy)
775 mii_do_cmd(dev, fep->phy->shutdown);
776#endif
777
778 return 0;
779}
780
781static struct net_device_stats *fcc_enet_get_stats(struct net_device *dev)
782{
783 struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv;
784
785 return &cep->stats;
786}
787
788#ifdef CONFIG_USE_MDIO
789
790/* NOTE: Most of the following comes from the FEC driver for 860. The
791 * overall structure of MII code has been retained (as it's proved stable
792 * and well-tested), but actual transfer requests are processed "at once"
793 * instead of being queued (there's no interrupt-driven MII transfer
794 * mechanism, one has to toggle the data/clock bits manually).
795 */
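/* Usage sketch (illustrative): a call such as
 *
 *	mii_queue(dev, mk_mii_read(MII_BMSR), mii_parse_sr);
 *
 * is therefore synchronous: it performs the bit-banged transfer via
 * mii_send_receive() immediately and hands the result (the register
 * contents, for a read) to the mii_parse_sr() callback before returning.
 */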
796static int
797mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
798{
799 struct fcc_enet_private *fep;
800 int retval, tmp;
801
802 /* Add PHY address to register command. */
803 fep = dev->priv;
804 regval |= fep->phy_addr << 23;
805
806 retval = 0;
807
808 tmp = mii_send_receive(fep->fip, regval);
809 if (func)
810 func(tmp, dev);
811
812 return retval;
813}
814
815static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
816{
817 int k;
818
819 if(!c)
820 return;
821
822 for(k = 0; (c+k)->mii_data != mk_mii_end; k++)
823 mii_queue(dev, (c+k)->mii_data, (c+k)->funct);
824}
825
826static void mii_parse_sr(uint mii_reg, struct net_device *dev)
827{
828 volatile struct fcc_enet_private *fep = dev->priv;
829 uint s = fep->phy_status;
830
831 s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
832
833 if (mii_reg & BMSR_LSTATUS)
834 s |= PHY_STAT_LINK;
835 if (mii_reg & BMSR_RFAULT)
836 s |= PHY_STAT_FAULT;
837 if (mii_reg & BMSR_ANEGCOMPLETE)
838 s |= PHY_STAT_ANC;
839
840 fep->phy_status = s;
841}
842
843static void mii_parse_cr(uint mii_reg, struct net_device *dev)
844{
845 volatile struct fcc_enet_private *fep = dev->priv;
846 uint s = fep->phy_status;
847
848 s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);
849
850 if (mii_reg & BMCR_ANENABLE)
851 s |= PHY_CONF_ANE;
852 if (mii_reg & BMCR_LOOPBACK)
853 s |= PHY_CONF_LOOP;
854
855 fep->phy_status = s;
856}
857
858static void mii_parse_anar(uint mii_reg, struct net_device *dev)
859{
860 volatile struct fcc_enet_private *fep = dev->priv;
861 uint s = fep->phy_status;
862
863 s &= ~(PHY_CONF_SPMASK);
864
865 if (mii_reg & ADVERTISE_10HALF)
866 s |= PHY_CONF_10HDX;
867 if (mii_reg & ADVERTISE_10FULL)
868 s |= PHY_CONF_10FDX;
869 if (mii_reg & ADVERTISE_100HALF)
870 s |= PHY_CONF_100HDX;
871 if (mii_reg & ADVERTISE_100FULL)
872 s |= PHY_CONF_100FDX;
873
874 fep->phy_status = s;
875}
876
877/* ------------------------------------------------------------------------- */
878/* Generic PHY support. Should work for all PHYs, but does not support link
879 * change interrupts.
880 */
881#ifdef CONFIG_FCC_GENERIC_PHY
882
883static phy_info_t phy_info_generic = {
884 0x00000000, /* 0-->match any PHY */
885 "GENERIC",
886
887 (const phy_cmd_t []) { /* config */
888 /* advertise only half-duplex capabilities */
889 { mk_mii_write(MII_ADVERTISE, MII_ADVERTISE_HALF),
890 mii_parse_anar },
891
892 /* enable auto-negotiation */
893 { mk_mii_write(MII_BMCR, BMCR_ANENABLE), mii_parse_cr },
894 { mk_mii_end, }
895 },
896 (const phy_cmd_t []) { /* startup */
897 /* restart auto-negotiation */
898 { mk_mii_write(MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART),
899 NULL },
900 { mk_mii_end, }
901 },
902 (const phy_cmd_t []) { /* ack_int */
903 /* We don't actually use the ack_int table with a generic
904 * PHY, but putting a reference to mii_parse_sr here keeps
905 * us from getting a compiler warning about unused static
906 * functions in the case where we only compile in generic
907 * PHY support.
908 */
909 { mk_mii_read(MII_BMSR), mii_parse_sr },
910 { mk_mii_end, }
911 },
912 (const phy_cmd_t []) { /* shutdown */
913 { mk_mii_end, }
914 },
915};
916#endif /* ifdef CONFIG_FCC_GENERIC_PHY */
917
918/* ------------------------------------------------------------------------- */
 919/* The Level One LXT970 is used by many boards */
920
921#ifdef CONFIG_FCC_LXT970
922
923#define MII_LXT970_MIRROR 16 /* Mirror register */
924#define MII_LXT970_IER 17 /* Interrupt Enable Register */
925#define MII_LXT970_ISR 18 /* Interrupt Status Register */
926#define MII_LXT970_CONFIG 19 /* Configuration Register */
927#define MII_LXT970_CSR 20 /* Chip Status Register */
928
929static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
930{
931 volatile struct fcc_enet_private *fep = dev->priv;
932 uint s = fep->phy_status;
933
934 s &= ~(PHY_STAT_SPMASK);
935
936 if (mii_reg & 0x0800) {
937 if (mii_reg & 0x1000)
938 s |= PHY_STAT_100FDX;
939 else
940 s |= PHY_STAT_100HDX;
941 } else {
942 if (mii_reg & 0x1000)
943 s |= PHY_STAT_10FDX;
944 else
945 s |= PHY_STAT_10HDX;
946 }
947
948 fep->phy_status = s;
949}
950
951static phy_info_t phy_info_lxt970 = {
952 0x07810000,
953 "LXT970",
954
955 (const phy_cmd_t []) { /* config */
956#if 0
957// { mk_mii_write(MII_ADVERTISE, 0x0021), NULL },
958
959 /* Set default operation of 100-TX....for some reason
960 * some of these bits are set on power up, which is wrong.
961 */
962 { mk_mii_write(MII_LXT970_CONFIG, 0), NULL },
963#endif
964 { mk_mii_read(MII_BMCR), mii_parse_cr },
965 { mk_mii_read(MII_ADVERTISE), mii_parse_anar },
966 { mk_mii_end, }
967 },
968 (const phy_cmd_t []) { /* startup - enable interrupts */
969 { mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
970 { mk_mii_write(MII_BMCR, 0x1200), NULL }, /* autonegotiate */
971 { mk_mii_end, }
972 },
973 (const phy_cmd_t []) { /* ack_int */
974 /* read SR and ISR to acknowledge */
975
976 { mk_mii_read(MII_BMSR), mii_parse_sr },
977 { mk_mii_read(MII_LXT970_ISR), NULL },
978
979 /* find out the current status */
980
981 { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
982 { mk_mii_end, }
983 },
984 (const phy_cmd_t []) { /* shutdown - disable interrupts */
985 { mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
986 { mk_mii_end, }
987 },
988};
989
 990#endif /* CONFIG_FCC_LXT970 */
991
992/* ------------------------------------------------------------------------- */
 993/* The Level One LXT971 is used on some of my custom boards */
994
995#ifdef CONFIG_FCC_LXT971
996
997/* register definitions for the 971 */
998
999#define MII_LXT971_PCR 16 /* Port Control Register */
1000#define MII_LXT971_SR2 17 /* Status Register 2 */
1001#define MII_LXT971_IER 18 /* Interrupt Enable Register */
1002#define MII_LXT971_ISR 19 /* Interrupt Status Register */
1003#define MII_LXT971_LCR 20 /* LED Control Register */
1004#define MII_LXT971_TCR 30 /* Transmit Control Register */
1005
1006/*
1007 * I had some nice ideas of running the MDIO faster...
1008 * The 971 should support 8MHz and I tried it, but things acted really
1009 * weird, so 2.5 MHz ought to be enough for anyone...
1010 */
1011
1012static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
1013{
1014 volatile struct fcc_enet_private *fep = dev->priv;
1015 uint s = fep->phy_status;
1016
1017 s &= ~(PHY_STAT_SPMASK);
1018
1019 if (mii_reg & 0x4000) {
1020 if (mii_reg & 0x0200)
1021 s |= PHY_STAT_100FDX;
1022 else
1023 s |= PHY_STAT_100HDX;
1024 } else {
1025 if (mii_reg & 0x0200)
1026 s |= PHY_STAT_10FDX;
1027 else
1028 s |= PHY_STAT_10HDX;
1029 }
1030 if (mii_reg & 0x0008)
1031 s |= PHY_STAT_FAULT;
1032
1033 fep->phy_status = s;
1034}
1035
1036static phy_info_t phy_info_lxt971 = {
1037 0x0001378e,
1038 "LXT971",
1039
1040 (const phy_cmd_t []) { /* config */
1041 /* configure link capabilities to advertise */
1042 { mk_mii_write(MII_ADVERTISE, MII_ADVERTISE_DEFAULT),
1043 mii_parse_anar },
1044
1045 /* enable auto-negotiation */
1046 { mk_mii_write(MII_BMCR, BMCR_ANENABLE), mii_parse_cr },
1047 { mk_mii_end, }
1048 },
1049 (const phy_cmd_t []) { /* startup - enable interrupts */
1050 { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
1051
1052 /* restart auto-negotiation */
1053 { mk_mii_write(MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART),
1054 NULL },
1055 { mk_mii_end, }
1056 },
1057 (const phy_cmd_t []) { /* ack_int */
1058 /* find out the current status */
1059 { mk_mii_read(MII_BMSR), NULL },
1060 { mk_mii_read(MII_BMSR), mii_parse_sr },
1061 { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
1062
1063 /* we only need to read ISR to acknowledge */
1064 { mk_mii_read(MII_LXT971_ISR), NULL },
1065 { mk_mii_end, }
1066 },
1067 (const phy_cmd_t []) { /* shutdown - disable interrupts */
1068 { mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
1069 { mk_mii_end, }
1070 },
1071};
1072
1073#endif /* CONFIG_FCC_LXT971 */
1074
1075/* ------------------------------------------------------------------------- */
1076/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
1077
1078#ifdef CONFIG_FCC_QS6612
1079
1080/* register definitions */
1081
1082#define MII_QS6612_MCR 17 /* Mode Control Register */
1083#define MII_QS6612_FTR 27 /* Factory Test Register */
1084#define MII_QS6612_MCO 28 /* Misc. Control Register */
1085#define MII_QS6612_ISR 29 /* Interrupt Source Register */
1086#define MII_QS6612_IMR 30 /* Interrupt Mask Register */
1087#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */
1088
1089static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
1090{
1091 volatile struct fcc_enet_private *fep = dev->priv;
1092 uint s = fep->phy_status;
1093
1094 s &= ~(PHY_STAT_SPMASK);
1095
1096 switch((mii_reg >> 2) & 7) {
1097 case 1: s |= PHY_STAT_10HDX; break;
1098 case 2: s |= PHY_STAT_100HDX; break;
1099 case 5: s |= PHY_STAT_10FDX; break;
1100 case 6: s |= PHY_STAT_100FDX; break;
1101 }
1102
1103 fep->phy_status = s;
1104}
1105
1106static phy_info_t phy_info_qs6612 = {
1107 0x00181440,
1108 "QS6612",
1109
1110 (const phy_cmd_t []) { /* config */
1111// { mk_mii_write(MII_ADVERTISE, 0x061), NULL }, /* 10 Mbps */
1112
1113 /* The PHY powers up isolated on the RPX,
1114 * so send a command to allow operation.
1115 */
1116
1117 { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
1118
1119 /* parse cr and anar to get some info */
1120
1121 { mk_mii_read(MII_BMCR), mii_parse_cr },
1122 { mk_mii_read(MII_ADVERTISE), mii_parse_anar },
1123 { mk_mii_end, }
1124 },
1125 (const phy_cmd_t []) { /* startup - enable interrupts */
1126 { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
1127 { mk_mii_write(MII_BMCR, 0x1200), NULL }, /* autonegotiate */
1128 { mk_mii_end, }
1129 },
1130 (const phy_cmd_t []) { /* ack_int */
1131
1132 /* we need to read ISR, SR and ANER to acknowledge */
1133
1134 { mk_mii_read(MII_QS6612_ISR), NULL },
1135 { mk_mii_read(MII_BMSR), mii_parse_sr },
1136 { mk_mii_read(MII_EXPANSION), NULL },
1137
1138 /* read pcr to get info */
1139
1140 { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
1141 { mk_mii_end, }
1142 },
1143 (const phy_cmd_t []) { /* shutdown - disable interrupts */
1144 { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
1145 { mk_mii_end, }
1146 },
1147};
1148
1149
 1150#endif /* CONFIG_FCC_QS6612 */
1151
1152
1153/* ------------------------------------------------------------------------- */
1154/* The Davicom DM9131 is used on the HYMOD board */
1155
1156#ifdef CONFIG_FCC_DM9131
1157
1158/* register definitions */
1159
1160#define MII_DM9131_ACR 16 /* Aux. Config Register */
1161#define MII_DM9131_ACSR 17 /* Aux. Config/Status Register */
1162#define MII_DM9131_10TCSR 18 /* 10BaseT Config/Status Reg. */
1163#define MII_DM9131_INTR 21 /* Interrupt Register */
1164#define MII_DM9131_RECR 22 /* Receive Error Counter Reg. */
1165#define MII_DM9131_DISCR 23 /* Disconnect Counter Register */
1166
1167static void mii_parse_dm9131_acsr(uint mii_reg, struct net_device *dev)
1168{
1169 volatile struct fcc_enet_private *fep = dev->priv;
1170 uint s = fep->phy_status;
1171
1172 s &= ~(PHY_STAT_SPMASK);
1173
1174 switch ((mii_reg >> 12) & 0xf) {
1175 case 1: s |= PHY_STAT_10HDX; break;
1176 case 2: s |= PHY_STAT_10FDX; break;
1177 case 4: s |= PHY_STAT_100HDX; break;
1178 case 8: s |= PHY_STAT_100FDX; break;
1179 }
1180
1181 fep->phy_status = s;
1182}
1183
1184static phy_info_t phy_info_dm9131 = {
1185 0x00181b80,
1186 "DM9131",
1187
1188 (const phy_cmd_t []) { /* config */
1189 /* parse cr and anar to get some info */
1190 { mk_mii_read(MII_BMCR), mii_parse_cr },
1191 { mk_mii_read(MII_ADVERTISE), mii_parse_anar },
1192 { mk_mii_end, }
1193 },
1194 (const phy_cmd_t []) { /* startup - enable interrupts */
1195 { mk_mii_write(MII_DM9131_INTR, 0x0002), NULL },
1196 { mk_mii_write(MII_BMCR, 0x1200), NULL }, /* autonegotiate */
1197 { mk_mii_end, }
1198 },
1199 (const phy_cmd_t []) { /* ack_int */
1200
1201 /* we need to read INTR, SR and ANER to acknowledge */
1202
1203 { mk_mii_read(MII_DM9131_INTR), NULL },
1204 { mk_mii_read(MII_BMSR), mii_parse_sr },
1205 { mk_mii_read(MII_EXPANSION), NULL },
1206
1207 /* read acsr to get info */
1208
1209 { mk_mii_read(MII_DM9131_ACSR), mii_parse_dm9131_acsr },
1210 { mk_mii_end, }
1211 },
1212 (const phy_cmd_t []) { /* shutdown - disable interrupts */
1213 { mk_mii_write(MII_DM9131_INTR, 0x0f00), NULL },
1214 { mk_mii_end, }
1215 },
1216};
1217
1218
 1219#endif /* CONFIG_FCC_DM9131 */
1220#ifdef CONFIG_FCC_DM9161
1221/* ------------------------------------------------------------------------- */
1222/* DM9161 Control register values */
1223#define MIIM_DM9161_CR_STOP 0x0400
1224#define MIIM_DM9161_CR_RSTAN 0x1200
1225
1226#define MIIM_DM9161_SCR 0x10
1227#define MIIM_DM9161_SCR_INIT 0x0610
1228
1229/* DM9161 Specified Configuration and Status Register */
1230#define MIIM_DM9161_SCSR 0x11
1231#define MIIM_DM9161_SCSR_100F 0x8000
1232#define MIIM_DM9161_SCSR_100H 0x4000
1233#define MIIM_DM9161_SCSR_10F 0x2000
1234#define MIIM_DM9161_SCSR_10H 0x1000
1235/* DM9161 10BT register */
1236#define MIIM_DM9161_10BTCSR 0x12
1237#define MIIM_DM9161_10BTCSR_INIT 0x7800
1238/* DM9161 Interrupt Register */
1239#define MIIM_DM9161_INTR 0x15
1240#define MIIM_DM9161_INTR_PEND 0x8000
1241#define MIIM_DM9161_INTR_DPLX_MASK 0x0800
1242#define MIIM_DM9161_INTR_SPD_MASK 0x0400
1243#define MIIM_DM9161_INTR_LINK_MASK 0x0200
1244#define MIIM_DM9161_INTR_MASK 0x0100
1245#define MIIM_DM9161_INTR_DPLX_CHANGE 0x0010
1246#define MIIM_DM9161_INTR_SPD_CHANGE 0x0008
1247#define MIIM_DM9161_INTR_LINK_CHANGE 0x0004
1248#define MIIM_DM9161_INTR_INIT 0x0000
1249#define MIIM_DM9161_INTR_STOP \
1250(MIIM_DM9161_INTR_DPLX_MASK | MIIM_DM9161_INTR_SPD_MASK \
1251 | MIIM_DM9161_INTR_LINK_MASK | MIIM_DM9161_INTR_MASK)
1252
1253static void mii_parse_dm9161_sr(uint mii_reg, struct net_device * dev)
1254{
1255 volatile struct fcc_enet_private *fep = dev->priv;
1256 uint regstat, timeout=0xffff;
1257
1258 while(!(mii_reg & 0x0020) && timeout--)
1259 {
1260 regstat=mk_mii_read(MII_BMSR);
1261 regstat |= fep->phy_addr <<23;
1262 mii_reg = mii_send_receive(fep->fip,regstat);
1263 }
1264
1265 mii_parse_sr(mii_reg, dev);
1266}
1267
1268static void mii_parse_dm9161_scsr(uint mii_reg, struct net_device * dev)
1269{
1270 volatile struct fcc_enet_private *fep = dev->priv;
1271 uint s = fep->phy_status;
1272
1273 s &= ~(PHY_STAT_SPMASK);
1274 switch((mii_reg >>12) & 0xf) {
1275 case 1:
1276 {
1277 s |= PHY_STAT_10HDX;
1278 printk("10BaseT Half Duplex\n");
1279 break;
1280 }
1281 case 2:
1282 {
1283 s |= PHY_STAT_10FDX;
1284 printk("10BaseT Full Duplex\n");
1285 break;
1286 }
1287 case 4:
1288 {
1289 s |= PHY_STAT_100HDX;
1290 printk("100BaseT Half Duplex\n");
1291 break;
1292 }
1293 case 8:
1294 {
1295 s |= PHY_STAT_100FDX;
1296 printk("100BaseT Full Duplex\n");
1297 break;
1298 }
1299 }
1300
1301 fep->phy_status = s;
1302
1303}
1304
1305static void mii_dm9161_wait(uint mii_reg, struct net_device *dev)
1306{
1307 int timeout = HZ;
1308
1309 /* Davicom takes a bit to come up after a reset,
1310 * so wait here for a bit */
 1311 schedule_timeout_uninterruptible(timeout);
 1312}
1313
1314static phy_info_t phy_info_dm9161 = {
1315 0x00181b88,
1316 "Davicom DM9161E",
1317 (const phy_cmd_t[]) { /* config */
1318 { mk_mii_write(MII_BMCR, MIIM_DM9161_CR_STOP), NULL},
1319 /* Do not bypass the scrambler/descrambler */
1320 { mk_mii_write(MIIM_DM9161_SCR, MIIM_DM9161_SCR_INIT), NULL},
1321 /* Configure 10BTCSR register */
1322 { mk_mii_write(MIIM_DM9161_10BTCSR, MIIM_DM9161_10BTCSR_INIT),NULL},
1323 /* Configure some basic stuff */
1324 { mk_mii_write(MII_BMCR, 0x1000), NULL},
1325 { mk_mii_read(MII_BMCR), mii_parse_cr },
1326 { mk_mii_read(MII_ADVERTISE), mii_parse_anar },
1327 { mk_mii_end,}
1328 },
1329 (const phy_cmd_t[]) { /* startup */
1330 /* Restart Auto Negotiation */
1331 { mk_mii_write(MII_BMCR, MIIM_DM9161_CR_RSTAN), NULL},
1332 /* Status is read once to clear old link state */
1333 { mk_mii_read(MII_BMSR), mii_dm9161_wait},
1334 /* Auto-negotiate */
1335 { mk_mii_read(MII_BMSR), mii_parse_dm9161_sr},
1336 /* Read the status */
1337 { mk_mii_read(MIIM_DM9161_SCSR), mii_parse_dm9161_scsr},
1338 /* Clear any pending interrupts */
1339 { mk_mii_read(MIIM_DM9161_INTR), NULL},
1340 /* Enable Interrupts */
1341 { mk_mii_write(MIIM_DM9161_INTR, MIIM_DM9161_INTR_INIT), NULL},
1342 { mk_mii_end,}
1343 },
1344 (const phy_cmd_t[]) { /* ack_int */
1345 { mk_mii_read(MIIM_DM9161_INTR), NULL},
1346#if 0
1347 { mk_mii_read(MII_BMSR), NULL},
1348 { mk_mii_read(MII_BMSR), mii_parse_dm9161_sr},
1349 { mk_mii_read(MIIM_DM9161_SCSR), mii_parse_dm9161_scsr},
1350#endif
1351 { mk_mii_end,}
1352 },
1353 (const phy_cmd_t[]) { /* shutdown */
1354 { mk_mii_read(MIIM_DM9161_INTR),NULL},
1355 { mk_mii_write(MIIM_DM9161_INTR, MIIM_DM9161_INTR_STOP), NULL},
1356 { mk_mii_end,}
1357 },
1358};
1359#endif /* CONFIG_FCC_DM9161 */
1360
1361static phy_info_t *phy_info[] = {
1362
1363#ifdef CONFIG_FCC_LXT970
1364 &phy_info_lxt970,
 1365#endif /* CONFIG_FCC_LXT970 */
 1366
 1367#ifdef CONFIG_FCC_LXT971
 1368 &phy_info_lxt971,
 1369#endif /* CONFIG_FCC_LXT971 */
 1370
 1371#ifdef CONFIG_FCC_QS6612
 1372 &phy_info_qs6612,
 1373#endif /* CONFIG_FCC_QS6612 */
 1374
 1375#ifdef CONFIG_FCC_DM9131
 1376 &phy_info_dm9131,
 1377#endif /* CONFIG_FCC_DM9131 */
1378
1379#ifdef CONFIG_FCC_DM9161
1380 &phy_info_dm9161,
1381#endif /* CONFIG_FCC_DM9161 */
1382
1383#ifdef CONFIG_FCC_GENERIC_PHY
1384 /* Generic PHY support. This must be the last PHY in the table.
1385 * It will be used to support any PHY that doesn't match a previous
1386 * entry in the table.
1387 */
1388 &phy_info_generic,
1389#endif /* CONFIG_FCC_GENERIC_PHY */
1390
1391 NULL
1392};
1393
1394static void mii_display_status(void *data)
1395{
1396 struct net_device *dev = data;
1397 volatile struct fcc_enet_private *fep = dev->priv;
1398 uint s = fep->phy_status;
1399
1400 if (!fep->link && !fep->old_link) {
1401 /* Link is still down - don't print anything */
1402 return;
1403 }
1404
1405 printk("%s: status: ", dev->name);
1406
1407 if (!fep->link) {
1408 printk("link down");
1409 } else {
1410 printk("link up");
1411
1412 switch(s & PHY_STAT_SPMASK) {
1413 case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break;
1414 case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break;
1415 case PHY_STAT_10FDX: printk(", 10 Mbps Full Duplex"); break;
1416 case PHY_STAT_10HDX: printk(", 10 Mbps Half Duplex"); break;
1417 default:
1418 printk(", Unknown speed/duplex");
1419 }
1420
1421 if (s & PHY_STAT_ANC)
1422 printk(", auto-negotiation complete");
1423 }
1424
1425 if (s & PHY_STAT_FAULT)
1426 printk(", remote fault");
1427
1428 printk(".\n");
1429}
1430
1431static void mii_display_config(void *data)
1432{
1433 struct net_device *dev = data;
1434 volatile struct fcc_enet_private *fep = dev->priv;
1435 uint s = fep->phy_status;
1436
1437 printk("%s: config: auto-negotiation ", dev->name);
1438
1439 if (s & PHY_CONF_ANE)
1440 printk("on");
1441 else
1442 printk("off");
1443
1444 if (s & PHY_CONF_100FDX)
1445 printk(", 100FDX");
1446 if (s & PHY_CONF_100HDX)
1447 printk(", 100HDX");
1448 if (s & PHY_CONF_10FDX)
1449 printk(", 10FDX");
1450 if (s & PHY_CONF_10HDX)
1451 printk(", 10HDX");
1452 if (!(s & PHY_CONF_SPMASK))
1453 printk(", No speed/duplex selected?");
1454
1455 if (s & PHY_CONF_LOOP)
1456 printk(", loopback enabled");
1457
1458 printk(".\n");
1459
1460 fep->sequence_done = 1;
1461}
1462
1463static void mii_relink(struct net_device *dev)
1464{
1465 struct fcc_enet_private *fep = dev->priv;
1466 int duplex = 0;
1467
1468 fep->old_link = fep->link;
1469 fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
1470
1471#ifdef MDIO_DEBUG
1472 printk(" mii_relink: link=%d\n", fep->link);
1473#endif
1474
1475 if (fep->link) {
1476 if (fep->phy_status
1477 & (PHY_STAT_100FDX | PHY_STAT_10FDX))
1478 duplex = 1;
1479 fcc_restart(dev, duplex);
1480#ifdef MDIO_DEBUG
1481 printk(" mii_relink: duplex=%d\n", duplex);
1482#endif
1483 }
1484}
1485
1486static void mii_queue_relink(uint mii_reg, struct net_device *dev)
1487{
1488 struct fcc_enet_private *fep = dev->priv;
1489
1490 mii_relink(dev);
1491
1492 schedule_work(&fep->phy_relink);
1493}
1494
1495static void mii_queue_config(uint mii_reg, struct net_device *dev)
1496{
1497 struct fcc_enet_private *fep = dev->priv;
1498
1499 schedule_work(&fep->phy_display_config);
1500}
1501
1502phy_cmd_t phy_cmd_relink[] = { { mk_mii_read(MII_BMCR), mii_queue_relink },
1503 { mk_mii_end, } };
1504phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_BMCR), mii_queue_config },
1505 { mk_mii_end, } };
1506
1507
1508/* Read remainder of PHY ID.
1509*/
1510static void
1511mii_discover_phy3(uint mii_reg, struct net_device *dev)
1512{
1513 struct fcc_enet_private *fep;
1514 int i;
1515
1516 fep = dev->priv;
1517 printk("mii_reg: %08x\n", mii_reg);
1518 fep->phy_id |= (mii_reg & 0xffff);
1519
1520 for(i = 0; phy_info[i]; i++)
1521 if((phy_info[i]->id == (fep->phy_id >> 4)) || !phy_info[i]->id)
1522 break;
1523
1524 if(!phy_info[i])
1525 panic("%s: PHY id 0x%08x is not supported!\n",
1526 dev->name, fep->phy_id);
1527
1528 fep->phy = phy_info[i];
1529 fep->phy_id_done = 1;
1530
1531 printk("%s: Phy @ 0x%x, type %s (0x%08x)\n",
1532 dev->name, fep->phy_addr, fep->phy->name, fep->phy_id);
1533}
1534
1535/* Scan all of the MII PHY addresses looking for someone to respond
1536 * with a valid ID. This usually happens quickly.
1537 */
1538static void
1539mii_discover_phy(uint mii_reg, struct net_device *dev)
1540{
1541 struct fcc_enet_private *fep;
1542 uint phytype;
1543
1544 fep = dev->priv;
1545
1546 if ((phytype = (mii_reg & 0xffff)) != 0xffff) {
1547
1548 /* Got first part of ID, now get remainder. */
1549 fep->phy_id = phytype << 16;
1550 mii_queue(dev, mk_mii_read(MII_PHYSID2), mii_discover_phy3);
1551 } else {
1552 fep->phy_addr++;
1553 if (fep->phy_addr < 32) {
1554 mii_queue(dev, mk_mii_read(MII_PHYSID1),
1555 mii_discover_phy);
1556 } else {
1557 printk("fec: No PHY device found.\n");
1558 }
1559 }
1560}
1561#endif /* CONFIG_USE_MDIO */
1562
1563#ifdef PHY_INTERRUPT
1564/* This interrupt occurs when the PHY detects a link change. */
1565static irqreturn_t
1566mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs)
1567{
1568 struct net_device *dev = dev_id;
1569 struct fcc_enet_private *fep = dev->priv;
1570 fcc_info_t *fip = fep->fip;
1571
1572 if (fep->phy) {
1573 /* We don't want to be interrupted by an FCC
1574 * interrupt here.
1575 */
1576 disable_irq_nosync(fip->fc_interrupt);
1577
1578 mii_do_cmd(dev, fep->phy->ack_int);
1579 /* restart and display status */
1580 mii_do_cmd(dev, phy_cmd_relink);
1581
1582 enable_irq(fip->fc_interrupt);
1583 }
1584 return IRQ_HANDLED;
1585}
1586#endif /* ifdef PHY_INTERRUPT */
1587
1588#if 0 /* This should be fixed someday */
1589/* Set or clear the multicast filter for this adaptor.
1590 * Skeleton taken from sunlance driver.
1591 * The CPM Ethernet implementation allows Multicast as well as individual
1592 * MAC address filtering. Some of the drivers check to make sure it is
1593 * a group multicast address, and discard those that are not. I guess I
1594 * will do the same for now, but just remove the test if you want
1595 * individual filtering as well (do the upper net layers want or support
1596 * this kind of feature?).
1597 */
1598static void
1599set_multicast_list(struct net_device *dev)
1600{
1601 struct fcc_enet_private *cep;
1602 struct dev_mc_list *dmi;
1603 u_char *mcptr, *tdptr;
1604 volatile fcc_enet_t *ep;
1605 int i, j;
1606
1607 cep = (struct fcc_enet_private *)dev->priv;
1608
1609return;
1610 /* Get pointer to FCC area in parameter RAM.
1611 */
1612 ep = (fcc_enet_t *)dev->base_addr;
1613
1614 if (dev->flags&IFF_PROMISC) {
1615
1616 /* Log any net taps. */
1617 printk("%s: Promiscuous mode enabled.\n", dev->name);
1618 cep->fccp->fcc_fpsmr |= FCC_PSMR_PRO;
1619 } else {
1620
1621 cep->fccp->fcc_fpsmr &= ~FCC_PSMR_PRO;
1622
1623 if (dev->flags & IFF_ALLMULTI) {
1624 /* Catch all multicast addresses, so set the
1625 * filter to all 1's.
1626 */
1627 ep->fen_gaddrh = 0xffffffff;
1628 ep->fen_gaddrl = 0xffffffff;
1629 }
1630 else {
1631 /* Clear filter and add the addresses in the list.
1632 */
1633 ep->fen_gaddrh = 0;
1634 ep->fen_gaddrl = 0;
1635
1636 dmi = dev->mc_list;
1637
1638 for (i=0; i<dev->mc_count; i++, dmi = dmi->next) {
1639
1640 /* Only support group multicast for now.
1641 */
1642 if (!(dmi->dmi_addr[0] & 1))
1643 continue;
1644
1645 /* The address in dmi_addr is LSB first,
1646 * and taddr is MSB first. We have to
1647 * copy bytes MSB first from dmi_addr.
1648 */
1649 mcptr = (u_char *)dmi->dmi_addr + 5;
1650 tdptr = (u_char *)&ep->fen_taddrh;
1651 for (j=0; j<6; j++)
1652 *tdptr++ = *mcptr--;
1653
1654 /* Ask CPM to run CRC and set bit in
1655 * filter mask.
1656 */
1657 cpmp->cp_cpcr = mk_cr_cmd(cep->fip->fc_cpmpage,
1658 cep->fip->fc_cpmblock, 0x0c,
1659 CPM_CR_SET_GADDR) | CPM_CR_FLG;
1660 udelay(10);
1661 while (cpmp->cp_cpcr & CPM_CR_FLG);
1662 }
1663 }
1664 }
1665}
1666#endif /* if 0 */
1667
1668
1669/* Set the individual MAC address.
1670 */
1671int fcc_enet_set_mac_address(struct net_device *dev, void *p)
1672{
1673 struct sockaddr *addr= (struct sockaddr *) p;
1674 struct fcc_enet_private *cep;
1675 volatile fcc_enet_t *ep;
1676 unsigned char *eap;
1677 int i;
1678
1679 cep = (struct fcc_enet_private *)(dev->priv);
1680 ep = cep->ep;
1681
1682 if (netif_running(dev))
1683 return -EBUSY;
1684
1685 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1686
1687 eap = (unsigned char *) &(ep->fen_paddrh);
1688 for (i=5; i>=0; i--)
1689 *eap++ = addr->sa_data[i];
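	/* Illustrative note (added comment): the loop above stores the
	 * address bytes in reverse order, so for a MAC address of
	 * 00:11:22:33:44:55 the bytes written starting at fen_paddrh are
	 * 55 44 33 22 11 00, matching the reversed copy used for the
	 * group-address registers in the (disabled) set_multicast_list().
	 */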
1690
1691 return 0;
1692}
1693
1694
1695/* Initialize the CPM Ethernet on FCC.
1696 */
1697static int __init fec_enet_init(void)
1698{
1699 struct net_device *dev;
1700 struct fcc_enet_private *cep;
1701 fcc_info_t *fip;
1702 int i, np, err;
1703 volatile cpm2_map_t *immap;
1704 volatile iop_cpm2_t *io;
1705
1706 immap = (cpm2_map_t *)CPM_MAP_ADDR; /* and to internal registers */
1707 io = &immap->im_ioport;
1708
1709 np = sizeof(fcc_ports) / sizeof(fcc_info_t);
1710 fip = fcc_ports;
1711
1712 while (np-- > 0) {
1713 /* Create an Ethernet device instance.
1714 */
1715 dev = alloc_etherdev(sizeof(*cep));
1716 if (!dev)
1717 return -ENOMEM;
1718
1719 cep = dev->priv;
1720 spin_lock_init(&cep->lock);
1721 cep->fip = fip;
1722
1723 init_fcc_shutdown(fip, cep, immap);
1724 init_fcc_ioports(fip, io, immap);
1725 init_fcc_param(fip, dev, immap);
1726
1727 dev->base_addr = (unsigned long)(cep->ep);
1728
1729 /* The CPM Ethernet specific entries in the device
1730 * structure.
1731 */
1732 dev->open = fcc_enet_open;
1733 dev->hard_start_xmit = fcc_enet_start_xmit;
1734 dev->tx_timeout = fcc_enet_timeout;
1735 dev->watchdog_timeo = TX_TIMEOUT;
1736 dev->stop = fcc_enet_close;
1737 dev->get_stats = fcc_enet_get_stats;
1738 /* dev->set_multicast_list = set_multicast_list; */
1739 dev->set_mac_address = fcc_enet_set_mac_address;
1740
1741 init_fcc_startup(fip, dev);
1742
1743 err = register_netdev(dev);
1744 if (err) {
1745 free_netdev(dev);
1746 return err;
1747 }
1748
1749 printk("%s: FCC ENET Version 0.3, ", dev->name);
1750 for (i=0; i<5; i++)
1751 printk("%02x:", dev->dev_addr[i]);
1752 printk("%02x\n", dev->dev_addr[5]);
1753
1754#ifdef CONFIG_USE_MDIO
1755 /* Queue up command to detect the PHY and initialize the
1756 * remainder of the interface.
1757 */
1758 cep->phy_id_done = 0;
1759 cep->phy_addr = fip->fc_phyaddr;
1760 mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy);
1761 INIT_WORK(&cep->phy_relink, mii_display_status, dev);
1762 INIT_WORK(&cep->phy_display_config, mii_display_config, dev);
1763#endif /* CONFIG_USE_MDIO */
1764
1765 fip++;
1766 }
1767
1768 return 0;
1769}
1770module_init(fec_enet_init);
1771
1772/* Make sure the device is shut down during initialization.
1773*/
1774static void __init
1775init_fcc_shutdown(fcc_info_t *fip, struct fcc_enet_private *cep,
1776 volatile cpm2_map_t *immap)
1777{
1778 volatile fcc_enet_t *ep;
1779 volatile fcc_t *fccp;
1780
1781 /* Get pointer to FCC area in parameter RAM.
1782 */
1783 ep = (fcc_enet_t *)(&immap->im_dprambase[fip->fc_proff]);
1784
1785 /* And another to the FCC register area.
1786 */
1787 fccp = (volatile fcc_t *)(&immap->im_fcc[fip->fc_fccnum]);
1788 cep->fccp = fccp; /* Keep the pointers handy */
1789 cep->ep = ep;
1790
1791 /* Disable receive and transmit in case someone left it running.
1792 */
1793 fccp->fcc_gfmr &= ~(FCC_GFMR_ENR | FCC_GFMR_ENT);
1794}
1795
1796/* Initialize the I/O pins for the FCC Ethernet.
1797*/
1798static void __init
1799init_fcc_ioports(fcc_info_t *fip, volatile iop_cpm2_t *io,
1800 volatile cpm2_map_t *immap)
1801{
1802
1803 /* FCC1 pins are on port A/C. FCC2/3 are port B/C.
1804 */
1805 if (fip->fc_proff == PROFF_FCC1) {
1806 /* Configure port A and C pins for FCC1 Ethernet.
1807 */
1808 io->iop_pdira &= ~PA1_DIRA_BOUT;
1809 io->iop_pdira |= PA1_DIRA_BIN;
1810 io->iop_psora &= ~PA1_PSORA_BOUT;
1811 io->iop_psora |= PA1_PSORA_BIN;
1812 io->iop_ppara |= (PA1_DIRA_BOUT | PA1_DIRA_BIN);
1813 }
1814 if (fip->fc_proff == PROFF_FCC2) {
1815 /* Configure port B and C pins for FCC Ethernet.
1816 */
1817 io->iop_pdirb &= ~PB2_DIRB_BOUT;
1818 io->iop_pdirb |= PB2_DIRB_BIN;
1819 io->iop_psorb &= ~PB2_PSORB_BOUT;
1820 io->iop_psorb |= PB2_PSORB_BIN;
1821 io->iop_pparb |= (PB2_DIRB_BOUT | PB2_DIRB_BIN);
1822 }
1823 if (fip->fc_proff == PROFF_FCC3) {
1824 /* Configure port B and C pins for FCC Ethernet.
1825 */
1826 io->iop_pdirb &= ~PB3_DIRB_BOUT;
1827 io->iop_pdirb |= PB3_DIRB_BIN;
1828 io->iop_psorb &= ~PB3_PSORB_BOUT;
1829 io->iop_psorb |= PB3_PSORB_BIN;
1830 io->iop_pparb |= (PB3_DIRB_BOUT | PB3_DIRB_BIN);
1831
1832 io->iop_pdirc &= ~PC3_DIRC_BOUT;
1833 io->iop_pdirc |= PC3_DIRC_BIN;
1834 io->iop_psorc &= ~PC3_PSORC_BOUT;
1835 io->iop_psorc |= PC3_PSORC_BIN;
1836 io->iop_pparc |= (PC3_DIRC_BOUT | PC3_DIRC_BIN);
1837
1838 }
1839
 1840 /* Port C carries the clocks.
1841 */
1842 io->iop_psorc &= ~(fip->fc_trxclocks);
1843 io->iop_pdirc &= ~(fip->fc_trxclocks);
1844 io->iop_pparc |= fip->fc_trxclocks;
1845
1846#ifdef CONFIG_USE_MDIO
1847 /* ....and the MII serial clock/data.
1848 */
	io->iop_pdatc |= (fip->fc_mdio | fip->fc_mdck);
	io->iop_podrc &= ~(fip->fc_mdio | fip->fc_mdck);
	io->iop_pdirc |= (fip->fc_mdio | fip->fc_mdck);
	io->iop_pparc &= ~(fip->fc_mdio | fip->fc_mdck);
#endif /* CONFIG_USE_MDIO */

	/* Configure the CPM mux (CMXFCR) clock routing.
	 * First clear this FCC's clock bits, then set the ones we want.
	 */
	immap->im_cpmux.cmx_fcr &= ~(fip->fc_clockmask);
	immap->im_cpmux.cmx_fcr |= fip->fc_clockroute;
}

static void __init
init_fcc_param(fcc_info_t *fip, struct net_device *dev,
	       volatile cpm2_map_t *immap)
{
	unsigned char *eap;
	unsigned long mem_addr;
	bd_t *bd;
	int i, j;
	struct fcc_enet_private *cep;
	volatile fcc_enet_t *ep;
	volatile cbd_t *bdp;
	volatile cpm_cpm2_t *cp;

	cep = (struct fcc_enet_private *)(dev->priv);
	ep = cep->ep;
	cp = cpmp;

	bd = (bd_t *)__res;

	/* Zero the whole parameter RAM area rather than trying to clear
	 * each field individually.
	 */
	memset((char *)ep, 0, sizeof(fcc_enet_t));

	/* Allocate space for the buffer descriptors from regular memory.
	 * Initialize base addresses for the buffer descriptors.
	 */
	cep->rx_bd_base = (cbd_t *)kmalloc(sizeof(cbd_t) * RX_RING_SIZE,
			GFP_KERNEL | GFP_DMA);
	ep->fen_genfcc.fcc_rbase = __pa(cep->rx_bd_base);
	cep->tx_bd_base = (cbd_t *)kmalloc(sizeof(cbd_t) * TX_RING_SIZE,
			GFP_KERNEL | GFP_DMA);
	ep->fen_genfcc.fcc_tbase = __pa(cep->tx_bd_base);
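	/* The CPM DMA engine addresses the BD rings physically, which is
	 * why the parameter RAM is given __pa() values rather than kernel
	 * virtual addresses.
	 */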

	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
	cep->cur_rx = cep->rx_bd_base;

	ep->fen_genfcc.fcc_rstate = (CPMFCR_GBL | CPMFCR_EB) << 24;
	ep->fen_genfcc.fcc_tstate = (CPMFCR_GBL | CPMFCR_EB) << 24;

	/* Set maximum bytes per receive buffer.
	 * It must be a multiple of 32.
	 */
	ep->fen_genfcc.fcc_mrblr = PKT_MAXBLR_SIZE;

	/* Allocate space in the reserved FCC area of DPRAM for the
	 * internal buffers.  No one uses this space (yet), so we
	 * can do this.  Later, we will add resource management for
	 * this area.
	 */
	mem_addr = CPM_FCC_SPECIAL_BASE + (fip->fc_fccnum * 128);
	ep->fen_genfcc.fcc_riptr = mem_addr;
	ep->fen_genfcc.fcc_tiptr = mem_addr+32;
	ep->fen_padptr = mem_addr+64;
	memset((char *)(&(immap->im_dprambase[(mem_addr+64)])), 0x88, 32);
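	/* RIPTR/TIPTR point at small scratch areas the CPM microcode uses
	 * internally; PADPTR presumably supplies the bytes used to pad
	 * short frames (filled with an arbitrary 0x88 pattern here).
	 */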

	ep->fen_genfcc.fcc_rbptr = 0;
	ep->fen_genfcc.fcc_tbptr = 0;
	ep->fen_genfcc.fcc_rcrc = 0;
	ep->fen_genfcc.fcc_tcrc = 0;
	ep->fen_genfcc.fcc_res1 = 0;
	ep->fen_genfcc.fcc_res2 = 0;

	ep->fen_camptr = 0;	/* CAM isn't used in this driver */

	/* Set CRC preset and mask.
	 */
	ep->fen_cmask = 0xdebb20e3;
	ep->fen_cpres = 0xffffffff;
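	/* These appear to be the standard Ethernet CRC-32 values: an
	 * all-ones preset and the expected receive residue/mask.
	 */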

	ep->fen_crcec = 0;	/* CRC Error counter */
	ep->fen_alec = 0;	/* alignment error counter */
	ep->fen_disfc = 0;	/* discard frame counter */
	ep->fen_retlim = 15;	/* Retry limit threshold */
	ep->fen_pper = 0;	/* Normal persistence */

	/* Clear hash filter tables.
	 */
	ep->fen_gaddrh = 0;
	ep->fen_gaddrl = 0;
	ep->fen_iaddrh = 0;
	ep->fen_iaddrl = 0;

	/* Clear the Out-of-sequence TxBD.
	 */
	ep->fen_tfcstat = 0;
	ep->fen_tfclen = 0;
	ep->fen_tfcptr = 0;

	ep->fen_mflr = PKT_MAXBUF_SIZE;   /* maximum frame length register */
	ep->fen_minflr = PKT_MINBUF_SIZE; /* minimum frame length register */

	/* Set Ethernet station address.
	 *
	 * This is supplied in the board information structure, so we
	 * copy that into the controller.
	 * So far, we have only been given one Ethernet address.  We make
	 * it unique by setting a few bits in the upper byte of the
	 * non-static part of the address.
	 */
	eap = (unsigned char *)&(ep->fen_paddrh);
	for (i=5; i>=0; i--) {

/*
 * The EP8260 only uses FCC3, so we can safely give it the real
 * MAC address.
 */
#ifdef CONFIG_SBC82xx
		if (i == 5) {
			/* bd->bi_enetaddr holds the SCC0 address; the FCC
			   devices count up from there */
			dev->dev_addr[i] = bd->bi_enetaddr[i] & ~3;
			dev->dev_addr[i] += 1 + fip->fc_fccnum;
			*eap++ = dev->dev_addr[i];
		}
#else
#ifndef CONFIG_RPX8260
		if (i == 3) {
			dev->dev_addr[i] = bd->bi_enetaddr[i];
			dev->dev_addr[i] |= (1 << (7 - fip->fc_fccnum));
			*eap++ = dev->dev_addr[i];
		} else
#endif
		{
			*eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i];
		}
#endif
	}

	ep->fen_taddrh = 0;
	ep->fen_taddrm = 0;
	ep->fen_taddrl = 0;

	ep->fen_maxd1 = PKT_MAXDMA_SIZE;	/* maximum DMA1 length */
	ep->fen_maxd2 = PKT_MAXDMA_SIZE;	/* maximum DMA2 length */

	/* Clear stat counters, in case we ever enable RMON.
	 */
	ep->fen_octc = 0;
	ep->fen_colc = 0;
	ep->fen_broc = 0;
	ep->fen_mulc = 0;
	ep->fen_uspc = 0;
	ep->fen_frgc = 0;
	ep->fen_ospc = 0;
	ep->fen_jbrc = 0;
	ep->fen_p64c = 0;
	ep->fen_p65c = 0;
	ep->fen_p128c = 0;
	ep->fen_p256c = 0;
	ep->fen_p512c = 0;
	ep->fen_p1024c = 0;

	ep->fen_rfthr = 0;	/* Suggested by manual */
	ep->fen_rfcnt = 0;
	ep->fen_cftype = 0;

	/* Now initialize the transmit buffer descriptors and allocate
	 * host memory pages for the receive ring.
	 */
	bdp = cep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {

		/* Initialize each transmit BD.
		 */
		bdp->cbd_sc = 0;
		bdp->cbd_datlen = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	bdp = cep->rx_bd_base;
	for (i=0; i<FCC_ENET_RX_PAGES; i++) {

		/* Allocate a page.
		 */
		mem_addr = __get_free_page(GFP_KERNEL);

		/* Initialize the BD for every fragment in the page.
		 */
		for (j=0; j<FCC_ENET_RX_FRPPG; j++) {
			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
			bdp->cbd_datlen = 0;
			bdp->cbd_bufaddr = __pa(mem_addr);
			mem_addr += FCC_ENET_RX_FRSIZE;
			bdp++;
		}
	}

	/* Set the last buffer to wrap.
	 */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;
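	/* The WRAP bit on the last descriptor of each ring tells the CPM to
	 * go back to tbase/rbase, so the base pointer plus the ring size
	 * fully describes each ring.
	 */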

	/* Re-initialize the channel now.  This is done later than the
	 * manual describes because we have only just finished the BD
	 * initialization.
	 */
	cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock, 0x0c,
			CPM_CR_INIT_TRX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);
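	/* CP commands are issued by writing the page/block/opcode with
	 * CPM_CR_FLG set; the CP clears the flag when the command has
	 * completed, hence the busy-wait above.
	 */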

	cep->skb_cur = cep->skb_dirty = 0;
}

/* Let 'er rip.
*/
static void __init
init_fcc_startup(fcc_info_t *fip, struct net_device *dev)
{
	volatile fcc_t *fccp;
	struct fcc_enet_private *cep;

	cep = (struct fcc_enet_private *)(dev->priv);
	fccp = cep->fccp;

#ifdef CONFIG_RPX8260
#ifdef PHY_INTERRUPT
	/* Route PHY interrupt to IRQ.  The following code only works for
	 * IRQ1 - IRQ7.  It does not work for Port C interrupts.
	 */
	*((volatile u_char *) (RPX_CSR_ADDR + 13)) &= ~BCSR13_FETH_IRQMASK;
	*((volatile u_char *) (RPX_CSR_ADDR + 13)) |=
		((PHY_INTERRUPT - SIU_INT_IRQ1 + 1) << 4);
#endif
	/* Initialize MDIO pins. */
	*((volatile u_char *) (RPX_CSR_ADDR + 4)) &= ~BCSR4_MII_MDC;
	*((volatile u_char *) (RPX_CSR_ADDR + 4)) |=
		BCSR4_MII_READ | BCSR4_MII_MDIO;
	/* Enable external LXT971 PHY. */
	*((volatile u_char *) (RPX_CSR_ADDR + 4)) |= BCSR4_EN_PHY;
	udelay(1000);
	*((volatile u_char *) (RPX_CSR_ADDR + 4)) |= BCSR4_EN_MII;
	udelay(1000);
#endif /* ifdef CONFIG_RPX8260 */

	fccp->fcc_fcce = 0xffff;	/* Clear any pending events */

	/* Leave FCC interrupts masked for now.  Will be unmasked by
	 * fcc_restart().
	 */
	fccp->fcc_fccm = 0;

	/* Install our interrupt handler.
	 */
	if (request_irq(fip->fc_interrupt, fcc_enet_interrupt, 0, "fenet",
			dev) < 0)
		printk("Can't get FCC IRQ %d\n", fip->fc_interrupt);

#ifdef PHY_INTERRUPT
#ifdef CONFIG_ADS8272
	if (request_irq(PHY_INTERRUPT, mii_link_interrupt, SA_SHIRQ,
			"mii", dev) < 0)
		printk(KERN_CRIT "Can't get MII IRQ %d\n", PHY_INTERRUPT);
#else
	/* Make IRQn edge triggered.  This does not work if PHY_INTERRUPT is
	 * on Port C.
	 */
	((volatile cpm2_map_t *) CPM_MAP_ADDR)->im_intctl.ic_siexr |=
		(1 << (14 - (PHY_INTERRUPT - SIU_INT_IRQ1)));

	if (request_irq(PHY_INTERRUPT, mii_link_interrupt, 0,
			"mii", dev) < 0)
		printk(KERN_CRIT "Can't get MII IRQ %d\n", PHY_INTERRUPT);
#endif
#endif /* PHY_INTERRUPT */

	/* Set GFMR to enable Ethernet operating mode.
	 */
	fccp->fcc_gfmr = (FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);

	/* Set sync/delimiters.
	 */
	fccp->fcc_fdsr = 0xd555;
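	/* 0xd555 appears to correspond to the Ethernet preamble/SFD
	 * pattern: 0x55 preamble bytes terminated by the 0xd5
	 * start-of-frame delimiter.
	 */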

	/* Set protocol specific processing mode for Ethernet.
	 * This will be adjusted for full-duplex operation once we can
	 * determine the negotiated duplex mode.
	 */
	fccp->fcc_fpsmr = FCC_PSMR_ENCRC;

#ifdef CONFIG_PQ2ADS
	/* Enable the PHY. */
	*(volatile uint *)(BCSR_ADDR + 4) &= ~BCSR1_FETHIEN;
	*(volatile uint *)(BCSR_ADDR + 4) |= BCSR1_FETH_RST;
#endif
#if defined(CONFIG_PQ2ADS) || defined(CONFIG_PQ2FADS)
	/* Enable the 2nd PHY. */
	*(volatile uint *)(BCSR_ADDR + 12) &= ~BCSR3_FETHIEN2;
	*(volatile uint *)(BCSR_ADDR + 12) |= BCSR3_FETH2_RST;
#endif

#if defined(CONFIG_USE_MDIO) || defined(CONFIG_TQM8260)
	/* start in full duplex mode, and negotiate speed
	 */
	fcc_restart (dev, 1);
#else
	/* start in half duplex mode
	 */
	fcc_restart (dev, 0);
#endif
}

#ifdef CONFIG_USE_MDIO
/* MII command/status interface.
 * I'm not going to describe all of the details.  You can find the
 * protocol definition in many other places, including the data sheet
 * of most PHY parts.
 * I wonder what "they" were thinking (maybe they weren't) when they
 * left the I2C controller in the CPM but made me bit-bang these pins.
 */
#ifdef CONFIG_RPX8260
	/* The EP8260 has the MDIO pins in a BCSR instead of on Port C
	 * like most other boards.
	 */
#define MDIO_ADDR ((volatile u_char *)(RPX_CSR_ADDR + 4))
#define MAKE_MDIO_OUTPUT *MDIO_ADDR &= ~BCSR4_MII_READ
#define MAKE_MDIO_INPUT *MDIO_ADDR |= BCSR4_MII_READ | BCSR4_MII_MDIO
#define OUT_MDIO(bit) \
	if (bit) \
		*MDIO_ADDR |= BCSR4_MII_MDIO; \
	else \
		*MDIO_ADDR &= ~BCSR4_MII_MDIO;
#define IN_MDIO (*MDIO_ADDR & BCSR4_MII_MDIO)
#define OUT_MDC(bit) \
	if (bit) \
		*MDIO_ADDR |= BCSR4_MII_MDC; \
	else \
		*MDIO_ADDR &= ~BCSR4_MII_MDC;
#else /* ifdef CONFIG_RPX8260 */
	/* This is for the usual case where the MDIO pins are on Port C.
	 */
#define MDIO_ADDR (((volatile cpm2_map_t *)CPM_MAP_ADDR)->im_ioport)
#define MAKE_MDIO_OUTPUT MDIO_ADDR.iop_pdirc |= fip->fc_mdio
#define MAKE_MDIO_INPUT MDIO_ADDR.iop_pdirc &= ~fip->fc_mdio
#define OUT_MDIO(bit) \
	if (bit) \
		MDIO_ADDR.iop_pdatc |= fip->fc_mdio; \
	else \
		MDIO_ADDR.iop_pdatc &= ~fip->fc_mdio;
#define IN_MDIO ((MDIO_ADDR.iop_pdatc) & fip->fc_mdio)
#define OUT_MDC(bit) \
	if (bit) \
		MDIO_ADDR.iop_pdatc |= fip->fc_mdck; \
	else \
		MDIO_ADDR.iop_pdatc &= ~fip->fc_mdck;
#endif /* ifdef CONFIG_RPX8260 */

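/* A Clause-22 MDIO frame is: 32 preamble '1' bits, a 2-bit start (01),
 * a 2-bit opcode (10 = read, 01 = write), 5 PHY address bits, 5 register
 * address bits, a 2-bit turnaround and 16 data bits.  For a read we drive
 * only the first 14 bits after the preamble and then sample the
 * turnaround and data from the PHY; a write drives all 32 bits.
 */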
static uint
mii_send_receive(fcc_info_t *fip, uint cmd)
{
	uint retval;
	int read_op, i, off;
	const int us = 1;

	read_op = ((cmd & 0xf0000000) == 0x60000000);

	/* Write preamble
	 */
	OUT_MDIO(1);
	MAKE_MDIO_OUTPUT;
	OUT_MDIO(1);
	for (i = 0; i < 32; i++)
	{
		udelay(us);
		OUT_MDC(1);
		udelay(us);
		OUT_MDC(0);
	}

	/* Write data
	 */
	for (i = 0, off = 31; i < (read_op ? 14 : 32); i++, --off)
	{
		OUT_MDIO((cmd >> off) & 0x00000001);
		udelay(us);
		OUT_MDC(1);
		udelay(us);
		OUT_MDC(0);
	}

	retval = cmd;

	if (read_op)
	{
		retval >>= 16;

		MAKE_MDIO_INPUT;
		udelay(us);
		OUT_MDC(1);
		udelay(us);
		OUT_MDC(0);

		for (i = 0; i < 16; i++)
		{
			udelay(us);
			OUT_MDC(1);
			udelay(us);
			retval <<= 1;
			if (IN_MDIO)
				retval++;
			OUT_MDC(0);
		}
	}

	MAKE_MDIO_INPUT;
	udelay(us);
	OUT_MDC(1);
	udelay(us);
	OUT_MDC(0);

	return retval;
}
#endif /* CONFIG_USE_MDIO */

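/* Take the transmitter down cleanly: issue GRACEFUL STOP TX, disable the
 * FCC, then issue RESTART TX so the channel is parked in a clean state,
 * and finally reclaim any queued tx skbs and reset the tx ring.
 */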
static void
fcc_stop(struct net_device *dev)
{
	struct fcc_enet_private *fep = (struct fcc_enet_private *)(dev->priv);
	volatile fcc_t *fccp = fep->fccp;
	fcc_info_t *fip = fep->fip;
	volatile fcc_enet_t *ep = fep->ep;
	volatile cpm_cpm2_t *cp = cpmp;
	volatile cbd_t *bdp;
	int i;

	if ((fccp->fcc_gfmr & (FCC_GFMR_ENR | FCC_GFMR_ENT)) == 0)
		return;	/* already down */

	fccp->fcc_fccm = 0;

	/* issue the graceful stop tx command */
	while (cp->cp_cpcr & CPM_CR_FLG);
	cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock,
			0x0c, CPM_CR_GRA_STOP_TX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);

	/* Disable transmit/receive */
	fccp->fcc_gfmr &= ~(FCC_GFMR_ENR | FCC_GFMR_ENT);

	/* issue the restart tx command */
	fccp->fcc_fcce = FCC_ENET_GRA;
	while (cp->cp_cpcr & CPM_CR_FLG);
	cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock,
			0x0c, CPM_CR_RESTART_TX) | CPM_CR_FLG;
	while (cp->cp_cpcr & CPM_CR_FLG);

	/* free tx buffers */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i=0; i<=TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i] != NULL) {
			dev_kfree_skb(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}
	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = TX_RING_SIZE;
	ep->fen_genfcc.fcc_tbptr = ep->fen_genfcc.fcc_tbase;

	/* Initialize the tx buffer descriptors. */
	bdp = fep->tx_bd_base;
	for (i=0; i<TX_RING_SIZE; i++) {
		bdp->cbd_sc = 0;
		bdp->cbd_datlen = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}
	/* Set the last buffer to wrap. */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;
}

static void
fcc_restart(struct net_device *dev, int duplex)
{
	struct fcc_enet_private *fep = (struct fcc_enet_private *)(dev->priv);
	volatile fcc_t *fccp = fep->fccp;

	/* stop any transmissions in progress */
	fcc_stop(dev);

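	/* Per the MPC8260 documentation, full-duplex operation on the FCC
	 * apparently requires both FDE and LPB to be set in FPSMR; this is
	 * not a diagnostic loopback mode.
	 */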
	if (duplex)
		fccp->fcc_fpsmr |= FCC_PSMR_FDE | FCC_PSMR_LPB;
	else
		fccp->fcc_fpsmr &= ~(FCC_PSMR_FDE | FCC_PSMR_LPB);

	/* Enable interrupts for transmit error, complete frame
	 * received, and any transmit buffer for which we have set
	 * the interrupt flag.
	 */
	fccp->fcc_fccm = (FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);

	/* Enable transmit/receive */
	fccp->fcc_gfmr |= FCC_GFMR_ENR | FCC_GFMR_ENT;
}

static int
fcc_enet_open(struct net_device *dev)
{
	struct fcc_enet_private *fep = dev->priv;

#ifdef CONFIG_USE_MDIO
	fep->sequence_done = 0;
	fep->link = 0;

	if (fep->phy) {
		fcc_restart(dev, 0);	/* always start in half-duplex */
		mii_do_cmd(dev, fep->phy->ack_int);
		mii_do_cmd(dev, fep->phy->config);
		mii_do_cmd(dev, phy_cmd_config); /* display configuration */
		while (!fep->sequence_done)
			schedule();

		mii_do_cmd(dev, fep->phy->startup);
		netif_start_queue(dev);
		return 0;		/* Success */
	}
	return -ENODEV;			/* No PHY we understand */
#else
	fep->link = 1;
	fcc_restart(dev, 0);		/* always start in half-duplex */
	netif_start_queue(dev);
	return 0;			/* Always succeed */
#endif /* CONFIG_USE_MDIO */
}
