/*
 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <panto@intracom.gr>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#include "fs_enet.h"

/*************************************************/

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")" "\n";

MODULE_AUTHOR("Pantelis Antoniou <panto@intracom.gr>");
MODULE_DESCRIPTION("Freescale Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

int fs_enet_debug = -1;	/* -1 == use FS_ENET_DEF_MSG_ENABLE as value */
module_param(fs_enet_debug, int, 0);
MODULE_PARM_DESC(fs_enet_debug,
		 "Freescale bitmapped debugging message enable value");

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev);
#endif

static void fs_set_multicast_list(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->set_multicast_list)(dev);
}

/* NAPI receive function */
static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct fs_enet_private *fep = container_of(napi, struct fs_enet_private, napi);
	struct net_device *dev = to_net_dev(fep->dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;

	if (!netif_running(dev))
		return 0;

	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	/* clear RX status bits for napi */
	(*fep->ops->napi_clear_rx_event)(dev);

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			skbn = skb;

		} else {
			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_receive_skb(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);

		if (received >= budget)
			break;
	}

	fep->cur_rx = bdp;

	if (received >= budget) {
		/* done */
		netif_rx_complete(dev, napi);
		(*fep->ops->napi_enable_rx)(dev);
	}
	return received;
}

/* non NAPI receive function */
static int fs_enet_rx_non_napi(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	const struct fs_platform_info *fpi = fep->fpi;
	cbd_t *bdp;
	struct sk_buff *skb, *skbn, *skbt;
	int received = 0;
	u16 pkt_len, sc;
	int curidx;
	/*
	 * First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {

		curidx = bdp - fep->rx_bd_base;

		/*
		 * Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((sc & BD_ENET_RX_LAST) == 0)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s rcv is not +last\n",
			       dev->name);

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
			  BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			fep->stats.rx_errors++;
			/* Frame too long or too short. */
			if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
				fep->stats.rx_length_errors++;
			/* Frame alignment */
			if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				fep->stats.rx_frame_errors++;
			/* CRC Error */
			if (sc & BD_ENET_RX_CR)
				fep->stats.rx_crc_errors++;
			/* FIFO overrun */
			if (sc & BD_ENET_RX_OV)
				fep->stats.rx_crc_errors++;

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			skbn = skb;

		} else {

			skb = fep->rx_skbuff[curidx];

			dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
					 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
					 DMA_FROM_DEVICE);

			/*
			 * Process the incoming frame.
			 */
			fep->stats.rx_packets++;
			pkt_len = CBDR_DATLEN(bdp) - 4;	/* remove CRC */
			fep->stats.rx_bytes += pkt_len + 4;

			if (pkt_len <= fpi->rx_copybreak) {
				/* +2 to make IP header L1 cache aligned */
				skbn = dev_alloc_skb(pkt_len + 2);
				if (skbn != NULL) {
					skb_reserve(skbn, 2);	/* align IP header */
					skb_copy_from_linear_data(skb,
						skbn->data, pkt_len);
					/* swap */
					skbt = skb;
					skb = skbn;
					skbn = skbt;
				}
			} else
				skbn = dev_alloc_skb(ENET_RX_FRSIZE);

			if (skbn != NULL) {
				skb_put(skb, pkt_len);	/* Make room */
				skb->protocol = eth_type_trans(skb, dev);
				received++;
				netif_rx(skb);
			} else {
				printk(KERN_WARNING DRV_MODULE_NAME
				       ": %s Memory squeeze, dropping packet.\n",
				       dev->name);
				fep->stats.rx_dropped++;
				skbn = skb;
			}
		}

		fep->rx_skbuff[curidx] = skbn;
		CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);

		/*
		 * Update BD pointer to next entry.
		 */
		if ((sc & BD_ENET_RX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->rx_bd_base;

		(*fep->ops->rx_bd_done)(dev);
	}

	fep->cur_rx = bdp;

	return 0;
}

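/*
 * Reclaim transmit buffer descriptors the hardware has finished with:
 * record error statistics, unmap the DMA buffer, free the skb and, if
 * the ring had been full, wake the transmit queue again.  Called from
 * the interrupt handler under fep->tx_lock.
 */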
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->tx_lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {

		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
			       dev->name);

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->tx_lock);

	if (do_wake)
		netif_wake_queue(dev);
}

/*
 * The interrupt handler.
 * This is called from the MPC core interrupt.
 */
static irqreturn_t
fs_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	u32 int_events;
	u32 int_clr_events;
	int nr, napi_ok;
	int handled;

	fep = netdev_priv(dev);
	fpi = fep->fpi;

	nr = 0;
	while ((int_events = (*fep->ops->get_int_events)(dev)) != 0) {

		nr++;

		int_clr_events = int_events;
		if (fpi->use_napi)
			int_clr_events &= ~fep->ev_napi_rx;

		(*fep->ops->clear_int_events)(dev, int_clr_events);

		if (int_events & fep->ev_err)
			(*fep->ops->ev_error)(dev, int_events);

		if (int_events & fep->ev_rx) {
			if (!fpi->use_napi)
				fs_enet_rx_non_napi(dev);
			else {
				napi_ok = napi_schedule_prep(&fep->napi);

				(*fep->ops->napi_disable_rx)(dev);
				(*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);

				/* NOTE: it is possible for FCCs in NAPI mode */
				/* to submit a spurious interrupt while in poll */
				if (napi_ok)
					__netif_rx_schedule(dev, &fep->napi);
			}
		}

		if (int_events & fep->ev_tx)
			fs_enet_tx(dev);
	}

	handled = nr > 0;
	return IRQ_RETVAL(handled);
}

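/*
 * Allocate receive skbs and (re)initialize both the receive and the
 * transmit buffer descriptor rings to their empty state.  Any buffers
 * still attached to the rings are released first via fs_cleanup_bds().
 */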
void fs_init_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int i;

	fs_cleanup_bds(dev);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->tx_free = fep->tx_ring;
	fep->cur_rx = fep->rx_bd_base;

	/*
	 * Initialize the receive buffer descriptors.
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		skb = dev_alloc_skb(ENET_RX_FRSIZE);
		if (skb == NULL) {
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s Memory squeeze, unable to allocate skb\n",
			       dev->name);
			break;
		}
		fep->rx_skbuff[i] = skb;
		CBDW_BUFADDR(bdp,
			dma_map_single(fep->dev, skb->data,
				L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				DMA_FROM_DEVICE));
		CBDW_DATLEN(bdp, 0);	/* zero */
		CBDW_SC(bdp, BD_ENET_RX_EMPTY |
			((i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP));
	}
	/*
	 * if we failed, fillup remainder
	 */
	for (; i < fep->rx_ring; i++, bdp++) {
		fep->rx_skbuff[i] = NULL;
		CBDW_SC(bdp, (i < fep->rx_ring - 1) ? 0 : BD_SC_WRAP);
	}

	/*
	 * ...and the same for transmit.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		fep->tx_skbuff[i] = NULL;
		CBDW_BUFADDR(bdp, 0);
		CBDW_DATLEN(bdp, 0);
		CBDW_SC(bdp, (i < fep->tx_ring - 1) ? 0 : BD_SC_WRAP);
	}
}

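/*
 * Release every skb still attached to the transmit and receive rings,
 * unmapping the associated DMA buffers first.
 */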
void fs_cleanup_bds(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct sk_buff *skb;
	cbd_t *bdp;
	int i;

	/*
	 * Reset SKB transmit buffers.
	 */
	for (i = 0, bdp = fep->tx_bd_base; i < fep->tx_ring; i++, bdp++) {
		if ((skb = fep->tx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 skb->len, DMA_TO_DEVICE);

		fep->tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);
	}

	/*
	 * Reset SKB receive buffers
	 */
	for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
		if ((skb = fep->rx_skbuff[i]) == NULL)
			continue;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				 L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
				 DMA_FROM_DEVICE);

		fep->rx_skbuff[i] = NULL;

		dev_kfree_skb(skb);
	}
}

/**********************************************************************************/

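/*
 * Queue one skb for transmission: claim the next free buffer
 * descriptor, map the packet for DMA and hand the descriptor to the
 * hardware.  The queue is stopped when the last free descriptor is
 * consumed; fs_enet_tx() restarts it once descriptors are reclaimed.
 */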
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	int curidx;
	u16 sc;
	unsigned long flags;

	spin_lock_irqsave(&fep->tx_lock, flags);

	/*
	 * Fill in a Tx ring entry
	 */
	bdp = fep->cur_tx;

	if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&fep->tx_lock, flags);

		/*
		 * Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since the tx queue should be stopped.
		 */
		printk(KERN_WARNING DRV_MODULE_NAME
		       ": %s tx queue full!.\n", dev->name);
		return NETDEV_TX_BUSY;
	}

	curidx = bdp - fep->tx_bd_base;
	/*
	 * Clear all of the status flags.
	 */
	CBDC_SC(bdp, BD_ENET_TX_STATS);

	/*
	 * Save skb pointer.
	 */
	fep->tx_skbuff[curidx] = skb;

	fep->stats.tx_bytes += skb->len;

	/*
	 * Push the data cache so the CPM does not get stale memory data.
	 */
	CBDW_BUFADDR(bdp, dma_map_single(fep->dev,
			skb->data, skb->len, DMA_TO_DEVICE));
	CBDW_DATLEN(bdp, skb->len);

	dev->trans_start = jiffies;

	/*
	 * If this was the last BD in the ring, start at the beginning again.
	 */
	if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
		fep->cur_tx++;
	else
		fep->cur_tx = fep->tx_bd_base;

	if (!--fep->tx_free)
		netif_stop_queue(dev);

	/* Trigger transmission start */
	sc = BD_ENET_TX_READY | BD_ENET_TX_INTR |
	     BD_ENET_TX_LAST | BD_ENET_TX_TC;

	/* note that while FEC does not have this bit
	 * it marks it as available for software use
	 * yay for hw reuse :) */
	if (skb->len <= 60)
		sc |= BD_ENET_TX_PAD;
	CBDS_SC(bdp, sc);

	(*fep->ops->tx_kickstart)(dev);

	spin_unlock_irqrestore(&fep->tx_lock, flags);

	return NETDEV_TX_OK;
}

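/*
 * Thin wrappers around request_irq()/free_irq() that give the
 * MAC-specific ops a hook before the IRQ is requested and after it is
 * released.
 */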
static int fs_request_irq(struct net_device *dev, int irq, const char *name,
			  irq_handler_t irqf)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	(*fep->ops->pre_request_irq)(dev, irq);
	return request_irq(irq, irqf, IRQF_SHARED, name, dev);
}

static void fs_free_irq(struct net_device *dev, int irq)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	free_irq(irq, dev);
	(*fep->ops->post_free_irq)(dev, irq);
}

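/*
 * Transmit watchdog timeout handler: count the error and, if the
 * interface is up, restart the controller and the PHY, then wake the
 * queue if descriptors are available again.
 */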
static void fs_timeout(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int wake = 0;

	fep->stats.tx_errors++;

	spin_lock_irqsave(&fep->lock, flags);

	if (dev->flags & IFF_UP) {
		phy_stop(fep->phydev);
		(*fep->ops->stop)(dev);
		(*fep->ops->restart)(dev);
		phy_start(fep->phydev);
	}

	phy_start(fep->phydev);
	wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (wake)
		netif_wake_queue(dev);
}

/*-----------------------------------------------------------------------------
 * generic link-change handler - should be sufficient for most cases
 *-----------------------------------------------------------------------------*/
static void generic_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev = fep->phydev;
	int new_state = 0;

	if (phydev->link) {

		/* adjust to duplex mode */
		if (phydev->duplex != fep->oldduplex) {
			new_state = 1;
			fep->oldduplex = phydev->duplex;
		}

		if (phydev->speed != fep->oldspeed) {
			new_state = 1;
			fep->oldspeed = phydev->speed;
		}

		if (!fep->oldlink) {
			new_state = 1;
			fep->oldlink = 1;
			netif_schedule(dev);
			netif_carrier_on(dev);
			netif_start_queue(dev);
		}

		if (new_state)
			fep->ops->restart(dev);

	} else if (fep->oldlink) {
		new_state = 1;
		fep->oldlink = 0;
		fep->oldspeed = 0;
		fep->oldduplex = -1;
		netif_carrier_off(dev);
		netif_stop_queue(dev);
	}

	if (new_state && netif_msg_link(fep))
		phy_print_status(phydev);
}

static void fs_adjust_link(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);

	if (fep->ops->adjust_link)
		fep->ops->adjust_link(dev);
	else
		generic_adjust_link(dev);

	spin_unlock_irqrestore(&fep->lock, flags);
}

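/*
 * Attach to the PHY described by the platform bus_id and register
 * fs_adjust_link() as the link-change callback.
 */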
static int fs_init_phy(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct phy_device *phydev;

	fep->oldlink = 0;
	fep->oldspeed = 0;
	fep->oldduplex = -1;
	if (fep->fpi->bus_id)
		phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0,
				     PHY_INTERFACE_MODE_MII);
	else {
		printk("No phy bus ID specified in BSP code\n");
		return -EINVAL;
	}
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	fep->phydev = phydev;

	return 0;
}

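/*
 * Bring the interface up: enable NAPI, install the interrupt handler,
 * attach to the PHY and start it.
 */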
static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int r;
	int err;

	napi_enable(&fep->napi);

	/* Install our interrupt handler. */
	r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s Could not allocate FS_ENET IRQ!", dev->name);
		napi_disable(&fep->napi);
		return -EINVAL;
	}

	err = fs_init_phy(dev);
	if (err) {
		napi_disable(&fep->napi);
		return err;
	}
	phy_start(fep->phydev);

	return 0;
}

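/*
 * Bring the interface down: stop the queue and NAPI, stop and
 * disconnect the PHY, halt the controller under both locks and release
 * the interrupt.
 */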
static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	napi_disable(&fep->napi);
	phy_stop(fep->phydev);

	spin_lock_irqsave(&fep->lock, flags);
	spin_lock(&fep->tx_lock);
	(*fep->ops->stop)(dev);
	spin_unlock(&fep->tx_lock);
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	phy_disconnect(fep->phydev);
	fep->phydev = NULL;
	fs_free_irq(dev, fep->interrupt);

	return 0;
}

static struct net_device_stats *fs_enet_get_stats(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return &fep->stats;
}

/*************************************************************************/

static void fs_get_drvinfo(struct net_device *dev,
			   struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
}

static int fs_get_regs_len(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);

	return (*fep->ops->get_regs_len)(dev);
}

static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *p)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;
	int r, len;

	len = regs->len;

	spin_lock_irqsave(&fep->lock, flags);
	r = (*fep->ops->get_regs)(dev, p, &len);
	spin_unlock_irqrestore(&fep->lock, flags);

	if (r == 0)
		regs->version = 0;
}

static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return phy_ethtool_gset(fep->phydev, cmd);
}

static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	phy_ethtool_sset(fep->phydev, cmd);
	return 0;
}

static int fs_nway_reset(struct net_device *dev)
{
	return 0;
}

static u32 fs_get_msglevel(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	return fep->msg_enable;
}

static void fs_set_msglevel(struct net_device *dev, u32 value)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	fep->msg_enable = value;
}

static const struct ethtool_ops fs_ethtool_ops = {
	.get_drvinfo = fs_get_drvinfo,
	.get_regs_len = fs_get_regs_len,
	.get_settings = fs_get_settings,
	.set_settings = fs_set_settings,
	.nway_reset = fs_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_msglevel = fs_get_msglevel,
	.set_msglevel = fs_set_msglevel,
	.set_tx_csum = ethtool_op_set_tx_csum,	/* local! */
	.set_sg = ethtool_op_set_sg,
	.get_regs = fs_get_regs,
};

static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&rq->ifr_data;
	unsigned long flags;
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	spin_lock_irqsave(&fep->lock, flags);
	rc = phy_mii_ioctl(fep->phydev, mii, cmd);
	spin_unlock_irqrestore(&fep->lock, flags);
	return rc;
}

extern int fs_mii_connect(struct net_device *dev);
extern void fs_mii_disconnect(struct net_device *dev);

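/*
 * Allocate and register one net_device instance for the platform
 * device described by fpi: pick the matching MAC ops (FEC/SCC/FCC),
 * allocate the buffer descriptor rings, fill in the net_device
 * callbacks and register the device with the network stack.
 */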
static struct net_device *fs_init_instance(struct device *dev,
		struct fs_platform_info *fpi)
{
	struct net_device *ndev = NULL;
	struct fs_enet_private *fep = NULL;
	int privsize, i, r, err = 0, registered = 0;

	fpi->fs_no = fs_get_id(fpi);
	/* guard */
	if ((unsigned int)fpi->fs_no >= FS_MAX_INDEX)
		return ERR_PTR(-EINVAL);

	privsize = sizeof(*fep) + (sizeof(struct sk_buff **) *
				   (fpi->rx_ring + fpi->tx_ring));

	ndev = alloc_etherdev(privsize);
	if (!ndev) {
		err = -ENOMEM;
		goto err;
	}

	fep = netdev_priv(ndev);

	fep->dev = dev;
	dev_set_drvdata(dev, ndev);
	fep->fpi = fpi;
	if (fpi->init_ioports)
		fpi->init_ioports((struct fs_platform_info *)fpi);

#ifdef CONFIG_FS_ENET_HAS_FEC
	if (fs_get_fec_index(fpi->fs_no) >= 0)
		fep->ops = &fs_fec_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
	if (fs_get_scc_index(fpi->fs_no) >= 0)
		fep->ops = &fs_scc_ops;
#endif

#ifdef CONFIG_FS_ENET_HAS_FCC
	if (fs_get_fcc_index(fpi->fs_no) >= 0)
		fep->ops = &fs_fcc_ops;
#endif

	if (fep->ops == NULL) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s No matching ops found (%d).\n",
		       ndev->name, fpi->fs_no);
		err = -EINVAL;
		goto err;
	}

	r = (*fep->ops->setup_data)(ndev);
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s setup_data failed\n",
		       ndev->name);
		err = r;
		goto err;
	}

	/* point rx_skbuff, tx_skbuff */
	fep->rx_skbuff = (struct sk_buff **)&fep[1];
	fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring;

	/* init locks */
	spin_lock_init(&fep->lock);
	spin_lock_init(&fep->tx_lock);

	/*
	 * Set the Ethernet address.
	 */
	for (i = 0; i < 6; i++)
		ndev->dev_addr[i] = fpi->macaddr[i];

	r = (*fep->ops->allocate_bd)(ndev);

	if (fep->ring_base == NULL) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s buffer descriptor alloc failed (%d).\n", ndev->name, r);
		err = r;
		goto err;
	}

	/*
	 * Set receive and transmit descriptor base.
	 */
	fep->rx_bd_base = fep->ring_base;
	fep->tx_bd_base = fep->rx_bd_base + fpi->rx_ring;

	/* initialize ring size variables */
	fep->tx_ring = fpi->tx_ring;
	fep->rx_ring = fpi->rx_ring;

	/*
	 * The FEC Ethernet specific entries in the device structure.
	 */
	ndev->open = fs_enet_open;
	ndev->hard_start_xmit = fs_enet_start_xmit;
	ndev->tx_timeout = fs_timeout;
	ndev->watchdog_timeo = 2 * HZ;
	ndev->stop = fs_enet_close;
	ndev->get_stats = fs_enet_get_stats;
	ndev->set_multicast_list = fs_set_multicast_list;

#ifdef CONFIG_NET_POLL_CONTROLLER
	ndev->poll_controller = fs_enet_netpoll;
#endif

	netif_napi_add(ndev, &fep->napi,
		       fs_enet_rx_napi, fpi->napi_weight);

	ndev->ethtool_ops = &fs_ethtool_ops;
	ndev->do_ioctl = fs_ioctl;

	init_timer(&fep->phy_timer_list);

	netif_carrier_off(ndev);

	err = register_netdev(ndev);
	if (err != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s register_netdev failed.\n", ndev->name);
		goto err;
	}
	registered = 1;

	return ndev;

err:
	if (ndev != NULL) {

		if (registered)
			unregister_netdev(ndev);

		if (fep != NULL) {
			(*fep->ops->free_bd)(ndev);
			(*fep->ops->cleanup_data)(ndev);
		}

		free_netdev(ndev);
	}

	dev_set_drvdata(dev, NULL);

	return ERR_PTR(err);
}

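/*
 * Undo fs_init_instance(): unregister the net_device, free the buffer
 * descriptor rings and release the per-device private data.
 */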
static int fs_cleanup_instance(struct net_device *ndev)
{
	struct fs_enet_private *fep;
	const struct fs_platform_info *fpi;
	struct device *dev;

	if (ndev == NULL)
		return -EINVAL;

	fep = netdev_priv(ndev);
	if (fep == NULL)
		return -EINVAL;

	fpi = fep->fpi;

	unregister_netdev(ndev);

	dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
			  fep->ring_base, fep->ring_mem_addr);

	/* reset it */
	(*fep->ops->cleanup_data)(ndev);

	dev = fep->dev;
	if (dev != NULL) {
		dev_set_drvdata(dev, NULL);
		fep->dev = NULL;
	}

	free_netdev(ndev);

	return 0;
}

/**************************************************************************************/

/* handy pointer to the immap */
void *fs_enet_immap = NULL;

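/* Map the CPM internal memory region so the MAC-specific code can reach it. */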
static int setup_immap(void)
{
	phys_addr_t paddr = 0;
	unsigned long size = 0;

#ifdef CONFIG_CPM1
	paddr = IMAP_ADDR;
	size = 0x10000;	/* map 64K */
#endif

#ifdef CONFIG_CPM2
	paddr = CPM_MAP_ADDR;
	size = 0x40000;	/* map 256 K */
#endif
	fs_enet_immap = ioremap(paddr, size);
	if (fs_enet_immap == NULL)
		return -EBADF;	/* XXX ahem; maybe just BUG_ON? */

	return 0;
}

static void cleanup_immap(void)
{
	if (fs_enet_immap != NULL) {
		iounmap(fs_enet_immap);
		fs_enet_immap = NULL;
	}
}

/**************************************************************************************/

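/*
 * Platform bus glue: probe creates a device instance from the supplied
 * platform data, remove tears it down again.
 */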
static int __devinit fs_enet_probe(struct device *dev)
{
	struct net_device *ndev;

	/* no fixup - no device */
	if (dev->platform_data == NULL) {
		printk(KERN_INFO "fs_enet: "
		       "probe called with no platform data; "
		       "remove unused devices\n");
		return -ENODEV;
	}

	ndev = fs_init_instance(dev, dev->platform_data);
	if (IS_ERR(ndev))
		return PTR_ERR(ndev);
	return 0;
}

static int fs_enet_remove(struct device *dev)
{
	return fs_cleanup_instance(dev_get_drvdata(dev));
}

static struct device_driver fs_enet_fec_driver = {
	.name = "fsl-cpm-fec",
	.bus = &platform_bus_type,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
#ifdef CONFIG_PM
/*	.suspend = fs_enet_suspend,	TODO */
/*	.resume = fs_enet_resume,	TODO */
#endif
};

static struct device_driver fs_enet_scc_driver = {
	.name = "fsl-cpm-scc",
	.bus = &platform_bus_type,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
#ifdef CONFIG_PM
/*	.suspend = fs_enet_suspend,	TODO */
/*	.resume = fs_enet_resume,	TODO */
#endif
};

static struct device_driver fs_enet_fcc_driver = {
	.name = "fsl-cpm-fcc",
	.bus = &platform_bus_type,
	.probe = fs_enet_probe,
	.remove = fs_enet_remove,
#ifdef CONFIG_PM
/*	.suspend = fs_enet_suspend,	TODO */
/*	.resume = fs_enet_resume,	TODO */
#endif
};

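/*
 * Module init: map the immap, bring up the MDIO/PHY support for the
 * configured MAC types and register the platform drivers.
 */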
static int __init fs_init(void)
{
	int r;

	printk(KERN_INFO
	       "%s", version);

	r = setup_immap();
	if (r != 0)
		return r;

#ifdef CONFIG_FS_ENET_HAS_FCC
	/* let's insert mii stuff */
	r = fs_enet_mdio_bb_init();

	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       "BB PHY init failed.\n");
		return r;
	}
	r = driver_register(&fs_enet_fcc_driver);
	if (r != 0)
		goto err;
#endif

#ifdef CONFIG_FS_ENET_HAS_FEC
	r = fs_enet_mdio_fec_init();
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       "FEC PHY init failed.\n");
		return r;
	}

	r = driver_register(&fs_enet_fec_driver);
	if (r != 0)
		goto err;
#endif

#ifdef CONFIG_FS_ENET_HAS_SCC
	r = driver_register(&fs_enet_scc_driver);
	if (r != 0)
		goto err;
#endif

	return 0;
err:
	cleanup_immap();
	return r;
}

static void __exit fs_cleanup(void)
{
	driver_unregister(&fs_enet_fec_driver);
	driver_unregister(&fs_enet_fcc_driver);
	driver_unregister(&fs_enet_scc_driver);
	cleanup_immap();
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
	disable_irq(dev->irq);
	fs_enet_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**************************************************************************************/

module_init(fs_init);
module_exit(fs_cleanup);