/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 * 	Matt Porter <mporter@kernel.crashing.org>
 *	(c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 * 	Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */

#define DRV_NAME	"emac"
#define DRV_VERSION	"3.54"
#define DRV_DESC	"PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)
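/* Worked example (editorial note, not from the original source): with the
 * default ring size of NUM_TX_BUFF = 64 (CONFIG_IBM_EMAC_TXB) this threshold
 * is 16, i.e. the stopped queue is woken once fewer than 16 descriptors are
 * still in flight -- see the tx_cnt < EMAC_TX_WAKEUP_THRESH check in
 * emac_poll_tx().
 */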

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 */
static u32 busy_phy_map;

#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with PHY RX clock problem.
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)		((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)	((void)0)
#endif

#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
 * unfortunately this is less flexible than 440EP case, because it's a global
 * setting for all EMACs, therefore we do this clock trick only during probe.
 */
#define EMAC_CLK_INTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL		((void)0)
#define EMAC_CLK_EXTERNAL		((void)0)
#endif

/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
					     const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
	DBG("%d: %s" NL, dev->def->index, error);
#else
	if (net_ratelimit())
		printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
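/* Sanity check of the values above (editorial note): a worst-case standard
 * frame is 1518 bytes + 8 bytes preamble/SFD + 12 bytes inter-frame gap =
 * 1538 bytes = 12304 bits on the wire, so:
 *    10 Mb/s: 12304 / 10   ~= 1230 us
 *   100 Mb/s: 12304 / 100  ~=  124 us (rounded up)
 *     1 Gb/s: 12304 / 1000 ~=   13 us (rounded up)
 * The jumbo value assumes a ~9000-byte MTU (~9038 bytes on the wire),
 * i.e. ~73 us at 1 Gb/s.
 */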

/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

Eugene Surovegin37448f72005-10-10 16:58:14 -0700186static void emac_clean_tx_ring(struct ocp_enet_private *dev);
187
188static inline int emac_phy_supports_gige(int phy_mode)
189{
190 return phy_mode == PHY_MODE_GMII ||
191 phy_mode == PHY_MODE_RGMII ||
192 phy_mode == PHY_MODE_TBI ||
193 phy_mode == PHY_MODE_RTBI;
194}
195
196static inline int emac_phy_gpcs(int phy_mode)
197{
198 return phy_mode == PHY_MODE_TBI ||
199 phy_mode == PHY_MODE_RTBI;
200}
201
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
	local_irq_restore(flags);
}

static void emac_tx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
	local_irq_restore(flags);
}

static void emac_rx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);
	if (unlikely(dev->commac.rx_stopped))
		goto out;

	DBG("%d: rx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
      out:
	local_irq_restore(flags);
}

static void emac_rx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
	local_irq_restore(flags);
}

static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable_async" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
	local_irq_restore(flags);
}

static int emac_reset(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	int n = 20;

	DBG("%d: reset" NL, dev->def->index);

	local_irq_save(flags);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;
	local_irq_restore(flags);

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}

static void emac_hash_mc(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dev->def->index,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}

static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
	    EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}
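/* Summary of the precedence above (editorial note): IFF_PROMISC selects full
 * promiscuous mode (PME); otherwise IFF_ALLMULTI, or more than 32 multicast
 * addresses, falls back to multicast-promiscuous mode (PMME); a smaller
 * multicast list uses the GAHT hash filter (MAE, filled in by emac_hash_mc()).
 */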

static inline int emac_opb_mhz(void)
{
	return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}
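/* Example (editorial note): with a 66.666 MHz OPB (opb_bus_freq = 66666666)
 * this returns (66666666 + 500000) / 1000000 = 67, i.e. the OPB frequency in
 * MHz rounded to the nearest whole number.
 */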

/* BHs disabled */
static int emac_configure(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int gige;
	u32 r;

	DBG("%d: configure" NL, dev->def->index);

	if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	tah_reset(dev->tah_dev);

	/* Mode register */
	r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
	if (dev->phy.duplex == DUPLEX_FULL)
		r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			r |= EMAC_MR1_MF_1000GPCS |
			    EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			r |= EMAC_MR1_MF_1000;
		r |= EMAC_MR1_RFS_16K;
		gige = 1;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			r |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		r |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		/* Fall through */
	default:
		r |= EMAC_MR1_RFS_4K;
		gige = 0;
		break;
	}

	if (dev->rgmii_dev)
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
				dev->phy.speed);
	else
		zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
	/* an erratum on 40x forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			r |= EMAC_MR1_APP;
	}
#endif
	out_be32(&p->mr1, r);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
		      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark;
	   there should still be enough space in the FIFO to allow our link
	   partner time to process this frame and also time to send a PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
		      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
		 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		 EMAC_ISR_IRE | EMAC_ISR_TE);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		mii_reset_phy(&dev->phy);

	return 0;
}

/* BHs disabled */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
	DBG("%d: reinitialize" NL, dev->def->index);

	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
}

/* BHs disabled */
static void emac_full_tx_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: full_tx_reset" NL, dev->def->index);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	netif_wake_queue(ndev);
}

static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;
	int n;

	DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue read command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
		 (reg & EMAC_STACR_PRA_MASK)
		 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
		 | EMAC_STACR_START);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(r = in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
		    id, reg);
		return -EREMOTEIO;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
	DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
	return r;
      to:
	DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
	return -ETIMEDOUT;
}

static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n;

	DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
	     val);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue write command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
		 (reg & EMAC_STACR_PRA_MASK) |
		 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}
	return;
      to:
	DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res;

	local_bh_disable();
	res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			       (u8) reg);
	local_bh_enable();
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	__emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			  (u8) reg, (u16) val);
	local_bh_enable();
}

/* BHs disabled */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(ndev);

	DBG("%d: multicast %08x" NL, dev->def->index, rmr);
	BUG_ON(!netif_running(dev->ndev));

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in a TX hang (it'll be recovered by the TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs. --ebs
	 */
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}

/* BHs disabled */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
      skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		dev->commac.rx_stopped = 1;

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev->ndev);
	}

	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
	/* Restart RX */
	dev->commac.rx_stopped = dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_rx_enable(dev);

	return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ocp_enet_private *dev = ndev->priv;
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
		return -EINVAL;

	DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

	local_bh_disable();
	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}
	local_bh_enable();

	return ret;
}

static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
	int i;
	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct ocp_enet_private *dev)
{
	int i;
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}

static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
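/* Editorial note on the "+ 2" / "- 2" pairing above (an assumption about the
 * intent, not taken from the original comments): data_ptr ends up pointing at
 * skb->data while the DMA mapping starts two bytes earlier, so the hardware
 * deposits the 14-byte Ethernet header at an offset that leaves the IP header
 * 32-bit aligned -- the usual NET_IP_ALIGN-style receive trick.
 */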

static void emac_print_link_status(struct ocp_enet_private *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int err, i;

	DBG("%d: open" NL, dev->def->index);

	/* Setup error IRQ handler */
	err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->def->irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	local_bh_disable();
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
	    dev->commac.rx_stopped = 0;
	dev->rx_sg_skb = NULL;

	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			EMAC_RX_CLK_DEFAULT(dev->def->index);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			EMAC_RX_CLK_TX(dev->def->index);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		mod_timer(&dev->link_timer, jiffies + link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	netif_start_queue(ndev);
	local_bh_enable();

	return 0;
      oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);
	return -ENOMEM;
}

/* BHs disabled */
static int emac_link_differs(struct ocp_enet_private *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & EMAC_MR1_MF_1000)
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}

/* BHs disabled */
static void emac_link_timer(unsigned long data)
{
	struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
	int link_poll_interval;

	DBG2("%d: link timer" NL, dev->def->index);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_DEFAULT(dev->def->index);

			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			if (dev->tah_dev || emac_link_differs(dev))
				emac_full_tx_reset(dev->ndev);

			netif_carrier_on(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
			emac_reinitialize(dev);
#endif
			netif_carrier_off(dev->ndev);
			emac_print_link_status(dev);
		}

		/* Retry reset if the previous attempt failed.
		 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
		 * case, but I left it here because it shouldn't trigger for
		 * sane PHYs anyway.
		 */
		if (unlikely(dev->reset_failed))
			emac_reinitialize(dev);

		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}

/* BHs disabled */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
	netif_carrier_off(dev->ndev);
	if (timer_pending(&dev->link_timer))
		mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: close" NL, dev->def->index);

	local_bh_disable();

	if (dev->phy.address >= 0)
		del_timer_sync(&dev->link_timer);

	netif_stop_queue(ndev);
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);
	local_bh_enable();

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);

	return 0;
}

static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
			       struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
#endif
	return 0;
}

static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out */
	out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2("%d: stopped TX queue" NL, dev->def->index);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}

/* BHs disabled */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	barrier();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}

#if defined(CONFIG_IBM_EMAC_TAH)
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}

/* BHs disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
	     dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	barrier();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

      undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic.
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

      stop_queue:
	netif_stop_queue(ndev);
	DBG2("%d: stopped TX queue" NL, dev->def->index);
	return 1;
}
#else
# define emac_start_xmit_sg	emac_start_xmit
#endif	/* !defined(CONFIG_IBM_EMAC_TAH) */

/* BHs disabled */
static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;
	DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);

	++st->tx_bd_errors;
	if (ctrl & EMAC_TX_ST_BFCS)
		++st->tx_bd_bad_fcs;
	if (ctrl & EMAC_TX_ST_LCS)
		++st->tx_bd_carrier_loss;
	if (ctrl & EMAC_TX_ST_ED)
		++st->tx_bd_excessive_deferral;
	if (ctrl & EMAC_TX_ST_EC)
		++st->tx_bd_excessive_collisions;
	if (ctrl & EMAC_TX_ST_LC)
		++st->tx_bd_late_collision;
	if (ctrl & EMAC_TX_ST_MC)
		++st->tx_bd_multple_collisions;
	if (ctrl & EMAC_TX_ST_SC)
		++st->tx_bd_single_collision;
	if (ctrl & EMAC_TX_ST_UR)
		++st->tx_bd_underrun;
	if (ctrl & EMAC_TX_ST_SQE)
		++st->tx_bd_sqe;
}

static void emac_poll_tx(void *param)
{
	struct ocp_enet_private *dev = param;
	DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
	     dev->ack_slot);

	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	      again:
		ctrl = dev->tx_desc[slot].ctrl;
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(EMAC_IS_BAD_TX(ctrl)))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2("%d: tx %d pkts" NL, dev->def->index, n);
		}
	}
}

static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];
	DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

	if (len)
		dma_map_single(dev->ldev, skb->data - 2,
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}

static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
{
	struct ibm_emac_error_stats *st = &dev->estats;
	DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);

	++st->rx_bd_errors;
	if (ctrl & EMAC_RX_ST_OE)
		++st->rx_bd_overrun;
	if (ctrl & EMAC_RX_ST_BP)
		++st->rx_bd_bad_packet;
	if (ctrl & EMAC_RX_ST_RP)
		++st->rx_bd_runt_packet;
	if (ctrl & EMAC_RX_ST_SE)
		++st->rx_bd_short_event;
	if (ctrl & EMAC_RX_ST_AE)
		++st->rx_bd_alignment_error;
	if (ctrl & EMAC_RX_ST_BFCS)
		++st->rx_bd_bad_fcs;
	if (ctrl & EMAC_RX_ST_PTL)
		++st->rx_bd_packet_too_long;
	if (ctrl & EMAC_RX_ST_ORE)
		++st->rx_bd_out_of_range;
	if (ctrl & EMAC_RX_ST_IRE)
		++st->rx_bd_in_range;
}

static inline void emac_rx_csum(struct ocp_enet_private *dev,
				struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (!ctrl && dev->tah_dev) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		++dev->stats.rx_packets_csum;
	}
#endif
}

static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
	if (likely(dev->rx_sg_skb != NULL)) {
		int len = dev->rx_desc[slot].data_len;
		int tot_len = dev->rx_sg_skb->len + len;

		if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
			++dev->estats.rx_dropped_mtu;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		} else {
			cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb),
					 dev->rx_skb[slot]->data, len);
			skb_put(dev->rx_sg_skb, len);
			emac_recycle_rx_skb(dev, slot, len);
			return 0;
		}
	}
	emac_recycle_rx_skb(dev, slot, 0);
	return -1;
}

/* BHs disabled */
static int emac_poll_rx(void *param, int budget)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		barrier();
		len = dev->rx_desc[slot].data_len;

		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	      push_packet:
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	      next:
		++dev->stats.rx_packets;
	      skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	      sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG("%d: rx OOM %d" NL, dev->def->index, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	      oom:
		DBG("%d: rx OOM %d" NL, dev->def->index, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2("%d: rx %d BDs" NL, dev->def->index, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && dev->commac.rx_stopped)) {
		struct ocp_func_emac_data *emacdata = dev->def->additions;

		barrier();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			DBG2("%d: rx restart" NL, dev->def->index);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			DBG2("%d: dropping partial rx packet" NL,
			     dev->def->index);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		dev->commac.rx_stopped = 0;
		mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}

/* BHs disabled */
static int emac_peek_rx(void *param)
{
	struct ocp_enet_private *dev = param;
	return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
}

/* BHs disabled */
static int emac_peek_rx_sg(void *param)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot;
	while (1) {
		u16 ctrl = dev->rx_desc[slot].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			return 0;
		else if (ctrl & MAL_RX_CTRL_LAST)
			return 1;

		slot = (slot + 1) % NUM_RX_BUFF;

		/* I'm just being paranoid here :) */
		if (unlikely(slot == dev->rx_slot))
			return 0;
	}
}

/* Hard IRQ */
static void emac_rxde(void *param)
{
	struct ocp_enet_private *dev = param;
	++dev->estats.rx_stopped;
	emac_rx_disable_async(dev);
}

/* Hard IRQ */
static irqreturn_t emac_irq(int irq, void *dev_instance)
{
	struct ocp_enet_private *dev = dev_instance;
	struct emac_regs __iomem *p = dev->emacp;
	struct ibm_emac_error_stats *st = &dev->estats;

	u32 isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG("%d: isr = %08x" NL, dev->def->index, isr);

	if (isr & EMAC_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	return IRQ_HANDLED;
}

static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ibm_emac_stats *st = &dev->stats;
	struct ibm_emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;

	DBG2("%d: stats" NL, dev->def->index);

	/* Compute "legacy" statistics */
	local_irq_disable();
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	local_irq_enable();
	return nst;
}

static void emac_remove(struct ocp_device *ocpdev)
{
	struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);

	DBG("%d: remove" NL, dev->def->index);

	ocp_set_drvdata(ocpdev, NULL);
	unregister_netdev(dev->ndev);

	tah_fini(dev->tah_dev);
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
	zmii_fini(dev->zmii_dev, dev->zmii_input);

	emac_dbg_register(dev->def->index, NULL);

	mal_unregister_commac(dev->mal, &dev->commac);
	iounmap(dev->emacp);
	kfree(dev->ndev);
}

static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};

static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};

/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver =
	    dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	local_bh_disable();
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	local_bh_enable();

	return 0;
}

static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	u32 f = dev->phy.features;

	DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		local_bh_disable();
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		local_bh_disable();
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
	}
	emac_force_link_update(dev);
	local_bh_enable();

	return 0;
}

static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}

static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	local_bh_enable();
}

static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	return dev->tah_dev != 0;
}

static int emac_get_regs_len(struct ocp_enet_private *dev)
{
	return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	return sizeof(struct emac_ethtool_regs_hdr) +
	    emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
	    zmii_get_regs_len(dev->zmii_dev) +
	    rgmii_get_regs_len(dev->rgmii_dev) +
	    tah_get_regs_len(dev->tah_dev);
}

static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->version = EMAC_ETHTOOL_REGS_VER;
	hdr->index = dev->def->index;
	memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
	return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
}

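/* ethtool register dump layout: a global header followed by the MAL and EMAC
 * register blocks, then optional ZMII/RGMII/TAH blocks flagged in
 * hdr->components.
 */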
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	local_irq_disable();
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (dev->zmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (dev->rgmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (dev->tah_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
	local_irq_enable();
}

static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res = 0;

	DBG("%d: nway_reset" NL, dev->def->index);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	local_bh_disable();
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
	emac_force_link_update(dev);

 out:
	local_bh_enable();
	return res;
}

static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 * buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}

static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct ocp_enet_private *dev = ndev->priv;
	local_irq_disable();
	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
	local_irq_enable();
}

static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct ocp_enet_private *dev = ndev->priv;

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}

static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};

static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	uint16_t *data = (uint16_t *) &rq->ifr_ifru;

	DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

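	/* The SIOCDEVPRIVATE..SIOCDEVPRIVATE+2 cases are legacy aliases for
	 * the standard MII ioctls, kept for older userspace tools.
	 */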
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

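/* Probe one EMAC instance: allocate the net_device, register with the MAL
 * channels, attach to any ZMII/RGMII/TAH bridges, map the register block,
 * find and configure the PHY (or fall back to a PHY-less setup), and finally
 * register the net device.
 */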
static int __init emac_probe(struct ocp_device *ocpdev)
{
	struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
	struct net_device *ndev;
	struct ocp_device *maldev;
	struct ocp_enet_private *dev;
	int err, i;

	DBG("%d: probe" NL, ocpdev->def->index);

	if (!emacdata) {
		printk(KERN_ERR "emac%d: Missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	/* Allocate our net_device structure */
	ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
	if (!ndev) {
		printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
		       ocpdev->def->index);
		return -ENOMEM;
	}
	dev = ndev->priv;
	dev->ndev = ndev;
	dev->ldev = &ocpdev->dev;
	dev->def = ocpdev->def;
	SET_MODULE_OWNER(ndev);

	/* Find MAL device we are connected to */
	maldev =
	    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
	if (!maldev) {
		printk(KERN_ERR "emac%d: unknown mal%d device!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}
	dev->mal = ocp_get_drvdata(maldev);
	if (!dev->mal) {
		printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
		       dev->def->index, emacdata->mal_idx);
		goto out;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
						 emacdata->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
						 emacdata->mal_rx_chan);

	DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
	DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));

	/* If we depend on another EMAC for MDIO, check whether it was probed already */
	if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
		struct ocp_device *mdiodev =
		    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
				    emacdata->mdio_idx);
		if (!mdiodev) {
			printk(KERN_ERR "emac%d: unknown emac%d device!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
		dev->mdio_dev = ocp_get_drvdata(mdiodev);
		if (!dev->mdio_dev) {
			printk(KERN_ERR
			       "emac%d: emac%d hasn't been initialized yet!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
	}

	/* Attach to ZMII, if needed */
	if ((err = zmii_attach(dev)) != 0)
		goto out2;

	/* Attach to RGMII, if needed */
	if ((err = rgmii_attach(dev)) != 0)
		goto out3;

	/* Attach to TAH, if needed */
	if ((err = tah_attach(dev)) != 0)
		goto out4;

	/* Map EMAC regs */
	dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs));
	if (!dev->emacp) {
		printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
		       dev->def->index);
		err = -ENOMEM;
		goto out5;
	}

	/* Fill in MAC address */
	for (i = 0; i < 6; ++i)
		ndev->dev_addr[i] = emacdata->mac_addr[i];

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	init_timer(&dev->link_timer);
	dev->link_timer.function = emac_link_timer;
	dev->link_timer.data = (unsigned long)dev;

	/* Find PHY if any */
	dev->phy.dev = ndev;
	dev->phy.mode = emacdata->phy_mode;
	if (emacdata->phy_map != 0xffffffff) {
		u32 phy_map = emacdata->phy_map | busy_phy_map;
		u32 adv;

		DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
		    emacdata->phy_map, busy_phy_map);

		EMAC_RX_CLK_TX(dev->def->index);

		dev->phy.mdio_read = emac_mdio_read;
		dev->phy.mdio_write = emac_mdio_write;

		/* Configure EMAC with defaults so we can at least use MDIO
		 * This is needed mostly for 440GX
		 */
		if (emac_phy_gpcs(dev->phy.mode)) {
			/* XXX
			 * Make GPCS PHY address equal to EMAC index.
			 * We probably should take into account busy_phy_map
			 * and/or phy_map here.
			 */
			dev->phy.address = dev->def->index;
		}

		emac_configure(dev);

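		/* Scan the MDIO bus for a PHY, skipping addresses masked out
		 * by the platform (phy_map) or already claimed by other EMAC
		 * instances (busy_phy_map).
		 */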
		for (i = 0; i < 0x20; phy_map >>= 1, ++i)
			if (!(phy_map & 1)) {
				int r;
				busy_phy_map |= 1 << i;

				/* Quick check if there is a PHY at the address */
				r = emac_mdio_read(dev->ndev, i, MII_BMCR);
				if (r == 0xffff || r < 0)
					continue;
				if (!mii_phy_probe(&dev->phy, i))
					break;
			}
		if (i == 0x20) {
			printk(KERN_WARNING "emac%d: can't find PHY!\n",
			       dev->def->index);
			goto out6;
		}

		/* Init PHY */
		if (dev->phy.def->ops->init)
			dev->phy.def->ops->init(&dev->phy);

		/* Disable any PHY features not supported by the platform */
		dev->phy.def->features &= ~emacdata->phy_feat_exc;

		/* Setup initial link parameters */
		if (dev->phy.features & SUPPORTED_Autoneg) {
			adv = dev->phy.features;
#if !defined(CONFIG_40x)
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
#endif
			/* Restart autonegotiation */
			dev->phy.def->ops->setup_aneg(&dev->phy, adv);
		} else {
			u32 f = dev->phy.def->features;
			int speed = SPEED_10, fd = DUPLEX_HALF;

			/* Select highest supported speed/duplex */
			if (f & SUPPORTED_1000baseT_Full) {
				speed = SPEED_1000;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_1000baseT_Half)
				speed = SPEED_1000;
			else if (f & SUPPORTED_100baseT_Full) {
				speed = SPEED_100;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_100baseT_Half)
				speed = SPEED_100;
			else if (f & SUPPORTED_10baseT_Full)
				fd = DUPLEX_FULL;

			/* Force link parameters */
			dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
		}
	} else {
		emac_reset(dev);

		/* PHY-less configuration.
		 * XXX I probably should move these settings to emacdata
		 */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;
	}

	/* Fill in the driver function table */
	ndev->open = &emac_open;
	if (dev->tah_dev) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	} else
		ndev->hard_start_xmit = &emac_start_xmit;
	ndev->tx_timeout = &emac_full_tx_reset;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(emacdata->phy_mode)) {
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	}
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
		       dev->def->index, err);
		goto out6;
	}

	ocp_set_drvdata(ocpdev, dev);

	printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->def->index,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev->def->index, dev);

	return 0;
 out6:
	iounmap(dev->emacp);
 out5:
	tah_fini(dev->tah_dev);
 out4:
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
 out3:
	zmii_fini(dev->zmii_dev, dev->zmii_input);
 out2:
	mal_unregister_commac(dev->mal, &dev->commac);
 out:
	kfree(ndev);
	return err;
}

static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID}
};

static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};

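/* Module init: bring up the MAL layer first, then register the EMAC OCP
 * driver. Probing happens with the internal EMAC clock selected
 * (EMAC_CLK_INTERNAL); the external clock is restored afterwards.
 */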
static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	DBG(": init" NL);

	if (mal_init())
		return -ENODEV;

	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		EMAC_CLK_EXTERNAL;
		ocp_unregister_driver(&emac_driver);
		mal_exit();
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	emac_init_debug();
	return 0;
}

static void __exit emac_exit(void)
{
	DBG(": exit" NL);
	ocp_unregister_driver(&emac_driver);
	mal_exit();
	emac_fini_debug();
}

module_init(emac_init);
module_exit(emac_exit);