/*
 * drivers/net/ibm_emac/ibm_emac_core.c
 *
 * Driver for PowerPC 4xx on-chip ethernet controller.
 *
 * Copyright (c) 2004, 2005 Zultys Technologies.
 * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
 *
 * Based on original work by
 *      Matt Porter <mporter@kernel.crashing.org>
 *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *      Armin Kuster <akuster@mvista.com>
 *      Johnnie Peters <jpeters@mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/bitops.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/uaccess.h>
#include <asm/ocp.h>

#include "ibm_emac_core.h"
#include "ibm_emac_debug.h"

/*
 * Lack of dma_unmap_???? calls is intentional.
 *
 * API-correct usage requires additional support state information to be
 * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
 * EMAC design (e.g. TX buffer passed from network stack can be split into
 * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
 * maintaining such information will add additional overhead.
 * Current DMA API implementation for 4xx processors only ensures cache coherency
 * and dma_unmap_???? routines are empty and are likely to stay this way.
 * I decided to omit dma_unmap_??? calls because I don't want to add additional
 * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be
 * controversial, but I really tried to make code API-correct and efficient
 * at the same time and didn't come up with code I liked :(. --ebs
 */

#define DRV_NAME        "emac"
#define DRV_VERSION     "3.54"
#define DRV_DESC        "PPC 4xx OCP EMAC driver"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR
    ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
MODULE_LICENSE("GPL");

/* minimum number of free TX descriptors required to wake up TX process */
#define EMAC_TX_WAKEUP_THRESH		(NUM_TX_BUFF / 4)

/* If packet size is less than this number, we allocate small skb and copy packet
 * contents into it instead of just sending original big skb up
 */
#define EMAC_RX_COPY_THRESH		CONFIG_IBM_EMAC_RX_COPY_THRESHOLD

/* Since multiple EMACs share MDIO lines in various ways, we need
 * to avoid re-using the same PHY ID in cases where the arch didn't
 * setup precise phy_map entries
 */
static u32 busy_phy_map;

#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with PHY RX clock problem.
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}

static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}
#else
#define EMAC_RX_CLK_TX(idx)		((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)	((void)0)
#endif

#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
 * unfortunately this is less flexible than 440EP case, because it's a global
 * setting for all EMACs, therefore we do this clock trick only during probe.
 */
#define EMAC_CLK_INTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
#define EMAC_CLK_INTERNAL		((void)0)
#define EMAC_CLK_EXTERNAL		((void)0)
#endif

/* I don't want to litter system log with timeout errors
 * when we have brain-damaged PHY.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
					     const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
	DBG("%d: %s" NL, dev->def->index, error);
#else
	if (net_ratelimit())
		printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}

/* PHY polling intervals */
#define PHY_POLL_LINK_ON	HZ
#define PHY_POLL_LINK_OFF	(HZ / 5)

/* Graceful stop timeouts in us.
 * We should allow up to 1 frame time (full-duplex, ignoring collisions)
 */
#define STOP_TIMEOUT_10		1230
#define STOP_TIMEOUT_100	124
#define STOP_TIMEOUT_1000	13
#define STOP_TIMEOUT_1000_JUMBO	73
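/* Rough derivation of the values above (a sketch, assuming a 1518-byte
 * maximum frame plus 8 bytes of preamble and 12 bytes of inter-frame gap,
 * and a ~9000-byte MTU for the jumbo case): 1538 * 8 bits / 10 Mbps ~= 1230 us,
 * ~123 us at 100 Mbps, ~12.3 us at 1000 Mbps and ~72.3 us for jumbo frames,
 * rounded up.
 */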

/* Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};

static irqreturn_t emac_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct ocp_enet_private *dev);

static inline int emac_phy_supports_gige(int phy_mode)
{
	return phy_mode == PHY_MODE_GMII ||
	    phy_mode == PHY_MODE_RGMII ||
	    phy_mode == PHY_MODE_TBI ||
	    phy_mode == PHY_MODE_RTBI;
}

static inline int emac_phy_gpcs(int phy_mode)
{
	return phy_mode == PHY_MODE_TBI ||
	    phy_mode == PHY_MODE_RTBI;
}

static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
	local_irq_restore(flags);
}

static void emac_tx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
	local_irq_restore(flags);
}

static void emac_rx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);
	if (unlikely(dev->commac.rx_stopped))
		goto out;

	DBG("%d: rx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_RXE)) {
		if (unlikely(!(r & EMAC_MR0_RXI))) {
			/* Wait if previous async disable is still in progress */
			int n = dev->stop_timeout;
			while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
				udelay(1);
				--n;
			}
			if (unlikely(!n))
				emac_report_timeout_error(dev,
							  "RX disable timeout");
		}
		out_be32(&p->mr0, r | EMAC_MR0_RXE);
	}
      out:
	local_irq_restore(flags);
}

static void emac_rx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
	local_irq_restore(flags);
}

static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable_async" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
	local_irq_restore(flags);
}

static int emac_reset(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	unsigned long flags;
	int n = 20;

	DBG("%d: reset" NL, dev->def->index);

	local_irq_save(flags);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;
	local_irq_restore(flags);

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}

static void emac_hash_mc(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dev->def->index,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

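		/* Each multicast address selects one of 64 hash bits: the top
		 * six bits of the Ethernet CRC pick a bit in GAHT1..GAHT4
		 * (16 bits each); with EMAC_RMR_MAE set, a multicast frame is
		 * accepted when its hash bit is set.
		 */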
		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}

static inline u32 emac_iff2rmr(struct net_device *ndev)
{
	u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
	    EMAC_RMR_BASE;

	if (ndev->flags & IFF_PROMISC)
		r |= EMAC_RMR_PME;
	else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
		r |= EMAC_RMR_PMME;
	else if (ndev->mc_count > 0)
		r |= EMAC_RMR_MAE;

	return r;
}

static inline int emac_opb_mhz(void)
{
	return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}

/* BHs disabled */
static int emac_configure(struct ocp_enet_private *dev)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int gige;
	u32 r;

	DBG("%d: configure" NL, dev->def->index);

	if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	tah_reset(dev->tah_dev);

	/* Mode register */
	r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
	if (dev->phy.duplex == DUPLEX_FULL)
		r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			r |= EMAC_MR1_MF_1000GPCS |
			    EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			r |= EMAC_MR1_MF_1000;
		r |= EMAC_MR1_RFS_16K;
		gige = 1;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			r |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		r |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		/* Fall through */
	default:
		r |= EMAC_MR1_RFS_4K;
		gige = 0;
		break;
	}

	if (dev->rgmii_dev)
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
				dev->phy.speed);
	else
		zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
	/* An erratum on 40x forces us to NOT use integrated flow control,
	 * let's hope it works on 44x ;)
	 */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			r |= EMAC_MR1_APP;
	}
#endif
	out_be32(&p->mr1, r);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
		      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should still be enough space in FIFO to allow our link
	   partner time to process this frame and also time to send PAUSE
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark to RX_FIFO_SIZE / 8 (512 bytes)
	 */
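	/* Judging from its use below, EMAC_RWMR() appears to take the
	 * (low-water, high-water) marks expressed in EMAC_FIFO_ENTRY_SIZE
	 * units.
	 */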
	r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
		      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
		 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		 EMAC_ISR_IRE | EMAC_ISR_TE);

	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode))
		mii_reset_phy(&dev->phy);

	return 0;
}

/* BHs disabled */
static void emac_reinitialize(struct ocp_enet_private *dev)
{
	DBG("%d: reinitialize" NL, dev->def->index);

	if (!emac_configure(dev)) {
		emac_tx_enable(dev);
		emac_rx_enable(dev);
	}
}

/* BHs disabled */
static void emac_full_tx_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: full_tx_reset" NL, dev->def->index);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_clean_tx_ring(dev);
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	netif_wake_queue(ndev);
}

static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
	struct emac_regs __iomem *p = dev->emacp;
	u32 r;
	int n;

	DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue read command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
		 (reg & EMAC_STACR_PRA_MASK)
		 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
		 | EMAC_STACR_START);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(r = in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
		    id, reg);
		return -EREMOTEIO;
	}

	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
	DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
	return r;
      to:
	DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
	return -ETIMEDOUT;
}

static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs __iomem *p = dev->emacp;
	int n;

	DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
	     val);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue write command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
		 (reg & EMAC_STACR_PRA_MASK) |
		 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}
	return;
      to:
	DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}

static int emac_mdio_read(struct net_device *ndev, int id, int reg)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res;

	local_bh_disable();
	res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			       (u8) reg);
	local_bh_enable();
	return res;
}

static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	__emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
			  (u8) reg, (u16) val);
	local_bh_enable();
}

/* BHs disabled */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_regs __iomem *p = dev->emacp;
	u32 rmr = emac_iff2rmr(ndev);

	DBG("%d: multicast %08x" NL, dev->def->index, rmr);
	BUG_ON(!netif_running(dev->ndev));

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it
	 * never happens, resulting in a TX hang (it'll be recovered by TX
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs. --ebs
	 */
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}

/* BHs disabled */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
      skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		dev->commac.rx_stopped = 1;

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev->ndev);
	}

	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
	/* Restart RX */
	dev->commac.rx_stopped = dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_rx_enable(dev);

	return ret;
}

/* Process ctx, rtnl_lock semaphore */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ocp_enet_private *dev = ndev->priv;
	int ret = 0;

	if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
		return -EINVAL;

	DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

	local_bh_disable();
	if (netif_running(ndev)) {
		/* Check if we really need to reinitialize RX ring */
		if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
			ret = emac_resize_rx_ring(dev, new_mtu);
	}

	if (!ret) {
		ndev->mtu = new_mtu;
		dev->rx_skb_size = emac_rx_skb_size(new_mtu);
		dev->rx_sync_size = emac_rx_sync_size(new_mtu);
	}
	local_bh_enable();

	return ret;
}

static void emac_clean_tx_ring(struct ocp_enet_private *dev)
{
	int i;
	for (i = 0; i < NUM_TX_BUFF; ++i) {
		if (dev->tx_skb[i]) {
			dev_kfree_skb(dev->tx_skb[i]);
			dev->tx_skb[i] = NULL;
			if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
				++dev->estats.tx_dropped;
		}
		dev->tx_desc[i].ctrl = 0;
		dev->tx_desc[i].data_ptr = 0;
	}
}

static void emac_clean_rx_ring(struct ocp_enet_private *dev)
{
	int i;
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (dev->rx_skb[i]) {
			dev->rx_desc[i].ctrl = 0;
			dev_kfree_skb(dev->rx_skb[i]);
			dev->rx_skb[i] = NULL;
			dev->rx_desc[i].data_ptr = 0;
		}

	if (dev->rx_sg_skb) {
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}
}

static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
				    gfp_t flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

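	/* The 2-byte offset below (reserving EMAC_RX_SKB_HEADROOM + 2 and
	 * mapping from skb->data - 2) is presumably there to keep the IP
	 * header 32-bit aligned behind the 14-byte Ethernet header while the
	 * DMA region itself starts on an aligned boundary.
	 */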
	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr =
	    dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size,
			   DMA_FROM_DEVICE) + 2;
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}

static void emac_print_link_status(struct ocp_enet_private *dev)
{
	if (netif_carrier_ok(dev->ndev))
		printk(KERN_INFO "%s: link is up, %d %s%s\n",
		       dev->ndev->name, dev->phy.speed,
		       dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
		       dev->phy.pause ? ", pause enabled" :
		       dev->phy.asym_pause ? ", asymmetric pause enabled" : "");
	else
		printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_open(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int err, i;

	DBG("%d: open" NL, dev->def->index);

	/* Setup error IRQ handler */
	err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->def->irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	local_bh_disable();
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
	    dev->commac.rx_stopped = 0;
	dev->rx_sg_skb = NULL;

	if (dev->phy.address >= 0) {
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			EMAC_RX_CLK_DEFAULT(dev->def->index);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			EMAC_RX_CLK_TX(dev->def->index);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		mod_timer(&dev->link_timer, jiffies + link_poll_interval);
		emac_print_link_status(dev);
	} else
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	netif_start_queue(ndev);
	local_bh_enable();

	return 0;
      oom:
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);
	return -ENOMEM;
}

/* BHs disabled */
static int emac_link_differs(struct ocp_enet_private *dev)
{
	u32 r = in_be32(&dev->emacp->mr1);

	int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
	int speed, pause, asym_pause;

	if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
		speed = SPEED_1000;
	else if (r & EMAC_MR1_MF_100)
		speed = SPEED_100;
	else
		speed = SPEED_10;

	switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
	case (EMAC_MR1_EIFC | EMAC_MR1_APP):
		pause = 1;
		asym_pause = 0;
		break;
	case EMAC_MR1_APP:
		pause = 0;
		asym_pause = 1;
		break;
	default:
		pause = asym_pause = 0;
	}
	return speed != dev->phy.speed || duplex != dev->phy.duplex ||
	    pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
}

/* BHs disabled */
static void emac_link_timer(unsigned long data)
{
	struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
	int link_poll_interval;

	DBG2("%d: link timer" NL, dev->def->index);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_DEFAULT(dev->def->index);

			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			if (dev->tah_dev || emac_link_differs(dev))
				emac_full_tx_reset(dev->ndev);

			netif_carrier_on(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
			emac_reinitialize(dev);
#endif
			netif_carrier_off(dev->ndev);
			emac_print_link_status(dev);
		}

		/* Retry reset if the previous attempt failed.
		 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
		 * case, but I left it here because it shouldn't trigger for
		 * sane PHYs anyway.
		 */
		if (unlikely(dev->reset_failed))
			emac_reinitialize(dev);

		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}

/* BHs disabled */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
	netif_carrier_off(dev->ndev);
	if (timer_pending(&dev->link_timer))
		mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}

/* Process ctx, rtnl_lock semaphore */
static int emac_close(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: close" NL, dev->def->index);

	local_bh_disable();

	if (dev->phy.address >= 0)
		del_timer_sync(&dev->link_timer);

	netif_stop_queue(ndev);
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);
	local_bh_enable();

	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);

	return 0;
}

static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
			       struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		++dev->stats.tx_packets_csum;
		return EMAC_TX_CTRL_TAH_CSUM;
	}
#endif
	return 0;
}

static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
	struct emac_regs __iomem *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out */
	out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		netif_stop_queue(ndev);
		DBG2("%d: stopped TX queue" NL, dev->def->index);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}

/* BHs disabled */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	barrier();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}

#if defined(CONFIG_IBM_EMAC_TAH)
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}

/* BHs disabled (SG version for TAH equipped EMACs) */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
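	/* Assuming mal_tx_chunks(l) is roughly DIV_ROUND_UP(l, MAL_MAX_TX_SIZE),
	 * the check below reserves chunks for the linear part plus one slot
	 * per fragment; a fragment larger than MAL_MAX_TX_SIZE still splits
	 * further, which is why the undo_frame path exists.
	 */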
1144 if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
1145 goto stop_queue;
1146
1147 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
1148 emac_tx_csum(dev, skb);
1149 slot = dev->tx_slot;
1150
1151 /* skb data */
1152 dev->tx_skb[slot] = NULL;
1153 chunk = min(len, MAL_MAX_TX_SIZE);
1154 dev->tx_desc[slot].data_ptr = pd =
1155 dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
1156 dev->tx_desc[slot].data_len = (u16) chunk;
1157 len -= chunk;
1158 if (unlikely(len))
1159 slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
1160 ctrl);
1161 /* skb fragments */
1162 for (i = 0; i < nr_frags; ++i) {
1163 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1164 len = frag->size;
1165
1166 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
1167 goto undo_frame;
1168
1169 pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
1170 DMA_TO_DEVICE);
1171
1172 slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
1173 ctrl);
1174 }
1175
1176 DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
1177 dev->tx_slot, slot);
1178
1179 /* Attach skb to the last slot so we don't release it too early */
1180 dev->tx_skb[slot] = skb;
1181
1182 /* Send the packet out */
1183 if (dev->tx_slot == NUM_TX_BUFF - 1)
1184 ctrl |= MAL_TX_CTRL_WRAP;
1185 barrier();
1186 dev->tx_desc[dev->tx_slot].ctrl = ctrl;
1187 dev->tx_slot = (slot + 1) % NUM_TX_BUFF;
1188
1189 return emac_xmit_finish(dev, skb->len);
1190
1191 undo_frame:
1192 /* Well, too bad. Our previous estimation was overly optimistic.
1193 * Undo everything.
1194 */
1195 while (slot != dev->tx_slot) {
1196 dev->tx_desc[slot].ctrl = 0;
1197 --dev->tx_cnt;
1198 if (--slot < 0)
1199 slot = NUM_TX_BUFF - 1;
1200 }
1201 ++dev->estats.tx_undo;
1202
1203 stop_queue:
1204 netif_stop_queue(ndev);
1205 DBG2("%d: stopped TX queue" NL, dev->def->index);
1206 return 1;
1207}
1208#else
1209# define emac_start_xmit_sg emac_start_xmit
1210#endif /* !defined(CONFIG_IBM_EMAC_TAH) */
1211
1212/* BHs disabled */
1213static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
1214{
1215 struct ibm_emac_error_stats *st = &dev->estats;
1216 DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);
1217
1218 ++st->tx_bd_errors;
1219 if (ctrl & EMAC_TX_ST_BFCS)
1220 ++st->tx_bd_bad_fcs;
1221 if (ctrl & EMAC_TX_ST_LCS)
1222 ++st->tx_bd_carrier_loss;
1223 if (ctrl & EMAC_TX_ST_ED)
1224 ++st->tx_bd_excessive_deferral;
1225 if (ctrl & EMAC_TX_ST_EC)
1226 ++st->tx_bd_excessive_collisions;
1227 if (ctrl & EMAC_TX_ST_LC)
1228 ++st->tx_bd_late_collision;
1229 if (ctrl & EMAC_TX_ST_MC)
1230 ++st->tx_bd_multple_collisions;
1231 if (ctrl & EMAC_TX_ST_SC)
1232 ++st->tx_bd_single_collision;
1233 if (ctrl & EMAC_TX_ST_UR)
1234 ++st->tx_bd_underrun;
1235 if (ctrl & EMAC_TX_ST_SQE)
1236 ++st->tx_bd_sqe;
1237}
1238
1239static void emac_poll_tx(void *param)
1240{
1241 struct ocp_enet_private *dev = param;
1242 DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
1243 dev->ack_slot);
1244
1245 if (dev->tx_cnt) {
1246 u16 ctrl;
1247 int slot = dev->ack_slot, n = 0;
1248 again:
1249 ctrl = dev->tx_desc[slot].ctrl;
1250 if (!(ctrl & MAL_TX_CTRL_READY)) {
1251 struct sk_buff *skb = dev->tx_skb[slot];
1252 ++n;
1253
1254 if (skb) {
1255 dev_kfree_skb(skb);
1256 dev->tx_skb[slot] = NULL;
1257 }
1258 slot = (slot + 1) % NUM_TX_BUFF;
1259
1260 if (unlikely(EMAC_IS_BAD_TX(ctrl)))
1261 emac_parse_tx_error(dev, ctrl);
1262
1263 if (--dev->tx_cnt)
1264 goto again;
1265 }
1266 if (n) {
1267 dev->ack_slot = slot;
1268 if (netif_queue_stopped(dev->ndev) &&
1269 dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
1270 netif_wake_queue(dev->ndev);
1271
1272 DBG2("%d: tx %d pkts" NL, dev->def->index, n);
1273 }
1274 }
1275}
1276
1277static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
1278 int len)
1279{
1280 struct sk_buff *skb = dev->rx_skb[slot];
1281 DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);
1282
1283 if (len)
1284 dma_map_single(dev->ldev, skb->data - 2,
1285 EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
1286
1287 dev->rx_desc[slot].data_len = 0;
1288 barrier();
1289 dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
1290 (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
1291}
1292
1293static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
1294{
1295 struct ibm_emac_error_stats *st = &dev->estats;
1296 DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);
1297
1298 ++st->rx_bd_errors;
1299 if (ctrl & EMAC_RX_ST_OE)
1300 ++st->rx_bd_overrun;
1301 if (ctrl & EMAC_RX_ST_BP)
1302 ++st->rx_bd_bad_packet;
1303 if (ctrl & EMAC_RX_ST_RP)
1304 ++st->rx_bd_runt_packet;
1305 if (ctrl & EMAC_RX_ST_SE)
1306 ++st->rx_bd_short_event;
1307 if (ctrl & EMAC_RX_ST_AE)
1308 ++st->rx_bd_alignment_error;
1309 if (ctrl & EMAC_RX_ST_BFCS)
1310 ++st->rx_bd_bad_fcs;
1311 if (ctrl & EMAC_RX_ST_PTL)
1312 ++st->rx_bd_packet_too_long;
1313 if (ctrl & EMAC_RX_ST_ORE)
1314 ++st->rx_bd_out_of_range;
1315 if (ctrl & EMAC_RX_ST_IRE)
1316 ++st->rx_bd_in_range;
1317}
1318
1319static inline void emac_rx_csum(struct ocp_enet_private *dev,
1320 struct sk_buff *skb, u16 ctrl)
1321{
1322#if defined(CONFIG_IBM_EMAC_TAH)
1323 if (!ctrl && dev->tah_dev) {
1324 skb->ip_summed = CHECKSUM_UNNECESSARY;
1325 ++dev->stats.rx_packets_csum;
1326 }
1327#endif
1328}
1329
1330static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
1331{
1332 if (likely(dev->rx_sg_skb != NULL)) {
1333 int len = dev->rx_desc[slot].data_len;
1334 int tot_len = dev->rx_sg_skb->len + len;
1335
1336 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1337 ++dev->estats.rx_dropped_mtu;
1338 dev_kfree_skb(dev->rx_sg_skb);
1339 dev->rx_sg_skb = NULL;
1340 } else {
1341 cacheable_memcpy(dev->rx_sg_skb->tail,
1342 dev->rx_skb[slot]->data, len);
1343 skb_put(dev->rx_sg_skb, len);
1344 emac_recycle_rx_skb(dev, slot, len);
1345 return 0;
1346 }
1347 }
1348 emac_recycle_rx_skb(dev, slot, 0);
1349 return -1;
1350}
1351
1352/* BHs disabled */
1353static int emac_poll_rx(void *param, int budget)
1354{
1355 struct ocp_enet_private *dev = param;
1356 int slot = dev->rx_slot, received = 0;
1357
1358 DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);
1359
1360 again:
1361 while (budget > 0) {
1362 int len;
1363 struct sk_buff *skb;
1364 u16 ctrl = dev->rx_desc[slot].ctrl;
1365
1366 if (ctrl & MAL_RX_CTRL_EMPTY)
1367 break;
1368
1369 skb = dev->rx_skb[slot];
1370 barrier();
1371 len = dev->rx_desc[slot].data_len;
1372
1373 if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
1374 goto sg;
1375
1376 ctrl &= EMAC_BAD_RX_MASK;
1377 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1378 emac_parse_rx_error(dev, ctrl);
1379 ++dev->estats.rx_dropped_error;
1380 emac_recycle_rx_skb(dev, slot, 0);
1381 len = 0;
1382 goto next;
1383 }
1384
1385 if (len && len < EMAC_RX_COPY_THRESH) {
1386 struct sk_buff *copy_skb =
1387 alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
1388 if (unlikely(!copy_skb))
1389 goto oom;
1390
1391 skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
1392 cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
1393 len + 2);
1394 emac_recycle_rx_skb(dev, slot, len);
1395 skb = copy_skb;
1396 } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
1397 goto oom;
1398
1399 skb_put(skb, len);
1400 push_packet:
1401 skb->dev = dev->ndev;
1402 skb->protocol = eth_type_trans(skb, dev->ndev);
1403 emac_rx_csum(dev, skb, ctrl);
1404
1405 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
1406 ++dev->estats.rx_dropped_stack;
1407 next:
1408 ++dev->stats.rx_packets;
1409 skip:
1410 dev->stats.rx_bytes += len;
1411 slot = (slot + 1) % NUM_RX_BUFF;
1412 --budget;
1413 ++received;
1414 continue;
1415 sg:
1416 if (ctrl & MAL_RX_CTRL_FIRST) {
1417 BUG_ON(dev->rx_sg_skb);
1418 if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
1419 DBG("%d: rx OOM %d" NL, dev->def->index, slot);
1420 ++dev->estats.rx_dropped_oom;
1421 emac_recycle_rx_skb(dev, slot, 0);
1422 } else {
1423 dev->rx_sg_skb = skb;
1424 skb_put(skb, len);
1425 }
1426 } else if (!emac_rx_sg_append(dev, slot) &&
1427 (ctrl & MAL_RX_CTRL_LAST)) {
1428
1429 skb = dev->rx_sg_skb;
1430 dev->rx_sg_skb = NULL;
1431
1432 ctrl &= EMAC_BAD_RX_MASK;
1433 if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
1434 emac_parse_rx_error(dev, ctrl);
1435 ++dev->estats.rx_dropped_error;
1436 dev_kfree_skb(skb);
1437 len = 0;
1438 } else
1439 goto push_packet;
1440 }
1441 goto skip;
1442 oom:
1443 DBG("%d: rx OOM %d" NL, dev->def->index, slot);
1444 /* Drop the packet and recycle skb */
1445 ++dev->estats.rx_dropped_oom;
1446 emac_recycle_rx_skb(dev, slot, 0);
1447 goto next;
1448 }
1449
1450 if (received) {
1451 DBG2("%d: rx %d BDs" NL, dev->def->index, received);
1452 dev->rx_slot = slot;
1453 }
1454
1455 if (unlikely(budget && dev->commac.rx_stopped)) {
1456 struct ocp_func_emac_data *emacdata = dev->def->additions;
1457
1458 barrier();
1459 if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
1460 DBG2("%d: rx restart" NL, dev->def->index);
1461 received = 0;
1462 goto again;
1463 }
1464
1465 if (dev->rx_sg_skb) {
1466 DBG2("%d: dropping partial rx packet" NL,
1467 dev->def->index);
1468 ++dev->estats.rx_dropped_error;
1469 dev_kfree_skb(dev->rx_sg_skb);
1470 dev->rx_sg_skb = NULL;
1471 }
1472
1473 dev->commac.rx_stopped = 0;
1474 mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
1475 emac_rx_enable(dev);
1476 dev->rx_slot = 0;
1477 }
1478 return received;
1479}
1480
1481/* BHs disabled */
1482static int emac_peek_rx(void *param)
1483{
1484 struct ocp_enet_private *dev = param;
1485 return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1486}
1487
1488/* BHs disabled */
1489static int emac_peek_rx_sg(void *param)
1490{
1491 struct ocp_enet_private *dev = param;
1492 int slot = dev->rx_slot;
1493 while (1) {
1494 u16 ctrl = dev->rx_desc[slot].ctrl;
1495 if (ctrl & MAL_RX_CTRL_EMPTY)
1496 return 0;
1497 else if (ctrl & MAL_RX_CTRL_LAST)
1498 return 1;
1499
1500 slot = (slot + 1) % NUM_RX_BUFF;
1501
1502 /* I'm just being paranoid here :) */
1503 if (unlikely(slot == dev->rx_slot))
1504 return 0;
1505 }
1506}
1507
1508/* Hard IRQ */
1509static void emac_rxde(void *param)
1510{
1511 struct ocp_enet_private *dev = param;
1512 ++dev->estats.rx_stopped;
1513 emac_rx_disable_async(dev);
1514}
1515
1516/* Hard IRQ */
David Howells7d12e782006-10-05 14:55:46 +01001517static irqreturn_t emac_irq(int irq, void *dev_instance)
Eugene Surovegin37448f72005-10-10 16:58:14 -07001518{
1519 struct ocp_enet_private *dev = dev_instance;
Al Virob43de2d2005-12-01 10:15:21 -05001520 struct emac_regs __iomem *p = dev->emacp;
Eugene Surovegin37448f72005-10-10 16:58:14 -07001521 struct ibm_emac_error_stats *st = &dev->estats;
1522
1523 u32 isr = in_be32(&p->isr);
1524 out_be32(&p->isr, isr);
1525
1526 DBG("%d: isr = %08x" NL, dev->def->index, isr);
1527
1528 if (isr & EMAC_ISR_TXPE)
1529 ++st->tx_parity;
1530 if (isr & EMAC_ISR_RXPE)
1531 ++st->rx_parity;
1532 if (isr & EMAC_ISR_TXUE)
1533 ++st->tx_underrun;
1534 if (isr & EMAC_ISR_RXOE)
1535 ++st->rx_fifo_overrun;
1536 if (isr & EMAC_ISR_OVR)
1537 ++st->rx_overrun;
1538 if (isr & EMAC_ISR_BP)
1539 ++st->rx_bad_packet;
1540 if (isr & EMAC_ISR_RP)
1541 ++st->rx_runt_packet;
1542 if (isr & EMAC_ISR_SE)
1543 ++st->rx_short_event;
1544 if (isr & EMAC_ISR_ALE)
1545 ++st->rx_alignment_error;
1546 if (isr & EMAC_ISR_BFCS)
1547 ++st->rx_bad_fcs;
1548 if (isr & EMAC_ISR_PTLE)
1549 ++st->rx_packet_too_long;
1550 if (isr & EMAC_ISR_ORE)
1551 ++st->rx_out_of_range;
1552 if (isr & EMAC_ISR_IRE)
1553 ++st->rx_in_range;
1554 if (isr & EMAC_ISR_SQE)
1555 ++st->tx_sqe;
1556 if (isr & EMAC_ISR_TE)
1557 ++st->tx_errors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558
1559 return IRQ_HANDLED;
1560}
1561
Eugene Surovegin37448f72005-10-10 16:58:14 -07001562static struct net_device_stats *emac_stats(struct net_device *ndev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563{
Eugene Surovegin37448f72005-10-10 16:58:14 -07001564 struct ocp_enet_private *dev = ndev->priv;
1565 struct ibm_emac_stats *st = &dev->stats;
1566 struct ibm_emac_error_stats *est = &dev->estats;
1567 struct net_device_stats *nst = &dev->nstats;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568
Eugene Surovegin37448f72005-10-10 16:58:14 -07001569 DBG2("%d: stats" NL, dev->def->index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570
Eugene Surovegin37448f72005-10-10 16:58:14 -07001571 /* Compute "legacy" statistics */
1572 local_irq_disable();
1573 nst->rx_packets = (unsigned long)st->rx_packets;
1574 nst->rx_bytes = (unsigned long)st->rx_bytes;
1575 nst->tx_packets = (unsigned long)st->tx_packets;
1576 nst->tx_bytes = (unsigned long)st->tx_bytes;
1577 nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
1578 est->rx_dropped_error +
1579 est->rx_dropped_resize +
1580 est->rx_dropped_mtu);
1581 nst->tx_dropped = (unsigned long)est->tx_dropped;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
Eugene Surovegin37448f72005-10-10 16:58:14 -07001583 nst->rx_errors = (unsigned long)est->rx_bd_errors;
1584 nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
1585 est->rx_fifo_overrun +
1586 est->rx_overrun);
1587 nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
1588 est->rx_alignment_error);
1589 nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
1590 est->rx_bad_fcs);
1591 nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
1592 est->rx_bd_short_event +
1593 est->rx_bd_packet_too_long +
1594 est->rx_bd_out_of_range +
1595 est->rx_bd_in_range +
1596 est->rx_runt_packet +
1597 est->rx_short_event +
1598 est->rx_packet_too_long +
1599 est->rx_out_of_range +
1600 est->rx_in_range);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601
Eugene Surovegin37448f72005-10-10 16:58:14 -07001602 nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
1603 nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
1604 est->tx_underrun);
1605 nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
1606 nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
1607 est->tx_bd_excessive_collisions +
1608 est->tx_bd_late_collision +
1609 est->tx_bd_multple_collisions);
1610 local_irq_enable();
1611 return nst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612}
1613
Eugene Surovegin37448f72005-10-10 16:58:14 -07001614static void emac_remove(struct ocp_device *ocpdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615{
Eugene Surovegin37448f72005-10-10 16:58:14 -07001616 struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617
Eugene Surovegin37448f72005-10-10 16:58:14 -07001618 DBG("%d: remove" NL, dev->def->index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619
Al Virob43de2d2005-12-01 10:15:21 -05001620 ocp_set_drvdata(ocpdev, NULL);
Eugene Surovegin37448f72005-10-10 16:58:14 -07001621 unregister_netdev(dev->ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
Eugene Surovegin37448f72005-10-10 16:58:14 -07001623 tah_fini(dev->tah_dev);
1624 rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
1625 zmii_fini(dev->zmii_dev, dev->zmii_input);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626
Al Virob43de2d2005-12-01 10:15:21 -05001627 emac_dbg_register(dev->def->index, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
Eugene Surovegin37448f72005-10-10 16:58:14 -07001629 mal_unregister_commac(dev->mal, &dev->commac);
Al Virob43de2d2005-12-01 10:15:21 -05001630 iounmap(dev->emacp);
Eugene Surovegin37448f72005-10-10 16:58:14 -07001631 kfree(dev->ndev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632}
1633
Eugene Surovegin37448f72005-10-10 16:58:14 -07001634static struct mal_commac_ops emac_commac_ops = {
1635 .poll_tx = &emac_poll_tx,
1636 .poll_rx = &emac_poll_rx,
1637 .peek_rx = &emac_peek_rx,
1638 .rxde = &emac_rxde,
1639};
1640
1641static struct mal_commac_ops emac_commac_sg_ops = {
1642 .poll_tx = &emac_poll_tx,
1643 .poll_rx = &emac_poll_rx,
1644 .peek_rx = &emac_peek_rx_sg,
1645 .rxde = &emac_rxde,
1646};
1647
/* Ethtool support */
static int emac_ethtool_get_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;

	cmd->supported = dev->phy.features;
	cmd->port = PORT_MII;
	cmd->phy_address = dev->phy.address;
	cmd->transceiver = dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

	local_bh_disable();
	cmd->advertising = dev->phy.advertising;
	cmd->autoneg = dev->phy.autoneg;
	cmd->speed = dev->phy.speed;
	cmd->duplex = dev->phy.duplex;
	local_bh_enable();

	return 0;
}

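/*
 * PHY link state is read and updated under local_bh_disable() here and in
 * the other ethtool handlers, because the link timer (emac_link_timer)
 * also modifies it from timer (softirq) context.
 */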
static int emac_ethtool_set_settings(struct net_device *ndev,
				     struct ethtool_cmd *cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	u32 f = dev->phy.features;

	DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
	    cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);

	/* Basic sanity checks */
	if (dev->phy.address < 0)
		return -EOPNOTSUPP;
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;
	if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE) {
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_10baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_10baseT_Full))
				return -EINVAL;
			break;
		case SPEED_100:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_100baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_100baseT_Full))
				return -EINVAL;
			break;
		case SPEED_1000:
			if (cmd->duplex == DUPLEX_HALF
			    && !(f & SUPPORTED_1000baseT_Half))
				return -EINVAL;
			if (cmd->duplex == DUPLEX_FULL
			    && !(f & SUPPORTED_1000baseT_Full))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		local_bh_disable();
		dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
						cmd->duplex);

	} else {
		if (!(f & SUPPORTED_Autoneg))
			return -EINVAL;

		local_bh_disable();
		dev->phy.def->ops->setup_aneg(&dev->phy,
					      (cmd->advertising & f) |
					      (dev->phy.advertising &
					       (ADVERTISED_Pause |
						ADVERTISED_Asym_Pause)));
	}
	emac_force_link_update(dev);
	local_bh_enable();

	return 0;
}

static void emac_ethtool_get_ringparam(struct net_device *ndev,
				       struct ethtool_ringparam *rp)
{
	rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
	rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
}

static void emac_ethtool_get_pauseparam(struct net_device *ndev,
					struct ethtool_pauseparam *pp)
{
	struct ocp_enet_private *dev = ndev->priv;

	local_bh_disable();
	if ((dev->phy.features & SUPPORTED_Autoneg) &&
	    (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
		pp->autoneg = 1;

	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			pp->rx_pause = pp->tx_pause = 1;
		else if (dev->phy.asym_pause)
			pp->tx_pause = 1;
	}
	local_bh_enable();
}

static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	return dev->tah_dev != 0;
}

static int emac_get_regs_len(struct ocp_enet_private *dev)
{
	return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
}

static int emac_ethtool_get_regs_len(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	return sizeof(struct emac_ethtool_regs_hdr) +
	    emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
	    zmii_get_regs_len(dev->zmii_dev) +
	    rgmii_get_regs_len(dev->rgmii_dev) +
	    tah_get_regs_len(dev->tah_dev);
}

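/*
 * Register dump layout (ethtool -d): emac_ethtool_regs_hdr, then the MAL
 * and EMAC register blocks, then one optional block per attached bridge
 * (ZMII/RGMII/TAH), each flagged in hdr->components.
 */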
static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->version = EMAC_ETHTOOL_REGS_VER;
	hdr->index = dev->def->index;
	memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
	return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
}

static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	local_irq_disable();
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (dev->zmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (dev->rgmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (dev->tah_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
	local_irq_enable();
}

static int emac_ethtool_nway_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int res = 0;

	DBG("%d: nway_reset" NL, dev->def->index);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	local_bh_disable();
	if (!dev->phy.autoneg) {
		res = -EINVAL;
		goto out;
	}

	dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
	emac_force_link_update(dev);

 out:
	local_bh_enable();
	return res;
}

static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}

static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
				     u8 *buf)
{
	if (stringset == ETH_SS_STATS)
		memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
}

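/*
 * The ethtool statistics blob is a snapshot of dev->stats followed by
 * dev->estats, copied with interrupts disabled so both halves are
 * consistent; the ordering must match emac_stats_keys.  From userspace
 * this is what `ethtool -S ethX` reports.
 */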
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 *tmp_stats)
{
	struct ocp_enet_private *dev = ndev->priv;
	local_irq_disable();
	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
	local_irq_enable();
}

static void emac_ethtool_get_drvinfo(struct net_device *ndev,
				     struct ethtool_drvinfo *info)
{
	struct ocp_enet_private *dev = ndev->priv;

	strcpy(info->driver, "ibm_emac");
	strcpy(info->version, DRV_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
	info->n_stats = emac_ethtool_get_stats_count(ndev);
	info->regdump_len = emac_ethtool_get_regs_len(ndev);
}

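/*
 * Only "get" operations plus set_settings and nway_reset are provided;
 * ring sizes are compile-time constants (NUM_RX_BUFF/NUM_TX_BUFF), so
 * there is no set_ringparam.
 */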
static const struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};

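/*
 * Private ioctls: the standard SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG
 * requests and their legacy SIOCDEVPRIVATE equivalents are serviced
 * directly through MDIO reads/writes to the attached PHY.
 */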
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	uint16_t *data = (uint16_t *)&rq->ifr_ifru;

	DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

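/*
 * Probe sequence: allocate the net_device, locate the MAL instance this
 * EMAC hangs off, register its TX/RX channels, optionally resolve the
 * EMAC that provides MDIO access, attach the ZMII/RGMII/TAH bridges,
 * map the EMAC registers, find (or fake) a PHY, and finally register
 * the net_device.  Errors unwind through the out* labels at the bottom.
 */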
static int __init emac_probe(struct ocp_device *ocpdev)
{
	struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
	struct net_device *ndev;
	struct ocp_device *maldev;
	struct ocp_enet_private *dev;
	int err, i;

	DBG("%d: probe" NL, ocpdev->def->index);

	if (!emacdata) {
		printk(KERN_ERR "emac%d: Missing additional data!\n",
		       ocpdev->def->index);
		return -ENODEV;
	}

	/* Allocate our net_device structure */
	ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
	if (!ndev) {
		printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
		       ocpdev->def->index);
		return -ENOMEM;
	}
	dev = ndev->priv;
	dev->ndev = ndev;
	dev->ldev = &ocpdev->dev;
	dev->def = ocpdev->def;
	SET_MODULE_OWNER(ndev);

	/* Find MAL device we are connected to */
	maldev =
	    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
	if (!maldev) {
		printk(KERN_ERR "emac%d: unknown mal%d device!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}
	dev->mal = ocp_get_drvdata(maldev);
	if (!dev->mal) {
		printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
		       dev->def->index, emacdata->mal_idx);
		err = -ENODEV;
		goto out;
	}

	/* Register with MAL */
	dev->commac.ops = &emac_commac_ops;
	dev->commac.dev = dev;
	dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
	dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
	err = mal_register_commac(dev->mal, &dev->commac);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
		       dev->def->index, emacdata->mal_idx);
		goto out;
	}
	dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
	dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);

	/* Get pointers to BD rings */
	dev->tx_desc =
	    dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
						 emacdata->mal_tx_chan);
	dev->rx_desc =
	    dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
						 emacdata->mal_rx_chan);

	DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
	DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);

	/* Clean rings */
	memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
	memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));

	/* If we depend on another EMAC for MDIO, check whether it was probed already */
	if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
		struct ocp_device *mdiodev =
		    ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
				    emacdata->mdio_idx);
		if (!mdiodev) {
			printk(KERN_ERR "emac%d: unknown emac%d device!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
		dev->mdio_dev = ocp_get_drvdata(mdiodev);
		if (!dev->mdio_dev) {
			printk(KERN_ERR
			       "emac%d: emac%d hasn't been initialized yet!\n",
			       dev->def->index, emacdata->mdio_idx);
			err = -ENODEV;
			goto out2;
		}
	}

	/* Attach to ZMII, if needed */
	if ((err = zmii_attach(dev)) != 0)
		goto out2;

	/* Attach to RGMII, if needed */
	if ((err = rgmii_attach(dev)) != 0)
		goto out3;

	/* Attach to TAH, if needed */
	if ((err = tah_attach(dev)) != 0)
		goto out4;

	/* Map EMAC regs */
	dev->emacp = ioremap(dev->def->paddr, sizeof(struct emac_regs));
	if (!dev->emacp) {
		printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
		       dev->def->index);
		err = -ENOMEM;
		goto out5;
	}

	/* Fill in MAC address */
	for (i = 0; i < 6; ++i)
		ndev->dev_addr[i] = emacdata->mac_addr[i];

	/* Set some link defaults before we can find out real parameters */
	dev->phy.speed = SPEED_100;
	dev->phy.duplex = DUPLEX_FULL;
	dev->phy.autoneg = AUTONEG_DISABLE;
	dev->phy.pause = dev->phy.asym_pause = 0;
	dev->stop_timeout = STOP_TIMEOUT_100;
	init_timer(&dev->link_timer);
	dev->link_timer.function = emac_link_timer;
	dev->link_timer.data = (unsigned long)dev;

	/* Find PHY if any */
	dev->phy.dev = ndev;
	dev->phy.mode = emacdata->phy_mode;
	if (emacdata->phy_map != 0xffffffff) {
		u32 phy_map = emacdata->phy_map | busy_phy_map;
		u32 adv;

		DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
		    emacdata->phy_map, busy_phy_map);

		EMAC_RX_CLK_TX(dev->def->index);

		dev->phy.mdio_read = emac_mdio_read;
		dev->phy.mdio_write = emac_mdio_write;

		/* Configure EMAC with defaults so we can at least use MDIO
		 * This is needed mostly for 440GX
		 */
		if (emac_phy_gpcs(dev->phy.mode)) {
			/* XXX
			 * Make GPCS PHY address equal to EMAC index.
			 * We probably should take into account busy_phy_map
			 * and/or phy_map here.
			 */
			dev->phy.address = dev->def->index;
		}

		emac_configure(dev);

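		/*
		 * Scan the 32 MDIO addresses that are neither masked out by
		 * phy_map nor already claimed by another EMAC (busy_phy_map).
		 * A quick BMCR read filters out empty addresses before the
		 * full mii_phy_probe().
		 */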
		for (i = 0; i < 0x20; phy_map >>= 1, ++i)
			if (!(phy_map & 1)) {
				int r;
				busy_phy_map |= 1 << i;

				/* Quick check if there is a PHY at the address */
				r = emac_mdio_read(dev->ndev, i, MII_BMCR);
				if (r == 0xffff || r < 0)
					continue;
				if (!mii_phy_probe(&dev->phy, i))
					break;
			}
		if (i == 0x20) {
			printk(KERN_WARNING "emac%d: can't find PHY!\n",
			       dev->def->index);
			err = -ENXIO;
			goto out6;
		}

		/* Init PHY */
		if (dev->phy.def->ops->init)
			dev->phy.def->ops->init(&dev->phy);

		/* Disable any PHY features not supported by the platform */
		dev->phy.def->features &= ~emacdata->phy_feat_exc;

		/* Setup initial link parameters */
		if (dev->phy.features & SUPPORTED_Autoneg) {
			adv = dev->phy.features;
#if !defined(CONFIG_40x)
			adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
#endif
			/* Restart autonegotiation */
			dev->phy.def->ops->setup_aneg(&dev->phy, adv);
		} else {
			u32 f = dev->phy.def->features;
			int speed = SPEED_10, fd = DUPLEX_HALF;

			/* Select highest supported speed/duplex */
			if (f & SUPPORTED_1000baseT_Full) {
				speed = SPEED_1000;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_1000baseT_Half)
				speed = SPEED_1000;
			else if (f & SUPPORTED_100baseT_Full) {
				speed = SPEED_100;
				fd = DUPLEX_FULL;
			} else if (f & SUPPORTED_100baseT_Half)
				speed = SPEED_100;
			else if (f & SUPPORTED_10baseT_Full)
				fd = DUPLEX_FULL;

			/* Force link parameters */
			dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
		}
	} else {
		emac_reset(dev);

		/* PHY-less configuration.
		 * XXX I probably should move these settings to emacdata
		 */
		dev->phy.address = -1;
		dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		dev->phy.pause = 1;
	}

	/* Fill in the driver function table */
	ndev->open = &emac_open;
	if (dev->tah_dev) {
		ndev->hard_start_xmit = &emac_start_xmit_sg;
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	} else
		ndev->hard_start_xmit = &emac_start_xmit;
	ndev->tx_timeout = &emac_full_tx_reset;
	ndev->watchdog_timeo = 5 * HZ;
	ndev->stop = &emac_close;
	ndev->get_stats = &emac_stats;
	ndev->set_multicast_list = &emac_set_multicast_list;
	ndev->do_ioctl = &emac_ioctl;
	if (emac_phy_supports_gige(emacdata->phy_mode)) {
		ndev->change_mtu = &emac_change_mtu;
		dev->commac.ops = &emac_commac_sg_ops;
	}
	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
		       dev->def->index, err);
		goto out6;
	}

	ocp_set_drvdata(ocpdev, dev);

	printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
	       ndev->name, dev->def->index,
	       ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
	       ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);

	if (dev->phy.address >= 0)
		printk("%s: found %s PHY (0x%02x)\n", ndev->name,
		       dev->phy.def->name, dev->phy.address);

	emac_dbg_register(dev->def->index, dev);

	return 0;
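	/* Error unwind: release everything acquired above, in reverse order */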
 out6:
	iounmap(dev->emacp);
 out5:
	tah_fini(dev->tah_dev);
 out4:
	rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
 out3:
	zmii_fini(dev->zmii_dev, dev->zmii_input);
 out2:
	mal_unregister_commac(dev->mal, &dev->commac);
 out:
	kfree(ndev);
	return err;
}

static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID }
};

static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};

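/*
 * Module init: MAL must come up before the EMAC driver registers, since
 * emac_probe() expects its MAL instance to be initialized already.  The
 * EMAC clock is switched to internal around driver registration
 * (EMAC_CLK_INTERNAL/EMAC_CLK_EXTERNAL), presumably so each EMAC can be
 * reset even when no external clock is present; on platforms where these
 * macros expand to nothing this has no effect.
 */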
static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	DBG(": init" NL);

	if (mal_init())
		return -ENODEV;

	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		EMAC_CLK_EXTERNAL;
		ocp_unregister_driver(&emac_driver);
		mal_exit();
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	emac_init_debug();
	return 0;
}

static void __exit emac_exit(void)
{
	DBG(": exit" NL);
	ocp_unregister_driver(&emac_driver);
	mal_exit();
	emac_fini_debug();
}

module_init(emac_init);
module_exit(emac_exit);