/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>

#include "macb.h"

#define RX_BUFFER_SIZE		128
#define RX_RING_SIZE		512 /* must be power of 2 */
#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)

#define TX_RING_SIZE		128 /* must be power of 2 */
#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)

/* minimum number of free TX descriptors before waking up TX process */
#define MACB_TX_WAKEUP_THRESH	(TX_RING_SIZE / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
				 | MACB_BIT(ISR_RLE)			\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/*
 * Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
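
/*
 * Back-of-the-envelope check of that figure (our derivation, not from a
 * datasheet): a 1518-octet maximum frame plus 8 octets of preamble/SFD
 * and a 12-octet inter-frame gap is 1538 octets, i.e. 12304 bit times,
 * which at 10 Mbit/s is about 1230 us.
 */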

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(unsigned int index)
{
	return index & (TX_RING_SIZE - 1);
}

static unsigned int macb_tx_ring_avail(struct macb *bp)
{
	return (bp->tx_tail - bp->tx_head) & (TX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
{
	return &bp->tx_ring[macb_tx_ring_wrap(index)];
}

static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
{
	return &bp->tx_skb[macb_tx_ring_wrap(index)];
}

static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);

	return bp->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(unsigned int index)
{
	return index & (RX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	return &bp->rx_ring[macb_rx_ring_wrap(index)];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index);
}
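
/*
 * A quick illustration of the masking above: both ring sizes are powers
 * of two, so e.g. with TX_RING_SIZE = 128 an index of 130 wraps to
 * 130 & 127 = 2, and macb_tx_ring_avail() stays correct even when
 * tx_head/tx_tail overflow, since the difference is reduced mod 128.
 */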

void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);
}
EXPORT_SYMBOL_GPL(macb_set_hwaddr);

void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = bp->pdev->dev.platform_data;

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	netdev_info(bp->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
EXPORT_SYMBOL_GPL(macb_get_hwaddr);

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}

static int macb_mdio_reset(struct mii_bus *bus)
{
	return 0;
}

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}
/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	phydev = phy_find_first(bp->mii_bus);
	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -1;
	}

	pdata = dev_get_platdata(&bp->pdev->dev);
	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
		if (!ret) {
			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
		}
	}

	/* attach the mac to the phy */
	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
				 bp->phy_interface);
	if (ret) {
		netdev_err(dev, "Could not attach to PHY\n");
		return ret;
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp))
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;
	bp->phy_dev = phydev;

	return 0;
}
int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	bp->mii_bus->reset = &macb_mdio_reset;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->dev->dev;
	pdata = bp->pdev->dev.platform_data;

	if (pdata)
		bp->mii_bus->phy_mask = pdata->phy_mask;

	bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!bp->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		bp->mii_bus->irq[i] = PHY_POLL;

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	if (mdiobus_register(bp->mii_bus))
		goto err_out_free_mdio_irq;

	if (macb_mii_probe(bp->dev) != 0)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
	kfree(bp->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(macb_mii_init);
static void macb_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + MACB_PFR;
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, reg++)
		*p += __raw_readl(reg);
}

static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		usleep_range(10, 250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb *bp = container_of(work, struct macb, tx_error_task);
	struct macb_tx_skb *tx_skb;
	struct sk_buff *skb;
	unsigned int tail;

	netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
		    bp->tx_tail, bp->tx_head);

	/* Make sure nobody is trying to queue up new packets */
	netif_stop_queue(bp->dev);

	/*
	 * Stop transmission now
	 * (in case we have just queued new packets)
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* No need for the lock here as nobody will interrupt us anymore */

	/*
	 * Treat frames in the TX queue, including the ones that caused
	 * the error. Free transmit buffers in the upper layer.
	 */
	for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(bp, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(bp, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
				    macb_tx_ring_wrap(tail), skb->data);
			bp->stats.tx_packets++;
			bp->stats.tx_bytes += skb->len;
		} else {
			/*
			 * "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about those.
			 * Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
				 DMA_TO_DEVICE);
		tx_skb->skb = NULL;
		dev_kfree_skb(skb);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	macb_writel(bp, TBQP, bp->tx_ring_dma);
	/* Make TX ring reflect state of hardware */
	bp->tx_head = bp->tx_tail = 0;

	/* Now we are ready to start transmission again */
	netif_wake_queue(bp->dev);

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	macb_writel(bp, IER, MACB_TX_INT_FLAGS);
}
static void macb_tx_interrupt(struct macb *bp)
{
	unsigned int tail;
	unsigned int head;
	u32 status;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = bp->tx_head;
	for (tail = bp->tx_tail; tail != head; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(bp, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		tx_skb = macb_tx_skb(bp, tail);
		skb = tx_skb->skb;

		netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
			    macb_tx_ring_wrap(tail), skb->data);
		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
				 DMA_TO_DEVICE);
		bp->stats.tx_packets++;
		bp->stats.tx_bytes += skb->len;
		tx_skb->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_tail = tail;
	if (netif_queue_stopped(bp->dev)
	    && macb_tx_ring_avail(bp) > MACB_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);
}
static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	desc = macb_rx_desc(bp, last_frag);
	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(first_frag),
		    macb_rx_ring_wrap(last_frag), len);

	/*
	 * The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
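	/*
	 * Concretely: NET_IP_ALIGN (two bytes here) plus the 14-byte
	 * Ethernet header makes 16 bytes, so the IP header that follows
	 * lands on a 4-byte boundary.
	 */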
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = RX_BUFFER_SIZE;

		if (offset + frag_len > len) {
			BUG_ON(frag != last_frag);
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(bp, frag), frag_len);
		offset += RX_BUFFER_SIZE;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->stats.rx_packets++;
	bp->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}
/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/*
	 * When this happens, the hardware stats registers for
	 * whatever caused this are updated, so we don't have to record
	 * anything.
	 */
}
static int macb_rx(struct macb *bp, int budget)
{
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 addr, ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;
			BUG_ON(first_frag == -1);

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}

static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	work_done = 0;

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = macb_rx(bp, budget);
	if (work_done < budget) {
		napi_complete(napi);

		/*
		 * We've done what we can to clean the buffers. Make sure we
		 * get notified when new packets arrive.
		 */
		macb_writel(bp, IER, MACB_RX_INT_FLAGS);
	}

	/* TODO: Handle errors */

	return work_done;
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *bp = netdev_priv(dev);
	u32 status;

	status = macb_readl(bp, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			macb_writel(bp, IDR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			macb_writel(bp, IDR, MACB_RX_INT_FLAGS);

			if (napi_schedule_prep(&bp->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&bp->tx_error_task);
			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(bp);

		/*
		 * Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;
		}

		if (status & MACB_BIT(HRESP)) {
			/*
			 * TODO: Reset the hardware, and maybe move the
			 * netdev_err to a lower-priority context as well
			 * (work queue?)
			 */
			netdev_err(dev, "DMA bus error: HRESP not OK\n");
		}

		status = macb_readl(bp, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	macb_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	dma_addr_t mapping;
	unsigned int len, entry;
	struct macb_dma_desc *desc;
	struct macb_tx_skb *tx_skb;
	u32 ctrl;
	unsigned long flags;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: len %u head %p data %p tail %p end %p\n",
		    skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (macb_tx_ring_avail(bp) < 1) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   bp->tx_head, bp->tx_tail);
		return NETDEV_TX_BUSY;
	}

	entry = macb_tx_ring_wrap(bp->tx_head);
	bp->tx_head++;
	netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 len, DMA_TO_DEVICE);

	tx_skb = &bp->tx_skb[entry];
	tx_skb->skb = skb;
	tx_skb->mapping = mapping;
	netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
		    skb->data, (unsigned long)mapping);

	ctrl = MACB_BF(TX_FRMLEN, len);
	ctrl |= MACB_BIT(TX_LAST);
	if (entry == (TX_RING_SIZE - 1))
		ctrl |= MACB_BIT(TX_WRAP);

	desc = &bp->tx_ring[entry];
	desc->addr = mapping;
	desc->ctrl = ctrl;

	/* Make newly initialized descriptor visible to hardware */
	wmb();

	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (macb_tx_ring_avail(bp) < 1)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

static void macb_free_consistent(struct macb *bp)
{
	if (bp->tx_skb) {
		kfree(bp->tx_skb);
		bp->tx_skb = NULL;
	}
	if (bp->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
				  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}
	if (bp->tx_ring) {
		dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
				  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
	}
	if (bp->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  RX_RING_SIZE * RX_BUFFER_SIZE,
				  bp->rx_buffers, bp->rx_buffers_dma);
		bp->rx_buffers = NULL;
	}
}

static int macb_alloc_consistent(struct macb *bp)
{
	int size;

	size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
	bp->tx_skb = kmalloc(size, GFP_KERNEL);
	if (!bp->tx_skb)
		goto out_err;

	size = RX_RING_BYTES;
	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->rx_ring_dma, GFP_KERNEL);
	if (!bp->rx_ring)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

	size = TX_RING_BYTES;
	bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->tx_ring_dma, GFP_KERNEL);
	if (!bp->tx_ring)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);

	size = RX_RING_SIZE * RX_BUFFER_SIZE;
	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &bp->rx_buffers_dma, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	dma_addr_t addr;

	addr = bp->rx_buffers_dma;
	for (i = 0; i < RX_RING_SIZE; i++) {
		bp->rx_ring[i].addr = addr;
		bp->rx_ring[i].ctrl = 0;
		addr += RX_BUFFER_SIZE;
	}
	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);

	for (i = 0; i < TX_RING_SIZE; i++) {
		bp->tx_ring[i].addr = 0;
		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
	}
	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);

	bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
}

static void macb_reset_hw(struct macb *bp)
{
	/*
	 * Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	macb_writel(bp, IDR, -1);
	macb_readl(bp, ISR);
}

static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}
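
/*
 * Worked example of the selection above (illustrative numbers, ours): a
 * 133 MHz pclk on plain MACB falls through to MACB_CLK_DIV64, giving an
 * MDC of roughly 133 / 64 ~= 2.1 MHz, under the customary 2.5 MHz MDIO
 * clock ceiling.
 */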

/*
 * Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

/*
 * Configure the receive DMA engine to use the correct receive buffer size.
 * This is a configurable parameter for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	u32 dmacfg;

	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
		gem_writel(bp, DMACFG, dmacfg);
	}
}
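
/*
 * The RXBS field counts in units of 64 bytes, so with RX_BUFFER_SIZE = 128
 * the code above programs RXBS = 128 / 64 = 2.
 */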

static void macb_init_hw(struct macb *bp)
{
	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	config |= MACB_BIT(BIG);		/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	bp->speed = SPEED_10;
	bp->duplex = DUPLEX_HALF;

	macb_configure_dma(bp);

	/* Initialize TX and RX buffers */
	macb_writel(bp, RBQP, bp->rx_ring_dma);
	macb_writel(bp, TBQP, bp->tx_ring_dma);

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));

	/* Enable interrupts */
	macb_writel(bp, IER, (MACB_RX_INT_FLAGS
			      | MACB_TX_INT_FLAGS
			      | MACB_BIT(HRESP)));
}

/*
 * The hash address register is 64 bits long and takes up two
 * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function. The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received. If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast. A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register. A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register. To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */

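/*
 * Worked example (ours, for illustration): for the broadcast address
 * ff:ff:ff:ff:ff:ff every da[n] is 1, so each hi[n] is the XOR of eight
 * ones and comes out 0; broadcast therefore hashes to index 0.
 */
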
static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

/*
 * Return the hash index value for the specified address.
 */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}

/*
 * Add multicast addresses to the internal multicast-hash table.
 */
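/*
 * The 64-bit filter is kept as two 32-bit words below: (bitnr >> 5)
 * selects the word and (bitnr & 31) the bit within it, so e.g. hash
 * index 37 sets bit 5 of mc_filter[1], which ends up in HRT.
 */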
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}

/*
 * Enable/Disable promiscuous and multicast modes.
 */
void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC)
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);
	else if (dev->flags & (~IFF_PROMISC))
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

	if (dev->flags & IFF_ALLMULTI) {
		/* Enable all multicast mode */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Enable specific multicasts */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable all multicast mode */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}
EXPORT_SYMBOL_GPL(macb_set_rx_mode);

static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	int err;

	netdev_dbg(bp->dev, "open\n");

	/* carrier starts down */
	netif_carrier_off(dev);

	/* if the phy is not yet registered, retry later */
	if (!bp->phy_dev)
		return -EAGAIN;

	if (!is_valid_ether_addr(dev->dev_addr))
		return -EADDRNOTAVAIL;

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		return err;
	}

	napi_enable(&bp->napi);

	macb_init_rings(bp);
	macb_init_hw(bp);

	/* schedule a link state check */
	phy_start(bp->phy_dev);

	netif_start_queue(dev);

	return 0;
}

static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (bp->phy_dev)
		phy_stop(bp->phy_dev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	return 0;
}

static void gem_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + GEM_OTX;
	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
	u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;

	for (; p < end; p++, reg++)
		*p += __raw_readl(reg);
}

static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}

struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* read stats from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->sqe_test_errors +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* XXX: What does "missed" mean? */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;
	/* Don't know about heartbeat or window errors... */

	return nstat;
}
EXPORT_SYMBOL_GPL(macb_get_stats);

static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}

static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}

static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}

static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp->tx_tail);
	head = macb_tx_ring_wrap(bp->tx_head);

	regs_buff[0]  = macb_readl(bp, NCR);
	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2]  = macb_readl(bp, NSR);
	regs_buff[3]  = macb_readl(bp, TSR);
	regs_buff[4]  = macb_readl(bp, RBQP);
	regs_buff[5]  = macb_readl(bp, TBQP);
	regs_buff[6]  = macb_readl(bp, RSR);
	regs_buff[7]  = macb_readl(bp, IMR);

	regs_buff[8]  = tail;
	regs_buff[9]  = head;
	regs_buff[10] = macb_tx_dma(bp, tail);
	regs_buff[11] = macb_tx_dma(bp, head);

	if (macb_is_gem(bp)) {
		regs_buff[12] = gem_readl(bp, USRIO);
		regs_buff[13] = gem_readl(bp, DMACFG);
	}
}

Joachim Eastwood0005f542012-10-18 11:01:12 +00001413const struct ethtool_ops macb_ethtool_ops = {
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001414 .get_settings = macb_get_settings,
1415 .set_settings = macb_set_settings,
Nicolas Ferred1d1b532012-10-31 06:04:56 +00001416 .get_regs_len = macb_get_regs_len,
1417 .get_regs = macb_get_regs,
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001418 .get_link = ethtool_op_get_link,
Richard Cochran17f393e2012-04-03 22:59:31 +00001419 .get_ts_info = ethtool_op_get_ts_info,
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001420};
Joachim Eastwood0005f542012-10-18 11:01:12 +00001421EXPORT_SYMBOL_GPL(macb_ethtool_ops);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001422
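/*
 * Illustrative userspace check (not part of this driver): once an
 * interface is bound to this driver, the register snapshot above can
 * be dumped with e.g. "ethtool -d eth0", and the link state read with
 * "ethtool eth0".
 */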
int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
EXPORT_SYMBOL_GPL(macb_ioctl);

static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
};

#if defined(CONFIG_OF)
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb" },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,pc302-gem" },
	{ .compatible = "cdns,gem" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, macb_dt_ids);

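/*
 * Illustrative device tree node matched by the table above (the unit
 * address, register size, interrupt number and MAC address are made-up
 * example values, not taken from a real board file):
 *
 *	macb0: ethernet@fffc4000 {
 *		compatible = "cdns,at91sam9260-macb";
 *		reg = <0xfffc4000 0x100>;
 *		interrupts = <21>;
 *		phy-mode = "rmii";
 *		local-mac-address = [00 04 25 21 43 65];
 *	};
 */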
static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;

	if (np)
		return of_get_phy_mode(np);

	return -ENODEV;
}

static int __devinit macb_get_hwaddr_dt(struct macb *bp)
{
	struct device_node *np = bp->pdev->dev.of_node;
	if (np) {
		const char *mac = of_get_mac_address(np);
		if (mac) {
			memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
			return 0;
		}
	}

	return -ENODEV;
}
#else
static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
{
	return -ENODEV;
}
static int __devinit macb_get_hwaddr_dt(struct macb *bp)
{
	return -ENODEV;
}
#endif

static int __init macb_probe(struct platform_device *pdev)
{
	struct macb_platform_data *pdata;
	struct resource *regs;
	struct net_device *dev;
	struct macb *bp;
	struct phy_device *phydev;
	u32 config;
	int err = -ENXIO;
	struct pinctrl *pinctrl;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "no mmio resource defined\n");
		goto err_out;
	}

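	/*
	 * Claim and apply the default pin configuration.  A missing
	 * pinctrl node is only warned about, but a probe deferral from
	 * the pinctrl core is propagated.
	 */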
	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(pinctrl)) {
		err = PTR_ERR(pinctrl);
		if (err == -EPROBE_DEFER)
			goto err_out;

		dev_warn(&pdev->dev, "No pinctrl provided\n");
	}

	err = -ENOMEM;
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		goto err_out;

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* TODO: Actually, we have some interesting features... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;

	spin_lock_init(&bp->lock);
	INIT_WORK(&bp->tx_error_task, macb_tx_error_task);

	bp->pclk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(bp->pclk)) {
		dev_err(&pdev->dev, "failed to get macb_clk\n");
		goto err_out_free_dev;
	}
	clk_enable(bp->pclk);

	bp->hclk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(bp->hclk)) {
		dev_err(&pdev->dev, "failed to get hclk\n");
		goto err_out_put_pclk;
	}
	clk_enable(bp->hclk);

	bp->regs = ioremap(regs->start, resource_size(regs));
	if (!bp->regs) {
		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_disable_clocks;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
	if (err) {
		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
			dev->irq, err);
		goto err_out_iounmap;
	}

	dev->netdev_ops = &macb_netdev_ops;
	netif_napi_add(dev, &bp->napi, macb_poll, 64);
	dev->ethtool_ops = &macb_ethtool_ops;

	dev->base_addr = regs->start;

	/* Set MII management clock divider */
	config = macb_mdc_clk_div(bp);
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);

	err = macb_get_hwaddr_dt(bp);
	if (err < 0)
		macb_get_hwaddr(bp);

	err = macb_get_phy_mode_dt(pdev);
	if (err < 0) {
		pdata = pdev->dev.platform_data;
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

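	/*
	 * Program the PHY interface selection in USRIO.  Note the
	 * differing encodings: on AT91 the RMII bit selects RMII mode
	 * and CLKEN must always be set, while on other platforms the
	 * MII bit selects MII and a cleared register selects RMII.
	 */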
	if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
		macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
	else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
#if defined(CONFIG_ARCH_AT91)
		macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
					       MACB_BIT(CLKEN)));
#else
		macb_or_gem_writel(bp, USRIO, 0);
#endif
	else
#if defined(CONFIG_ARCH_AT91)
		macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
#else
		macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
#endif

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	/* Propagate the MDIO bus error; a bare goto would return 0 here. */
	err = macb_mii_init(bp);
	if (err)
		goto err_out_unregister_netdev;

	platform_set_drvdata(pdev, dev);

	netif_carrier_off(dev);

	netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
		    dev->irq, dev->dev_addr);

	phydev = bp->phy_dev;
	netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
		    phydev->drv->name, dev_name(&phydev->dev), phydev->irq);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_iounmap:
	iounmap(bp->regs);
err_out_disable_clocks:
	clk_disable(bp->hclk);
	clk_put(bp->hclk);
	clk_disable(bp->pclk);
err_out_put_pclk:
	clk_put(bp->pclk);
err_out_free_dev:
	free_netdev(dev);
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}

static int __exit macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
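		/*
		 * Tear down in roughly the reverse order of probe: PHY,
		 * MDIO bus, netdev registration, IRQ, MMIO mapping and
		 * finally the clocks.
		 */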
		bp = netdev_priv(dev);
		if (bp->phy_dev)
			phy_disconnect(bp->phy_dev);
		mdiobus_unregister(bp->mii_bus);
		kfree(bp->mii_bus->irq);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(bp->regs);
		clk_disable(bp->hclk);
		clk_put(bp->hclk);
		clk_disable(bp->pclk);
		clk_put(bp->pclk);
		free_netdev(dev);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}

#ifdef CONFIG_PM
static int macb_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

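	/* Gate the bus and peripheral clocks; macb_resume() re-enables them. */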
	clk_disable(bp->hclk);
	clk_disable(bp->pclk);

	return 0;
}

static int macb_resume(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	clk_enable(bp->pclk);
	clk_enable(bp->hclk);

	netif_device_attach(netdev);

	return 0;
}
#else
#define macb_suspend	NULL
#define macb_resume	NULL
#endif

static struct platform_driver macb_driver = {
	.remove		= __exit_p(macb_remove),
	.suspend	= macb_suspend,
	.resume		= macb_resume,
	.driver		= {
		.name		= "macb",
		.owner		= THIS_MODULE,
		.of_match_table	= of_match_ptr(macb_dt_ids),
	},
};

static int __init macb_init(void)
{
	return platform_driver_probe(&macb_driver, macb_probe);
}

static void __exit macb_exit(void)
{
	platform_driver_unregister(&macb_driver);
}

module_init(macb_init);
module_exit(macb_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");