Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1 | /* |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 2 | * Cadence MACB/GEM Ethernet Controller driver |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 3 | * |
| 4 | * Copyright (C) 2004-2006 Atmel Corporation |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as |
| 8 | * published by the Free Software Foundation. |
| 9 | */ |
| 10 | |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 11 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 12 | #include <linux/clk.h> |
| 13 | #include <linux/module.h> |
| 14 | #include <linux/moduleparam.h> |
| 15 | #include <linux/kernel.h> |
| 16 | #include <linux/types.h> |
Nicolas Ferre | 909a858 | 2012-11-19 06:00:21 +0000 | [diff] [blame] | 17 | #include <linux/circ_buf.h> |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 18 | #include <linux/slab.h> |
| 19 | #include <linux/init.h> |
Soren Brinkmann | 60fe716 | 2013-12-10 16:07:21 -0800 | [diff] [blame] | 20 | #include <linux/io.h> |
Joachim Eastwood | 2dbfdbb | 2012-11-11 13:56:27 +0000 | [diff] [blame] | 21 | #include <linux/gpio.h> |
Alexey Dobriyan | a6b7a40 | 2011-06-06 10:43:46 +0000 | [diff] [blame] | 22 | #include <linux/interrupt.h> |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 23 | #include <linux/netdevice.h> |
| 24 | #include <linux/etherdevice.h> |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 25 | #include <linux/dma-mapping.h> |
Jamie Iles | 84e0cdb | 2011-03-08 20:17:06 +0000 | [diff] [blame] | 26 | #include <linux/platform_data/macb.h> |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 27 | #include <linux/platform_device.h> |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 28 | #include <linux/phy.h> |
Olof Johansson | b17471f | 2011-12-20 13:13:07 -0800 | [diff] [blame] | 29 | #include <linux/of.h> |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 30 | #include <linux/of_device.h> |
Boris BREZILLON | 148cbb5 | 2013-08-22 17:57:28 +0200 | [diff] [blame] | 31 | #include <linux/of_mdio.h> |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 32 | #include <linux/of_net.h> |
Jean-Christophe PLAGNIOL-VILLARD | 8ef29f8a | 2012-10-31 06:04:59 +0000 | [diff] [blame] | 33 | #include <linux/pinctrl/consumer.h> |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 34 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 35 | #include "macb.h" |
| 36 | |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 37 | #define MACB_RX_BUFFER_SIZE 128 |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 38 | #define RX_BUFFER_MULTIPLE 64 /* bytes */ |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 39 | #define RX_RING_SIZE 512 /* must be power of 2 */ |
| 40 | #define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 41 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 42 | #define TX_RING_SIZE 128 /* must be power of 2 */ |
| 43 | #define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 44 | |
Nicolas Ferre | 909a858 | 2012-11-19 06:00:21 +0000 | [diff] [blame] | 45 | /* level of occupied TX descriptors under which we wake up TX process */ |
| 46 | #define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 47 | |
| 48 | #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ |
| 49 | | MACB_BIT(ISR_ROVR)) |
Nicolas Ferre | e86cd53 | 2012-10-31 06:04:57 +0000 | [diff] [blame] | 50 | #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ |
| 51 | | MACB_BIT(ISR_RLE) \ |
| 52 | | MACB_BIT(TXERR)) |
| 53 | #define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)) |
| 54 | |
| 55 | /* |
| 56 | * Graceful stop timeouts in us. We should allow up to |
| 57 | * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) |
| 58 | */ |
| 59 | #define MACB_HALT_TIMEOUT 1230 |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 60 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 61 | /* Ring buffer accessors */ |
| 62 | static unsigned int macb_tx_ring_wrap(unsigned int index) |
| 63 | { |
| 64 | return index & (TX_RING_SIZE - 1); |
| 65 | } |
| 66 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 67 | static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index) |
| 68 | { |
| 69 | return &bp->tx_ring[macb_tx_ring_wrap(index)]; |
| 70 | } |
| 71 | |
| 72 | static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index) |
| 73 | { |
| 74 | return &bp->tx_skb[macb_tx_ring_wrap(index)]; |
| 75 | } |
| 76 | |
| 77 | static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index) |
| 78 | { |
| 79 | dma_addr_t offset; |
| 80 | |
| 81 | offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc); |
| 82 | |
| 83 | return bp->tx_ring_dma + offset; |
| 84 | } |
| 85 | |
| 86 | static unsigned int macb_rx_ring_wrap(unsigned int index) |
| 87 | { |
| 88 | return index & (RX_RING_SIZE - 1); |
| 89 | } |
| 90 | |
| 91 | static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) |
| 92 | { |
| 93 | return &bp->rx_ring[macb_rx_ring_wrap(index)]; |
| 94 | } |
| 95 | |
| 96 | static void *macb_rx_buffer(struct macb *bp, unsigned int index) |
| 97 | { |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 98 | return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 99 | } |
| 100 | |
Joachim Eastwood | 314bccc | 2012-11-07 08:14:52 +0000 | [diff] [blame] | 101 | void macb_set_hwaddr(struct macb *bp) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 102 | { |
| 103 | u32 bottom; |
| 104 | u16 top; |
| 105 | |
| 106 | bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 107 | macb_or_gem_writel(bp, SA1B, bottom); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 108 | top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 109 | macb_or_gem_writel(bp, SA1T, top); |
Joachim Eastwood | 3629a6c | 2012-11-11 13:56:28 +0000 | [diff] [blame] | 110 | |
| 111 | /* Clear unused address register sets */ |
| 112 | macb_or_gem_writel(bp, SA2B, 0); |
| 113 | macb_or_gem_writel(bp, SA2T, 0); |
| 114 | macb_or_gem_writel(bp, SA3B, 0); |
| 115 | macb_or_gem_writel(bp, SA3T, 0); |
| 116 | macb_or_gem_writel(bp, SA4B, 0); |
| 117 | macb_or_gem_writel(bp, SA4T, 0); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 118 | } |
Joachim Eastwood | 314bccc | 2012-11-07 08:14:52 +0000 | [diff] [blame] | 119 | EXPORT_SYMBOL_GPL(macb_set_hwaddr); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 120 | |
Joachim Eastwood | 314bccc | 2012-11-07 08:14:52 +0000 | [diff] [blame] | 121 | void macb_get_hwaddr(struct macb *bp) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 122 | { |
Joachim Eastwood | d25e78a | 2012-11-07 08:14:51 +0000 | [diff] [blame] | 123 | struct macb_platform_data *pdata; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 124 | u32 bottom; |
| 125 | u16 top; |
| 126 | u8 addr[6]; |
Joachim Eastwood | 17b8bb3 | 2012-11-07 08:14:50 +0000 | [diff] [blame] | 127 | int i; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 128 | |
Jingoo Han | c607a0d | 2013-08-30 14:12:21 +0900 | [diff] [blame] | 129 | pdata = dev_get_platdata(&bp->pdev->dev); |
Joachim Eastwood | d25e78a | 2012-11-07 08:14:51 +0000 | [diff] [blame] | 130 | |
Joachim Eastwood | 17b8bb3 | 2012-11-07 08:14:50 +0000 | [diff] [blame] | 131 | /* Check all 4 address register for vaild address */ |
| 132 | for (i = 0; i < 4; i++) { |
| 133 | bottom = macb_or_gem_readl(bp, SA1B + i * 8); |
| 134 | top = macb_or_gem_readl(bp, SA1T + i * 8); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 135 | |
Joachim Eastwood | d25e78a | 2012-11-07 08:14:51 +0000 | [diff] [blame] | 136 | if (pdata && pdata->rev_eth_addr) { |
| 137 | addr[5] = bottom & 0xff; |
| 138 | addr[4] = (bottom >> 8) & 0xff; |
| 139 | addr[3] = (bottom >> 16) & 0xff; |
| 140 | addr[2] = (bottom >> 24) & 0xff; |
| 141 | addr[1] = top & 0xff; |
| 142 | addr[0] = (top & 0xff00) >> 8; |
| 143 | } else { |
| 144 | addr[0] = bottom & 0xff; |
| 145 | addr[1] = (bottom >> 8) & 0xff; |
| 146 | addr[2] = (bottom >> 16) & 0xff; |
| 147 | addr[3] = (bottom >> 24) & 0xff; |
| 148 | addr[4] = top & 0xff; |
| 149 | addr[5] = (top >> 8) & 0xff; |
| 150 | } |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 151 | |
Joachim Eastwood | 17b8bb3 | 2012-11-07 08:14:50 +0000 | [diff] [blame] | 152 | if (is_valid_ether_addr(addr)) { |
| 153 | memcpy(bp->dev->dev_addr, addr, sizeof(addr)); |
| 154 | return; |
| 155 | } |
Sven Schnelle | d1d5741 | 2008-06-09 16:33:57 -0700 | [diff] [blame] | 156 | } |
Joachim Eastwood | 17b8bb3 | 2012-11-07 08:14:50 +0000 | [diff] [blame] | 157 | |
| 158 | netdev_info(bp->dev, "invalid hw address, using random\n"); |
| 159 | eth_hw_addr_random(bp->dev); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 160 | } |
Joachim Eastwood | 314bccc | 2012-11-07 08:14:52 +0000 | [diff] [blame] | 161 | EXPORT_SYMBOL_GPL(macb_get_hwaddr); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 162 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 163 | static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 164 | { |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 165 | struct macb *bp = bus->priv; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 166 | int value; |
| 167 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 168 | macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) |
| 169 | | MACB_BF(RW, MACB_MAN_READ) |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 170 | | MACB_BF(PHYA, mii_id) |
| 171 | | MACB_BF(REGA, regnum) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 172 | | MACB_BF(CODE, MACB_MAN_CODE))); |
| 173 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 174 | /* wait for end of transfer */ |
| 175 | while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) |
| 176 | cpu_relax(); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 177 | |
| 178 | value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 179 | |
| 180 | return value; |
| 181 | } |
| 182 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 183 | static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, |
| 184 | u16 value) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 185 | { |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 186 | struct macb *bp = bus->priv; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 187 | |
| 188 | macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) |
| 189 | | MACB_BF(RW, MACB_MAN_WRITE) |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 190 | | MACB_BF(PHYA, mii_id) |
| 191 | | MACB_BF(REGA, regnum) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 192 | | MACB_BF(CODE, MACB_MAN_CODE) |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 193 | | MACB_BF(DATA, value))); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 194 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 195 | /* wait for end of transfer */ |
| 196 | while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) |
| 197 | cpu_relax(); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 198 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 199 | return 0; |
| 200 | } |
| 201 | |
Soren Brinkmann | e1824df | 2013-12-10 16:07:23 -0800 | [diff] [blame] | 202 | /** |
| 203 | * macb_set_tx_clk() - Set a clock to a new frequency |
| 204 | * @clk Pointer to the clock to change |
| 205 | * @rate New frequency in Hz |
| 206 | * @dev Pointer to the struct net_device |
| 207 | */ |
| 208 | static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) |
| 209 | { |
| 210 | long ferr, rate, rate_rounded; |
| 211 | |
| 212 | switch (speed) { |
| 213 | case SPEED_10: |
| 214 | rate = 2500000; |
| 215 | break; |
| 216 | case SPEED_100: |
| 217 | rate = 25000000; |
| 218 | break; |
| 219 | case SPEED_1000: |
| 220 | rate = 125000000; |
| 221 | break; |
| 222 | default: |
Soren Brinkmann | 9319e47 | 2013-12-10 20:57:57 -0800 | [diff] [blame] | 223 | return; |
Soren Brinkmann | e1824df | 2013-12-10 16:07:23 -0800 | [diff] [blame] | 224 | } |
| 225 | |
| 226 | rate_rounded = clk_round_rate(clk, rate); |
| 227 | if (rate_rounded < 0) |
| 228 | return; |
| 229 | |
| 230 | /* RGMII allows 50 ppm frequency error. Test and warn if this limit |
| 231 | * is not satisfied. |
| 232 | */ |
| 233 | ferr = abs(rate_rounded - rate); |
| 234 | ferr = DIV_ROUND_UP(ferr, rate / 100000); |
| 235 | if (ferr > 5) |
| 236 | netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", |
| 237 | rate); |
| 238 | |
| 239 | if (clk_set_rate(clk, rate_rounded)) |
| 240 | netdev_err(dev, "adjusting tx_clk failed.\n"); |
| 241 | } |
| 242 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 243 | static void macb_handle_link_change(struct net_device *dev) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 244 | { |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 245 | struct macb *bp = netdev_priv(dev); |
| 246 | struct phy_device *phydev = bp->phy_dev; |
| 247 | unsigned long flags; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 248 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 249 | int status_change = 0; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 250 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 251 | spin_lock_irqsave(&bp->lock, flags); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 252 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 253 | if (phydev->link) { |
| 254 | if ((bp->speed != phydev->speed) || |
| 255 | (bp->duplex != phydev->duplex)) { |
| 256 | u32 reg; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 257 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 258 | reg = macb_readl(bp, NCFGR); |
| 259 | reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); |
Patrice Vilchez | 140b755 | 2012-10-31 06:04:50 +0000 | [diff] [blame] | 260 | if (macb_is_gem(bp)) |
| 261 | reg &= ~GEM_BIT(GBE); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 262 | |
| 263 | if (phydev->duplex) |
| 264 | reg |= MACB_BIT(FD); |
Atsushi Nemoto | 179956f | 2008-02-21 22:50:54 +0900 | [diff] [blame] | 265 | if (phydev->speed == SPEED_100) |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 266 | reg |= MACB_BIT(SPD); |
Patrice Vilchez | 140b755 | 2012-10-31 06:04:50 +0000 | [diff] [blame] | 267 | if (phydev->speed == SPEED_1000) |
| 268 | reg |= GEM_BIT(GBE); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 269 | |
Patrice Vilchez | 140b755 | 2012-10-31 06:04:50 +0000 | [diff] [blame] | 270 | macb_or_gem_writel(bp, NCFGR, reg); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 271 | |
| 272 | bp->speed = phydev->speed; |
| 273 | bp->duplex = phydev->duplex; |
| 274 | status_change = 1; |
| 275 | } |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 276 | } |
| 277 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 278 | if (phydev->link != bp->link) { |
Anton Vorontsov | c8f1568 | 2008-07-22 15:41:24 -0700 | [diff] [blame] | 279 | if (!phydev->link) { |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 280 | bp->speed = 0; |
| 281 | bp->duplex = -1; |
| 282 | } |
| 283 | bp->link = phydev->link; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 284 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 285 | status_change = 1; |
| 286 | } |
| 287 | |
| 288 | spin_unlock_irqrestore(&bp->lock, flags); |
| 289 | |
Soren Brinkmann | e1824df | 2013-12-10 16:07:23 -0800 | [diff] [blame] | 290 | if (!IS_ERR(bp->tx_clk)) |
| 291 | macb_set_tx_clk(bp->tx_clk, phydev->speed, dev); |
| 292 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 293 | if (status_change) { |
Nicolas Ferre | 03fc472 | 2012-07-03 23:14:13 +0000 | [diff] [blame] | 294 | if (phydev->link) { |
| 295 | netif_carrier_on(dev); |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 296 | netdev_info(dev, "link up (%d/%s)\n", |
| 297 | phydev->speed, |
| 298 | phydev->duplex == DUPLEX_FULL ? |
| 299 | "Full" : "Half"); |
Nicolas Ferre | 03fc472 | 2012-07-03 23:14:13 +0000 | [diff] [blame] | 300 | } else { |
| 301 | netif_carrier_off(dev); |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 302 | netdev_info(dev, "link down\n"); |
Nicolas Ferre | 03fc472 | 2012-07-03 23:14:13 +0000 | [diff] [blame] | 303 | } |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 304 | } |
| 305 | } |
| 306 | |
| 307 | /* based on au1000_eth. c*/ |
| 308 | static int macb_mii_probe(struct net_device *dev) |
| 309 | { |
| 310 | struct macb *bp = netdev_priv(dev); |
Joachim Eastwood | 2dbfdbb | 2012-11-11 13:56:27 +0000 | [diff] [blame] | 311 | struct macb_platform_data *pdata; |
Jiri Pirko | 7455a76 | 2010-02-08 05:12:08 +0000 | [diff] [blame] | 312 | struct phy_device *phydev; |
Joachim Eastwood | 2dbfdbb | 2012-11-11 13:56:27 +0000 | [diff] [blame] | 313 | int phy_irq; |
Jiri Pirko | 7455a76 | 2010-02-08 05:12:08 +0000 | [diff] [blame] | 314 | int ret; |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 315 | |
Jiri Pirko | 7455a76 | 2010-02-08 05:12:08 +0000 | [diff] [blame] | 316 | phydev = phy_find_first(bp->mii_bus); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 317 | if (!phydev) { |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 318 | netdev_err(dev, "no PHY found\n"); |
Boris BREZILLON | 7daa78e | 2013-08-27 14:36:14 +0200 | [diff] [blame] | 319 | return -ENXIO; |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 320 | } |
| 321 | |
Joachim Eastwood | 2dbfdbb | 2012-11-11 13:56:27 +0000 | [diff] [blame] | 322 | pdata = dev_get_platdata(&bp->pdev->dev); |
| 323 | if (pdata && gpio_is_valid(pdata->phy_irq_pin)) { |
| 324 | ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int"); |
| 325 | if (!ret) { |
| 326 | phy_irq = gpio_to_irq(pdata->phy_irq_pin); |
| 327 | phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; |
| 328 | } |
| 329 | } |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 330 | |
| 331 | /* attach the mac to the phy */ |
Florian Fainelli | f9a8f83 | 2013-01-14 00:52:52 +0000 | [diff] [blame] | 332 | ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 333 | bp->phy_interface); |
Jiri Pirko | 7455a76 | 2010-02-08 05:12:08 +0000 | [diff] [blame] | 334 | if (ret) { |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 335 | netdev_err(dev, "Could not attach to PHY\n"); |
Jiri Pirko | 7455a76 | 2010-02-08 05:12:08 +0000 | [diff] [blame] | 336 | return ret; |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 337 | } |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 338 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 339 | /* mask with MAC supported features */ |
Patrice Vilchez | 140b755 | 2012-10-31 06:04:50 +0000 | [diff] [blame] | 340 | if (macb_is_gem(bp)) |
| 341 | phydev->supported &= PHY_GBIT_FEATURES; |
| 342 | else |
| 343 | phydev->supported &= PHY_BASIC_FEATURES; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 344 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 345 | phydev->advertising = phydev->supported; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 346 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 347 | bp->link = 0; |
| 348 | bp->speed = 0; |
| 349 | bp->duplex = -1; |
| 350 | bp->phy_dev = phydev; |
| 351 | |
| 352 | return 0; |
| 353 | } |
| 354 | |
Joachim Eastwood | 0005f54 | 2012-10-18 11:01:12 +0000 | [diff] [blame] | 355 | int macb_mii_init(struct macb *bp) |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 356 | { |
Jamie Iles | 84e0cdb | 2011-03-08 20:17:06 +0000 | [diff] [blame] | 357 | struct macb_platform_data *pdata; |
Boris BREZILLON | 148cbb5 | 2013-08-22 17:57:28 +0200 | [diff] [blame] | 358 | struct device_node *np; |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 359 | int err = -ENXIO, i; |
| 360 | |
Uwe Kleine-Koenig | 3dbda77 | 2009-07-23 08:31:31 +0200 | [diff] [blame] | 361 | /* Enable management port */ |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 362 | macb_writel(bp, NCR, MACB_BIT(MPE)); |
| 363 | |
Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 364 | bp->mii_bus = mdiobus_alloc(); |
| 365 | if (bp->mii_bus == NULL) { |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 366 | err = -ENOMEM; |
| 367 | goto err_out; |
| 368 | } |
| 369 | |
Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 370 | bp->mii_bus->name = "MACB_mii_bus"; |
| 371 | bp->mii_bus->read = &macb_mdio_read; |
| 372 | bp->mii_bus->write = &macb_mdio_write; |
Florian Fainelli | 98d5e57 | 2012-01-09 23:59:11 +0000 | [diff] [blame] | 373 | snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", |
| 374 | bp->pdev->name, bp->pdev->id); |
Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 375 | bp->mii_bus->priv = bp; |
| 376 | bp->mii_bus->parent = &bp->dev->dev; |
Jingoo Han | c607a0d | 2013-08-30 14:12:21 +0900 | [diff] [blame] | 377 | pdata = dev_get_platdata(&bp->pdev->dev); |
Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 378 | |
Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 379 | bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); |
| 380 | if (!bp->mii_bus->irq) { |
| 381 | err = -ENOMEM; |
| 382 | goto err_out_free_mdiobus; |
| 383 | } |
| 384 | |
Jamie Iles | 9152394 | 2011-02-28 04:05:25 +0000 | [diff] [blame] | 385 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 386 | |
Boris BREZILLON | 148cbb5 | 2013-08-22 17:57:28 +0200 | [diff] [blame] | 387 | np = bp->pdev->dev.of_node; |
| 388 | if (np) { |
| 389 | /* try dt phy registration */ |
| 390 | err = of_mdiobus_register(bp->mii_bus, np); |
| 391 | |
| 392 | /* fallback to standard phy registration if no phy were |
| 393 | found during dt phy registration */ |
| 394 | if (!err && !phy_find_first(bp->mii_bus)) { |
| 395 | for (i = 0; i < PHY_MAX_ADDR; i++) { |
| 396 | struct phy_device *phydev; |
| 397 | |
| 398 | phydev = mdiobus_scan(bp->mii_bus, i); |
| 399 | if (IS_ERR(phydev)) { |
| 400 | err = PTR_ERR(phydev); |
| 401 | break; |
| 402 | } |
| 403 | } |
| 404 | |
| 405 | if (err) |
| 406 | goto err_out_unregister_bus; |
| 407 | } |
| 408 | } else { |
| 409 | for (i = 0; i < PHY_MAX_ADDR; i++) |
| 410 | bp->mii_bus->irq[i] = PHY_POLL; |
| 411 | |
| 412 | if (pdata) |
| 413 | bp->mii_bus->phy_mask = pdata->phy_mask; |
| 414 | |
| 415 | err = mdiobus_register(bp->mii_bus); |
| 416 | } |
| 417 | |
| 418 | if (err) |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 419 | goto err_out_free_mdio_irq; |
| 420 | |
Boris BREZILLON | 7daa78e | 2013-08-27 14:36:14 +0200 | [diff] [blame] | 421 | err = macb_mii_probe(bp->dev); |
| 422 | if (err) |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 423 | goto err_out_unregister_bus; |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 424 | |
| 425 | return 0; |
| 426 | |
| 427 | err_out_unregister_bus: |
Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 428 | mdiobus_unregister(bp->mii_bus); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 429 | err_out_free_mdio_irq: |
Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 430 | kfree(bp->mii_bus->irq); |
| 431 | err_out_free_mdiobus: |
| 432 | mdiobus_free(bp->mii_bus); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 433 | err_out: |
| 434 | return err; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 435 | } |
Joachim Eastwood | 0005f54 | 2012-10-18 11:01:12 +0000 | [diff] [blame] | 436 | EXPORT_SYMBOL_GPL(macb_mii_init); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 437 | |
| 438 | static void macb_update_stats(struct macb *bp) |
| 439 | { |
| 440 | u32 __iomem *reg = bp->regs + MACB_PFR; |
Jamie Iles | a494ed8 | 2011-03-09 16:26:35 +0000 | [diff] [blame] | 441 | u32 *p = &bp->hw_stats.macb.rx_pause_frames; |
| 442 | u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 443 | |
| 444 | WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); |
| 445 | |
| 446 | for(; p < end; p++, reg++) |
Haavard Skinnemoen | 0f0d84e | 2006-12-08 14:38:30 +0100 | [diff] [blame] | 447 | *p += __raw_readl(reg); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 448 | } |
| 449 | |
Nicolas Ferre | e86cd53 | 2012-10-31 06:04:57 +0000 | [diff] [blame] | 450 | static int macb_halt_tx(struct macb *bp) |
| 451 | { |
| 452 | unsigned long halt_time, timeout; |
| 453 | u32 status; |
| 454 | |
| 455 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); |
| 456 | |
| 457 | timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); |
| 458 | do { |
| 459 | halt_time = jiffies; |
| 460 | status = macb_readl(bp, TSR); |
| 461 | if (!(status & MACB_BIT(TGO))) |
| 462 | return 0; |
| 463 | |
| 464 | usleep_range(10, 250); |
| 465 | } while (time_before(halt_time, timeout)); |
| 466 | |
| 467 | return -ETIMEDOUT; |
| 468 | } |
| 469 | |
| 470 | static void macb_tx_error_task(struct work_struct *work) |
| 471 | { |
| 472 | struct macb *bp = container_of(work, struct macb, tx_error_task); |
| 473 | struct macb_tx_skb *tx_skb; |
| 474 | struct sk_buff *skb; |
| 475 | unsigned int tail; |
| 476 | |
| 477 | netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n", |
| 478 | bp->tx_tail, bp->tx_head); |
| 479 | |
| 480 | /* Make sure nobody is trying to queue up new packets */ |
| 481 | netif_stop_queue(bp->dev); |
| 482 | |
| 483 | /* |
| 484 | * Stop transmission now |
| 485 | * (in case we have just queued new packets) |
| 486 | */ |
| 487 | if (macb_halt_tx(bp)) |
| 488 | /* Just complain for now, reinitializing TX path can be good */ |
| 489 | netdev_err(bp->dev, "BUG: halt tx timed out\n"); |
| 490 | |
| 491 | /* No need for the lock here as nobody will interrupt us anymore */ |
| 492 | |
| 493 | /* |
| 494 | * Treat frames in TX queue including the ones that caused the error. |
| 495 | * Free transmit buffers in upper layer. |
| 496 | */ |
| 497 | for (tail = bp->tx_tail; tail != bp->tx_head; tail++) { |
| 498 | struct macb_dma_desc *desc; |
| 499 | u32 ctrl; |
| 500 | |
| 501 | desc = macb_tx_desc(bp, tail); |
| 502 | ctrl = desc->ctrl; |
| 503 | tx_skb = macb_tx_skb(bp, tail); |
| 504 | skb = tx_skb->skb; |
| 505 | |
| 506 | if (ctrl & MACB_BIT(TX_USED)) { |
| 507 | netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", |
| 508 | macb_tx_ring_wrap(tail), skb->data); |
| 509 | bp->stats.tx_packets++; |
| 510 | bp->stats.tx_bytes += skb->len; |
| 511 | } else { |
| 512 | /* |
| 513 | * "Buffers exhausted mid-frame" errors may only happen |
| 514 | * if the driver is buggy, so complain loudly about those. |
| 515 | * Statistics are updated by hardware. |
| 516 | */ |
| 517 | if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) |
| 518 | netdev_err(bp->dev, |
| 519 | "BUG: TX buffers exhausted mid-frame\n"); |
| 520 | |
| 521 | desc->ctrl = ctrl | MACB_BIT(TX_USED); |
| 522 | } |
| 523 | |
| 524 | dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len, |
| 525 | DMA_TO_DEVICE); |
| 526 | tx_skb->skb = NULL; |
| 527 | dev_kfree_skb(skb); |
| 528 | } |
| 529 | |
| 530 | /* Make descriptor updates visible to hardware */ |
| 531 | wmb(); |
| 532 | |
| 533 | /* Reinitialize the TX desc queue */ |
| 534 | macb_writel(bp, TBQP, bp->tx_ring_dma); |
| 535 | /* Make TX ring reflect state of hardware */ |
| 536 | bp->tx_head = bp->tx_tail = 0; |
| 537 | |
| 538 | /* Now we are ready to start transmission again */ |
| 539 | netif_wake_queue(bp->dev); |
| 540 | |
| 541 | /* Housework before enabling TX IRQ */ |
| 542 | macb_writel(bp, TSR, macb_readl(bp, TSR)); |
| 543 | macb_writel(bp, IER, MACB_TX_INT_FLAGS); |
| 544 | } |
| 545 | |
| 546 | static void macb_tx_interrupt(struct macb *bp) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 547 | { |
| 548 | unsigned int tail; |
| 549 | unsigned int head; |
| 550 | u32 status; |
| 551 | |
| 552 | status = macb_readl(bp, TSR); |
| 553 | macb_writel(bp, TSR, status); |
| 554 | |
Nicolas Ferre | 581df9e | 2013-05-14 03:00:16 +0000 | [diff] [blame] | 555 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
| 556 | macb_writel(bp, ISR, MACB_BIT(TCOMP)); |
Steffen Trumtrar | 749a2b6 | 2013-03-27 23:07:05 +0000 | [diff] [blame] | 557 | |
Nicolas Ferre | e86cd53 | 2012-10-31 06:04:57 +0000 | [diff] [blame] | 558 | netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", |
| 559 | (unsigned long)status); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 560 | |
| 561 | head = bp->tx_head; |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 562 | for (tail = bp->tx_tail; tail != head; tail++) { |
| 563 | struct macb_tx_skb *tx_skb; |
| 564 | struct sk_buff *skb; |
| 565 | struct macb_dma_desc *desc; |
| 566 | u32 ctrl; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 567 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 568 | desc = macb_tx_desc(bp, tail); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 569 | |
Havard Skinnemoen | 03dbe05 | 2012-10-31 06:04:51 +0000 | [diff] [blame] | 570 | /* Make hw descriptor updates visible to CPU */ |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 571 | rmb(); |
Havard Skinnemoen | 03dbe05 | 2012-10-31 06:04:51 +0000 | [diff] [blame] | 572 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 573 | ctrl = desc->ctrl; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 574 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 575 | if (!(ctrl & MACB_BIT(TX_USED))) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 576 | break; |
| 577 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 578 | tx_skb = macb_tx_skb(bp, tail); |
| 579 | skb = tx_skb->skb; |
| 580 | |
Havard Skinnemoen | a268adb | 2012-10-31 06:04:52 +0000 | [diff] [blame] | 581 | netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 582 | macb_tx_ring_wrap(tail), skb->data); |
| 583 | dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len, |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 584 | DMA_TO_DEVICE); |
| 585 | bp->stats.tx_packets++; |
| 586 | bp->stats.tx_bytes += skb->len; |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 587 | tx_skb->skb = NULL; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 588 | dev_kfree_skb_irq(skb); |
| 589 | } |
| 590 | |
| 591 | bp->tx_tail = tail; |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 592 | if (netif_queue_stopped(bp->dev) |
Nicolas Ferre | 909a858 | 2012-11-19 06:00:21 +0000 | [diff] [blame] | 593 | && CIRC_CNT(bp->tx_head, bp->tx_tail, |
| 594 | TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 595 | netif_wake_queue(bp->dev); |
| 596 | } |
| 597 | |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 598 | static void gem_rx_refill(struct macb *bp) |
| 599 | { |
| 600 | unsigned int entry; |
| 601 | struct sk_buff *skb; |
| 602 | struct macb_dma_desc *desc; |
| 603 | dma_addr_t paddr; |
| 604 | |
| 605 | while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { |
| 606 | u32 addr, ctrl; |
| 607 | |
| 608 | entry = macb_rx_ring_wrap(bp->rx_prepared_head); |
| 609 | desc = &bp->rx_ring[entry]; |
| 610 | |
| 611 | /* Make hw descriptor updates visible to CPU */ |
| 612 | rmb(); |
| 613 | |
| 614 | addr = desc->addr; |
| 615 | ctrl = desc->ctrl; |
| 616 | bp->rx_prepared_head++; |
| 617 | |
| 618 | if ((addr & MACB_BIT(RX_USED))) |
| 619 | continue; |
| 620 | |
| 621 | if (bp->rx_skbuff[entry] == NULL) { |
| 622 | /* allocate sk_buff for this free entry in ring */ |
| 623 | skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); |
| 624 | if (unlikely(skb == NULL)) { |
| 625 | netdev_err(bp->dev, |
| 626 | "Unable to allocate sk_buff\n"); |
| 627 | break; |
| 628 | } |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 629 | |
| 630 | /* now fill corresponding descriptor entry */ |
| 631 | paddr = dma_map_single(&bp->pdev->dev, skb->data, |
| 632 | bp->rx_buffer_size, DMA_FROM_DEVICE); |
Soren Brinkmann | 9203090 | 2014-03-04 08:46:39 -0800 | [diff] [blame] | 633 | if (dma_mapping_error(&bp->pdev->dev, paddr)) { |
| 634 | dev_kfree_skb(skb); |
| 635 | break; |
| 636 | } |
| 637 | |
| 638 | bp->rx_skbuff[entry] = skb; |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 639 | |
| 640 | if (entry == RX_RING_SIZE - 1) |
| 641 | paddr |= MACB_BIT(RX_WRAP); |
| 642 | bp->rx_ring[entry].addr = paddr; |
| 643 | bp->rx_ring[entry].ctrl = 0; |
| 644 | |
| 645 | /* properly align Ethernet header */ |
| 646 | skb_reserve(skb, NET_IP_ALIGN); |
| 647 | } |
| 648 | } |
| 649 | |
| 650 | /* Make descriptor updates visible to hardware */ |
| 651 | wmb(); |
| 652 | |
| 653 | netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", |
| 654 | bp->rx_prepared_head, bp->rx_tail); |
| 655 | } |
| 656 | |
| 657 | /* Mark DMA descriptors from begin up to and not including end as unused */ |
| 658 | static void discard_partial_frame(struct macb *bp, unsigned int begin, |
| 659 | unsigned int end) |
| 660 | { |
| 661 | unsigned int frag; |
| 662 | |
| 663 | for (frag = begin; frag != end; frag++) { |
| 664 | struct macb_dma_desc *desc = macb_rx_desc(bp, frag); |
| 665 | desc->addr &= ~MACB_BIT(RX_USED); |
| 666 | } |
| 667 | |
| 668 | /* Make descriptor updates visible to hardware */ |
| 669 | wmb(); |
| 670 | |
| 671 | /* |
| 672 | * When this happens, the hardware stats registers for |
| 673 | * whatever caused this is updated, so we don't have to record |
| 674 | * anything. |
| 675 | */ |
| 676 | } |
| 677 | |
| 678 | static int gem_rx(struct macb *bp, int budget) |
| 679 | { |
| 680 | unsigned int len; |
| 681 | unsigned int entry; |
| 682 | struct sk_buff *skb; |
| 683 | struct macb_dma_desc *desc; |
| 684 | int count = 0; |
| 685 | |
| 686 | while (count < budget) { |
| 687 | u32 addr, ctrl; |
| 688 | |
| 689 | entry = macb_rx_ring_wrap(bp->rx_tail); |
| 690 | desc = &bp->rx_ring[entry]; |
| 691 | |
| 692 | /* Make hw descriptor updates visible to CPU */ |
| 693 | rmb(); |
| 694 | |
| 695 | addr = desc->addr; |
| 696 | ctrl = desc->ctrl; |
| 697 | |
| 698 | if (!(addr & MACB_BIT(RX_USED))) |
| 699 | break; |
| 700 | |
| 701 | desc->addr &= ~MACB_BIT(RX_USED); |
| 702 | bp->rx_tail++; |
| 703 | count++; |
| 704 | |
| 705 | if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) { |
| 706 | netdev_err(bp->dev, |
| 707 | "not whole frame pointed by descriptor\n"); |
| 708 | bp->stats.rx_dropped++; |
| 709 | break; |
| 710 | } |
| 711 | skb = bp->rx_skbuff[entry]; |
| 712 | if (unlikely(!skb)) { |
| 713 | netdev_err(bp->dev, |
| 714 | "inconsistent Rx descriptor chain\n"); |
| 715 | bp->stats.rx_dropped++; |
| 716 | break; |
| 717 | } |
| 718 | /* now everything is ready for receiving packet */ |
| 719 | bp->rx_skbuff[entry] = NULL; |
| 720 | len = MACB_BFEXT(RX_FRMLEN, ctrl); |
| 721 | |
| 722 | netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); |
| 723 | |
| 724 | skb_put(skb, len); |
| 725 | addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); |
| 726 | dma_unmap_single(&bp->pdev->dev, addr, |
Soren Brinkmann | 48330e08 | 2014-03-04 08:46:40 -0800 | [diff] [blame] | 727 | bp->rx_buffer_size, DMA_FROM_DEVICE); |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 728 | |
| 729 | skb->protocol = eth_type_trans(skb, bp->dev); |
| 730 | skb_checksum_none_assert(skb); |
| 731 | |
| 732 | bp->stats.rx_packets++; |
| 733 | bp->stats.rx_bytes += skb->len; |
| 734 | |
| 735 | #if defined(DEBUG) && defined(VERBOSE_DEBUG) |
| 736 | netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", |
| 737 | skb->len, skb->csum); |
| 738 | print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1, |
| 739 | skb->mac_header, 16, true); |
| 740 | print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1, |
| 741 | skb->data, 32, true); |
| 742 | #endif |
| 743 | |
| 744 | netif_receive_skb(skb); |
| 745 | } |
| 746 | |
| 747 | gem_rx_refill(bp); |
| 748 | |
| 749 | return count; |
| 750 | } |
| 751 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 752 | static int macb_rx_frame(struct macb *bp, unsigned int first_frag, |
| 753 | unsigned int last_frag) |
| 754 | { |
| 755 | unsigned int len; |
| 756 | unsigned int frag; |
Havard Skinnemoen | 29bc2e1 | 2012-10-31 06:04:58 +0000 | [diff] [blame] | 757 | unsigned int offset; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 758 | struct sk_buff *skb; |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 759 | struct macb_dma_desc *desc; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 760 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 761 | desc = macb_rx_desc(bp, last_frag); |
| 762 | len = MACB_BFEXT(RX_FRMLEN, desc->ctrl); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 763 | |
Havard Skinnemoen | a268adb | 2012-10-31 06:04:52 +0000 | [diff] [blame] | 764 | netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 765 | macb_rx_ring_wrap(first_frag), |
| 766 | macb_rx_ring_wrap(last_frag), len); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 767 | |
Havard Skinnemoen | 29bc2e1 | 2012-10-31 06:04:58 +0000 | [diff] [blame] | 768 | /* |
| 769 | * The ethernet header starts NET_IP_ALIGN bytes into the |
| 770 | * first buffer. Since the header is 14 bytes, this makes the |
| 771 | * payload word-aligned. |
| 772 | * |
| 773 | * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy |
| 774 | * the two padding bytes into the skb so that we avoid hitting |
| 775 | * the slowpath in memcpy(), and pull them off afterwards. |
| 776 | */ |
| 777 | skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 778 | if (!skb) { |
| 779 | bp->stats.rx_dropped++; |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 780 | for (frag = first_frag; ; frag++) { |
| 781 | desc = macb_rx_desc(bp, frag); |
| 782 | desc->addr &= ~MACB_BIT(RX_USED); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 783 | if (frag == last_frag) |
| 784 | break; |
| 785 | } |
Havard Skinnemoen | 03dbe05 | 2012-10-31 06:04:51 +0000 | [diff] [blame] | 786 | |
| 787 | /* Make descriptor updates visible to hardware */ |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 788 | wmb(); |
Havard Skinnemoen | 03dbe05 | 2012-10-31 06:04:51 +0000 | [diff] [blame] | 789 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 790 | return 1; |
| 791 | } |
| 792 | |
Havard Skinnemoen | 29bc2e1 | 2012-10-31 06:04:58 +0000 | [diff] [blame] | 793 | offset = 0; |
| 794 | len += NET_IP_ALIGN; |
Eric Dumazet | bc8acf2 | 2010-09-02 13:07:41 -0700 | [diff] [blame] | 795 | skb_checksum_none_assert(skb); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 796 | skb_put(skb, len); |
| 797 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 798 | for (frag = first_frag; ; frag++) { |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 799 | unsigned int frag_len = bp->rx_buffer_size; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 800 | |
| 801 | if (offset + frag_len > len) { |
| 802 | BUG_ON(frag != last_frag); |
| 803 | frag_len = len - offset; |
| 804 | } |
Arnaldo Carvalho de Melo | 27d7ff4 | 2007-03-31 11:55:19 -0300 | [diff] [blame] | 805 | skb_copy_to_linear_data_offset(skb, offset, |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 806 | macb_rx_buffer(bp, frag), frag_len); |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 807 | offset += bp->rx_buffer_size; |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 808 | desc = macb_rx_desc(bp, frag); |
| 809 | desc->addr &= ~MACB_BIT(RX_USED); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 810 | |
| 811 | if (frag == last_frag) |
| 812 | break; |
| 813 | } |
| 814 | |
Havard Skinnemoen | 03dbe05 | 2012-10-31 06:04:51 +0000 | [diff] [blame] | 815 | /* Make descriptor updates visible to hardware */ |
| 816 | wmb(); |
| 817 | |
Havard Skinnemoen | 29bc2e1 | 2012-10-31 06:04:58 +0000 | [diff] [blame] | 818 | __skb_pull(skb, NET_IP_ALIGN); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 819 | skb->protocol = eth_type_trans(skb, bp->dev); |
| 820 | |
| 821 | bp->stats.rx_packets++; |
Havard Skinnemoen | 29bc2e1 | 2012-10-31 06:04:58 +0000 | [diff] [blame] | 822 | bp->stats.rx_bytes += skb->len; |
Havard Skinnemoen | a268adb | 2012-10-31 06:04:52 +0000 | [diff] [blame] | 823 | netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 824 | skb->len, skb->csum); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 825 | netif_receive_skb(skb); |
| 826 | |
| 827 | return 0; |
| 828 | } |
| 829 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 830 | static int macb_rx(struct macb *bp, int budget) |
| 831 | { |
| 832 | int received = 0; |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 833 | unsigned int tail; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 834 | int first_frag = -1; |
| 835 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 836 | for (tail = bp->rx_tail; budget > 0; tail++) { |
| 837 | struct macb_dma_desc *desc = macb_rx_desc(bp, tail); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 838 | u32 addr, ctrl; |
| 839 | |
Havard Skinnemoen | 03dbe05 | 2012-10-31 06:04:51 +0000 | [diff] [blame] | 840 | /* Make hw descriptor updates visible to CPU */ |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 841 | rmb(); |
Havard Skinnemoen | 03dbe05 | 2012-10-31 06:04:51 +0000 | [diff] [blame] | 842 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 843 | addr = desc->addr; |
| 844 | ctrl = desc->ctrl; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 845 | |
| 846 | if (!(addr & MACB_BIT(RX_USED))) |
| 847 | break; |
| 848 | |
| 849 | if (ctrl & MACB_BIT(RX_SOF)) { |
| 850 | if (first_frag != -1) |
| 851 | discard_partial_frame(bp, first_frag, tail); |
| 852 | first_frag = tail; |
| 853 | } |
| 854 | |
| 855 | if (ctrl & MACB_BIT(RX_EOF)) { |
| 856 | int dropped; |
| 857 | BUG_ON(first_frag == -1); |
| 858 | |
| 859 | dropped = macb_rx_frame(bp, first_frag, tail); |
| 860 | first_frag = -1; |
| 861 | if (!dropped) { |
| 862 | received++; |
| 863 | budget--; |
| 864 | } |
| 865 | } |
| 866 | } |
| 867 | |
| 868 | if (first_frag != -1) |
| 869 | bp->rx_tail = first_frag; |
| 870 | else |
| 871 | bp->rx_tail = tail; |
| 872 | |
| 873 | return received; |
| 874 | } |
| 875 | |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 876 | static int macb_poll(struct napi_struct *napi, int budget) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 877 | { |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 878 | struct macb *bp = container_of(napi, struct macb, napi); |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 879 | int work_done; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 880 | u32 status; |
| 881 | |
| 882 | status = macb_readl(bp, RSR); |
| 883 | macb_writel(bp, RSR, status); |
| 884 | |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 885 | work_done = 0; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 886 | |
Havard Skinnemoen | a268adb | 2012-10-31 06:04:52 +0000 | [diff] [blame] | 887 | netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 888 | (unsigned long)status, budget); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 889 | |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 890 | work_done = bp->macbgem_ops.mog_rx(bp, budget); |
Joshua Hoke | b336369 | 2010-10-25 01:44:22 +0000 | [diff] [blame] | 891 | if (work_done < budget) { |
Ben Hutchings | 288379f | 2009-01-19 16:43:59 -0800 | [diff] [blame] | 892 | napi_complete(napi); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 893 | |
Joshua Hoke | b336369 | 2010-10-25 01:44:22 +0000 | [diff] [blame] | 894 | /* |
| 895 | * We've done what we can to clean the buffers. Make sure we |
| 896 | * get notified when new packets arrive. |
| 897 | */ |
| 898 | macb_writel(bp, IER, MACB_RX_INT_FLAGS); |
Nicolas Ferre | 8770e91 | 2013-02-12 11:08:48 +0100 | [diff] [blame] | 899 | |
| 900 | /* Packets received while interrupts were disabled */ |
| 901 | status = macb_readl(bp, RSR); |
| 902 | if (unlikely(status)) |
| 903 | napi_reschedule(napi); |
Joshua Hoke | b336369 | 2010-10-25 01:44:22 +0000 | [diff] [blame] | 904 | } |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 905 | |
| 906 | /* TODO: Handle errors */ |
| 907 | |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 908 | return work_done; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 909 | } |
| 910 | |
| 911 | static irqreturn_t macb_interrupt(int irq, void *dev_id) |
| 912 | { |
| 913 | struct net_device *dev = dev_id; |
| 914 | struct macb *bp = netdev_priv(dev); |
| 915 | u32 status; |
| 916 | |
| 917 | status = macb_readl(bp, ISR); |
| 918 | |
| 919 | if (unlikely(!status)) |
| 920 | return IRQ_NONE; |
| 921 | |
| 922 | spin_lock(&bp->lock); |
| 923 | |
| 924 | while (status) { |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 925 | /* close possible race with dev_close */ |
| 926 | if (unlikely(!netif_running(dev))) { |
Joachim Eastwood | 95ebcea | 2012-10-22 08:45:31 +0000 | [diff] [blame] | 927 | macb_writel(bp, IDR, -1); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 928 | break; |
| 929 | } |
| 930 | |
Havard Skinnemoen | a268adb | 2012-10-31 06:04:52 +0000 | [diff] [blame] | 931 | netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status); |
| 932 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 933 | if (status & MACB_RX_INT_FLAGS) { |
Joshua Hoke | b336369 | 2010-10-25 01:44:22 +0000 | [diff] [blame] | 934 | /* |
| 935 | * There's no point taking any more interrupts |
| 936 | * until we have processed the buffers. The |
| 937 | * scheduling call may fail if the poll routine |
| 938 | * is already scheduled, so disable interrupts |
| 939 | * now. |
| 940 | */ |
| 941 | macb_writel(bp, IDR, MACB_RX_INT_FLAGS); |
Nicolas Ferre | 581df9e | 2013-05-14 03:00:16 +0000 | [diff] [blame] | 942 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
| 943 | macb_writel(bp, ISR, MACB_BIT(RCOMP)); |
Joshua Hoke | b336369 | 2010-10-25 01:44:22 +0000 | [diff] [blame] | 944 | |
Ben Hutchings | 288379f | 2009-01-19 16:43:59 -0800 | [diff] [blame] | 945 | if (napi_schedule_prep(&bp->napi)) { |
Havard Skinnemoen | a268adb | 2012-10-31 06:04:52 +0000 | [diff] [blame] | 946 | netdev_vdbg(bp->dev, "scheduling RX softirq\n"); |
Ben Hutchings | 288379f | 2009-01-19 16:43:59 -0800 | [diff] [blame] | 947 | __napi_schedule(&bp->napi); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 948 | } |
| 949 | } |
| 950 | |
Nicolas Ferre | e86cd53 | 2012-10-31 06:04:57 +0000 | [diff] [blame] | 951 | if (unlikely(status & (MACB_TX_ERR_FLAGS))) { |
| 952 | macb_writel(bp, IDR, MACB_TX_INT_FLAGS); |
| 953 | schedule_work(&bp->tx_error_task); |
Soren Brinkmann | 6a027b7 | 2014-05-04 15:42:59 -0700 | [diff] [blame^] | 954 | |
| 955 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
| 956 | macb_writel(bp, ISR, MACB_TX_ERR_FLAGS); |
| 957 | |
Nicolas Ferre | e86cd53 | 2012-10-31 06:04:57 +0000 | [diff] [blame] | 958 | break; |
| 959 | } |
| 960 | |
| 961 | if (status & MACB_BIT(TCOMP)) |
| 962 | macb_tx_interrupt(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 963 | |
| 964 | /* |
| 965 | * Link change detection isn't possible with RMII, so we'll |
| 966 | * add that if/when we get our hands on a full-blown MII PHY. |
| 967 | */ |
| 968 | |
Alexander Stein | b19f7f7 | 2011-04-13 05:03:24 +0000 | [diff] [blame] | 969 | if (status & MACB_BIT(ISR_ROVR)) { |
| 970 | /* We missed at least one packet */ |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 971 | if (macb_is_gem(bp)) |
| 972 | bp->hw_stats.gem.rx_overruns++; |
| 973 | else |
| 974 | bp->hw_stats.macb.rx_overruns++; |
Soren Brinkmann | 6a027b7 | 2014-05-04 15:42:59 -0700 | [diff] [blame^] | 975 | |
| 976 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
| 977 | macb_writel(bp, ISR, MACB_BIT(ISR_ROVR)); |
Alexander Stein | b19f7f7 | 2011-04-13 05:03:24 +0000 | [diff] [blame] | 978 | } |
| 979 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 980 | if (status & MACB_BIT(HRESP)) { |
| 981 | /* |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 982 | * TODO: Reset the hardware, and maybe move the |
| 983 | * netdev_err to a lower-priority context as well |
| 984 | * (work queue?) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 985 | */ |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 986 | netdev_err(dev, "DMA bus error: HRESP not OK\n"); |
Soren Brinkmann | 6a027b7 | 2014-05-04 15:42:59 -0700 | [diff] [blame^] | 987 | |
| 988 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
| 989 | macb_writel(bp, ISR, MACB_BIT(HRESP)); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 990 | } |
| 991 | |
| 992 | status = macb_readl(bp, ISR); |
| 993 | } |
| 994 | |
| 995 | spin_unlock(&bp->lock); |
| 996 | |
| 997 | return IRQ_HANDLED; |
| 998 | } |
| 999 | |
Thomas Petazzoni | 6e8cf5c | 2009-05-04 11:08:41 -0700 | [diff] [blame] | 1000 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1001 | /* |
| 1002 | * Polling receive - used by netconsole and other diagnostic tools |
| 1003 | * to allow network i/o with interrupts disabled. |
| 1004 | */ |
| 1005 | static void macb_poll_controller(struct net_device *dev) |
| 1006 | { |
| 1007 | unsigned long flags; |
| 1008 | |
| 1009 | local_irq_save(flags); |
| 1010 | macb_interrupt(dev->irq, dev); |
| 1011 | local_irq_restore(flags); |
| 1012 | } |
| 1013 | #endif |
| 1014 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1015 | static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) |
| 1016 | { |
| 1017 | struct macb *bp = netdev_priv(dev); |
| 1018 | dma_addr_t mapping; |
| 1019 | unsigned int len, entry; |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 1020 | struct macb_dma_desc *desc; |
| 1021 | struct macb_tx_skb *tx_skb; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1022 | u32 ctrl; |
Dongdong Deng | 4871953 | 2009-08-23 19:49:07 -0700 | [diff] [blame] | 1023 | unsigned long flags; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1024 | |
Havard Skinnemoen | a268adb | 2012-10-31 06:04:52 +0000 | [diff] [blame] | 1025 | #if defined(DEBUG) && defined(VERBOSE_DEBUG) |
| 1026 | netdev_vdbg(bp->dev, |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1027 | "start_xmit: len %u head %p data %p tail %p end %p\n", |
| 1028 | skb->len, skb->head, skb->data, |
| 1029 | skb_tail_pointer(skb), skb_end_pointer(skb)); |
| 1030 | print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, |
| 1031 | skb->data, 16, true); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1032 | #endif |
| 1033 | |
| 1034 | len = skb->len; |
Dongdong Deng | 4871953 | 2009-08-23 19:49:07 -0700 | [diff] [blame] | 1035 | spin_lock_irqsave(&bp->lock, flags); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1036 | |
| 1037 | /* This is a hard error, log it. */ |
Nicolas Ferre | 909a858 | 2012-11-19 06:00:21 +0000 | [diff] [blame] | 1038 | if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) { |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1039 | netif_stop_queue(dev); |
Dongdong Deng | 4871953 | 2009-08-23 19:49:07 -0700 | [diff] [blame] | 1040 | spin_unlock_irqrestore(&bp->lock, flags); |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1041 | netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n"); |
| 1042 | netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", |
| 1043 | bp->tx_head, bp->tx_tail); |
Patrick McHardy | 5b54814 | 2009-06-12 06:22:29 +0000 | [diff] [blame] | 1044 | return NETDEV_TX_BUSY; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1045 | } |
| 1046 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 1047 | entry = macb_tx_ring_wrap(bp->tx_head); |
Havard Skinnemoen | a268adb | 2012-10-31 06:04:52 +0000 | [diff] [blame] | 1048 | netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1049 | mapping = dma_map_single(&bp->pdev->dev, skb->data, |
| 1050 | len, DMA_TO_DEVICE); |
Soren Brinkmann | 9203090 | 2014-03-04 08:46:39 -0800 | [diff] [blame] | 1051 | if (dma_mapping_error(&bp->pdev->dev, mapping)) { |
Eric W. Biederman | c88b5b6 | 2014-03-15 16:08:27 -0700 | [diff] [blame] | 1052 | dev_kfree_skb_any(skb); |
Soren Brinkmann | 9203090 | 2014-03-04 08:46:39 -0800 | [diff] [blame] | 1053 | goto unlock; |
| 1054 | } |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 1055 | |
Soren Brinkmann | 9203090 | 2014-03-04 08:46:39 -0800 | [diff] [blame] | 1056 | bp->tx_head++; |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 1057 | tx_skb = &bp->tx_skb[entry]; |
| 1058 | tx_skb->skb = skb; |
| 1059 | tx_skb->mapping = mapping; |
Havard Skinnemoen | a268adb | 2012-10-31 06:04:52 +0000 | [diff] [blame] | 1060 | netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n", |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1061 | skb->data, (unsigned long)mapping); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1062 | |
| 1063 | ctrl = MACB_BF(TX_FRMLEN, len); |
| 1064 | ctrl |= MACB_BIT(TX_LAST); |
| 1065 | if (entry == (TX_RING_SIZE - 1)) |
| 1066 | ctrl |= MACB_BIT(TX_WRAP); |
| 1067 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 1068 | desc = &bp->tx_ring[entry]; |
| 1069 | desc->addr = mapping; |
| 1070 | desc->ctrl = ctrl; |
Havard Skinnemoen | 03dbe05 | 2012-10-31 06:04:51 +0000 | [diff] [blame] | 1071 | |
| 1072 | /* Make newly initialized descriptor visible to hardware */ |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1073 | wmb(); |
| 1074 | |
Richard Cochran | e072092 | 2011-06-19 21:51:28 +0000 | [diff] [blame] | 1075 | skb_tx_timestamp(skb); |
| 1076 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1077 | macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); |
| 1078 | |
Nicolas Ferre | 909a858 | 2012-11-19 06:00:21 +0000 | [diff] [blame] | 1079 | if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1080 | netif_stop_queue(dev); |
| 1081 | |
Soren Brinkmann | 9203090 | 2014-03-04 08:46:39 -0800 | [diff] [blame] | 1082 | unlock: |
Dongdong Deng | 4871953 | 2009-08-23 19:49:07 -0700 | [diff] [blame] | 1083 | spin_unlock_irqrestore(&bp->lock, flags); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1084 | |
Patrick McHardy | 6ed1065 | 2009-06-23 06:03:08 +0000 | [diff] [blame] | 1085 | return NETDEV_TX_OK; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1086 | } |
| 1087 | |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1088 | static void macb_init_rx_buffer_size(struct macb *bp, size_t size) |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1089 | { |
| 1090 | if (!macb_is_gem(bp)) { |
| 1091 | bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; |
| 1092 | } else { |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1093 | bp->rx_buffer_size = size; |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1094 | |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1095 | if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1096 | netdev_dbg(bp->dev, |
| 1097 | "RX buffer must be multiple of %d bytes, expanding\n", |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1098 | RX_BUFFER_MULTIPLE); |
| 1099 | bp->rx_buffer_size = |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1100 | roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1101 | } |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1102 | } |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1103 | |
 | 1104 | 	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", |
| 1105 | bp->dev->mtu, bp->rx_buffer_size); |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1106 | } |
| 1107 | |
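/*
 * Free the per-descriptor sk_buffs that the GEM variant allocated for
 * reception, unmapping each DMA buffer first, then release the
 * rx_skbuff tracking array itself.
 */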
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1108 | static void gem_free_rx_buffers(struct macb *bp) |
| 1109 | { |
| 1110 | struct sk_buff *skb; |
| 1111 | struct macb_dma_desc *desc; |
| 1112 | dma_addr_t addr; |
| 1113 | int i; |
| 1114 | |
| 1115 | if (!bp->rx_skbuff) |
| 1116 | return; |
| 1117 | |
| 1118 | for (i = 0; i < RX_RING_SIZE; i++) { |
| 1119 | skb = bp->rx_skbuff[i]; |
| 1120 | |
| 1121 | if (skb == NULL) |
| 1122 | continue; |
| 1123 | |
| 1124 | desc = &bp->rx_ring[i]; |
| 1125 | addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); |
Soren Brinkmann | ccd6d0a | 2014-05-04 15:42:58 -0700 | [diff] [blame] | 1126 | dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1127 | DMA_FROM_DEVICE); |
| 1128 | dev_kfree_skb_any(skb); |
 | 1129 | 		bp->rx_skbuff[i] = NULL; |
| 1130 | } |
| 1131 | |
| 1132 | kfree(bp->rx_skbuff); |
| 1133 | bp->rx_skbuff = NULL; |
| 1134 | } |
| 1135 | |
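/*
 * The MACB variant receives into a single coherent buffer area shared by
 * all descriptors; free it here if it was allocated.
 */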
| 1136 | static void macb_free_rx_buffers(struct macb *bp) |
| 1137 | { |
| 1138 | if (bp->rx_buffers) { |
| 1139 | dma_free_coherent(&bp->pdev->dev, |
| 1140 | RX_RING_SIZE * bp->rx_buffer_size, |
| 1141 | bp->rx_buffers, bp->rx_buffers_dma); |
| 1142 | bp->rx_buffers = NULL; |
| 1143 | } |
| 1144 | } |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1145 | |
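/*
 * Release everything macb_alloc_consistent() set up: the tx_skb tracking
 * array, the RX buffers (via the variant-specific callback) and both
 * descriptor rings.
 */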
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1146 | static void macb_free_consistent(struct macb *bp) |
| 1147 | { |
| 1148 | if (bp->tx_skb) { |
| 1149 | kfree(bp->tx_skb); |
| 1150 | bp->tx_skb = NULL; |
| 1151 | } |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1152 | bp->macbgem_ops.mog_free_rx_buffers(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1153 | if (bp->rx_ring) { |
| 1154 | dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, |
| 1155 | bp->rx_ring, bp->rx_ring_dma); |
| 1156 | bp->rx_ring = NULL; |
| 1157 | } |
| 1158 | if (bp->tx_ring) { |
| 1159 | dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, |
| 1160 | bp->tx_ring, bp->tx_ring_dma); |
| 1161 | bp->tx_ring = NULL; |
| 1162 | } |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1163 | } |
| 1164 | |
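/* Allocate the array used to track one sk_buff per GEM RX descriptor. */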
| 1165 | static int gem_alloc_rx_buffers(struct macb *bp) |
| 1166 | { |
| 1167 | int size; |
| 1168 | |
| 1169 | size = RX_RING_SIZE * sizeof(struct sk_buff *); |
| 1170 | bp->rx_skbuff = kzalloc(size, GFP_KERNEL); |
| 1171 | if (!bp->rx_skbuff) |
| 1172 | return -ENOMEM; |
| 1173 | else |
| 1174 | netdev_dbg(bp->dev, |
| 1175 | "Allocated %d RX struct sk_buff entries at %p\n", |
| 1176 | RX_RING_SIZE, bp->rx_skbuff); |
| 1177 | return 0; |
| 1178 | } |
| 1179 | |
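/* Allocate one coherent DMA area holding RX_RING_SIZE receive buffers. */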
| 1180 | static int macb_alloc_rx_buffers(struct macb *bp) |
| 1181 | { |
| 1182 | int size; |
| 1183 | |
| 1184 | size = RX_RING_SIZE * bp->rx_buffer_size; |
| 1185 | bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, |
| 1186 | &bp->rx_buffers_dma, GFP_KERNEL); |
| 1187 | if (!bp->rx_buffers) |
| 1188 | return -ENOMEM; |
| 1189 | else |
| 1190 | netdev_dbg(bp->dev, |
| 1191 | "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", |
| 1192 | size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); |
| 1193 | return 0; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1194 | } |
| 1195 | |
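/*
 * Allocate the TX skb tracking array, both descriptor rings and the
 * variant-specific RX buffers; on any failure, everything allocated so
 * far is torn down again.
 */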
| 1196 | static int macb_alloc_consistent(struct macb *bp) |
| 1197 | { |
| 1198 | int size; |
| 1199 | |
Havard Skinnemoen | 55054a1 | 2012-10-31 06:04:55 +0000 | [diff] [blame] | 1200 | size = TX_RING_SIZE * sizeof(struct macb_tx_skb); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1201 | bp->tx_skb = kmalloc(size, GFP_KERNEL); |
| 1202 | if (!bp->tx_skb) |
| 1203 | goto out_err; |
| 1204 | |
| 1205 | size = RX_RING_BYTES; |
| 1206 | bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
| 1207 | &bp->rx_ring_dma, GFP_KERNEL); |
| 1208 | if (!bp->rx_ring) |
| 1209 | goto out_err; |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1210 | netdev_dbg(bp->dev, |
| 1211 | "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", |
| 1212 | size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1213 | |
| 1214 | size = TX_RING_BYTES; |
| 1215 | bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, |
| 1216 | &bp->tx_ring_dma, GFP_KERNEL); |
| 1217 | if (!bp->tx_ring) |
| 1218 | goto out_err; |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1219 | netdev_dbg(bp->dev, |
| 1220 | "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", |
| 1221 | size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1222 | |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1223 | if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1224 | goto out_err; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1225 | |
| 1226 | return 0; |
| 1227 | |
| 1228 | out_err: |
| 1229 | macb_free_consistent(bp); |
| 1230 | return -ENOMEM; |
| 1231 | } |
| 1232 | |
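/*
 * Mark every TX descriptor as used (owned by software), reset the ring
 * indices and let gem_rx_refill() populate the RX ring with fresh skbs.
 */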
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1233 | static void gem_init_rings(struct macb *bp) |
| 1234 | { |
| 1235 | int i; |
| 1236 | |
| 1237 | for (i = 0; i < TX_RING_SIZE; i++) { |
| 1238 | bp->tx_ring[i].addr = 0; |
| 1239 | bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); |
| 1240 | } |
| 1241 | bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); |
| 1242 | |
| 1243 | bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0; |
| 1244 | |
| 1245 | gem_rx_refill(bp); |
| 1246 | } |
| 1247 | |
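/*
 * Point each RX descriptor at its slice of the coherent buffer area,
 * mark all TX descriptors as used and reset the ring indices.
 */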
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1248 | static void macb_init_rings(struct macb *bp) |
| 1249 | { |
| 1250 | int i; |
| 1251 | dma_addr_t addr; |
| 1252 | |
| 1253 | addr = bp->rx_buffers_dma; |
| 1254 | for (i = 0; i < RX_RING_SIZE; i++) { |
| 1255 | bp->rx_ring[i].addr = addr; |
| 1256 | bp->rx_ring[i].ctrl = 0; |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1257 | addr += bp->rx_buffer_size; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1258 | } |
| 1259 | bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); |
| 1260 | |
| 1261 | for (i = 0; i < TX_RING_SIZE; i++) { |
| 1262 | bp->tx_ring[i].addr = 0; |
| 1263 | bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); |
| 1264 | } |
| 1265 | bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); |
| 1266 | |
| 1267 | bp->rx_tail = bp->tx_head = bp->tx_tail = 0; |
| 1268 | } |
| 1269 | |
| 1270 | static void macb_reset_hw(struct macb *bp) |
| 1271 | { |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1272 | /* |
| 1273 | * Disable RX and TX (XXX: Should we halt the transmission |
| 1274 | * more gracefully?) |
| 1275 | */ |
| 1276 | macb_writel(bp, NCR, 0); |
| 1277 | |
| 1278 | /* Clear the stats registers (XXX: Update stats first?) */ |
| 1279 | macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); |
| 1280 | |
| 1281 | /* Clear all status flags */ |
Joachim Eastwood | 95ebcea | 2012-10-22 08:45:31 +0000 | [diff] [blame] | 1282 | macb_writel(bp, TSR, -1); |
| 1283 | macb_writel(bp, RSR, -1); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1284 | |
| 1285 | /* Disable all interrupts */ |
Joachim Eastwood | 95ebcea | 2012-10-22 08:45:31 +0000 | [diff] [blame] | 1286 | macb_writel(bp, IDR, -1); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1287 | macb_readl(bp, ISR); |
| 1288 | } |
| 1289 | |
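/*
 * Pick an MDC clock divider based on the pclk rate so that the MDIO
 * management clock stays within the range the attached PHY can handle.
 */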
Jamie Iles | 70c9f3d | 2011-03-09 16:22:54 +0000 | [diff] [blame] | 1290 | static u32 gem_mdc_clk_div(struct macb *bp) |
| 1291 | { |
| 1292 | u32 config; |
| 1293 | unsigned long pclk_hz = clk_get_rate(bp->pclk); |
| 1294 | |
| 1295 | if (pclk_hz <= 20000000) |
| 1296 | config = GEM_BF(CLK, GEM_CLK_DIV8); |
| 1297 | else if (pclk_hz <= 40000000) |
| 1298 | config = GEM_BF(CLK, GEM_CLK_DIV16); |
| 1299 | else if (pclk_hz <= 80000000) |
| 1300 | config = GEM_BF(CLK, GEM_CLK_DIV32); |
| 1301 | else if (pclk_hz <= 120000000) |
| 1302 | config = GEM_BF(CLK, GEM_CLK_DIV48); |
| 1303 | else if (pclk_hz <= 160000000) |
| 1304 | config = GEM_BF(CLK, GEM_CLK_DIV64); |
| 1305 | else |
| 1306 | config = GEM_BF(CLK, GEM_CLK_DIV96); |
| 1307 | |
| 1308 | return config; |
| 1309 | } |
| 1310 | |
| 1311 | static u32 macb_mdc_clk_div(struct macb *bp) |
| 1312 | { |
| 1313 | u32 config; |
| 1314 | unsigned long pclk_hz; |
| 1315 | |
| 1316 | if (macb_is_gem(bp)) |
| 1317 | return gem_mdc_clk_div(bp); |
| 1318 | |
| 1319 | pclk_hz = clk_get_rate(bp->pclk); |
| 1320 | if (pclk_hz <= 20000000) |
| 1321 | config = MACB_BF(CLK, MACB_CLK_DIV8); |
| 1322 | else if (pclk_hz <= 40000000) |
| 1323 | config = MACB_BF(CLK, MACB_CLK_DIV16); |
| 1324 | else if (pclk_hz <= 80000000) |
| 1325 | config = MACB_BF(CLK, MACB_CLK_DIV32); |
| 1326 | else |
| 1327 | config = MACB_BF(CLK, MACB_CLK_DIV64); |
| 1328 | |
| 1329 | return config; |
| 1330 | } |
| 1331 | |
Jamie Iles | 757a03c | 2011-03-09 16:29:59 +0000 | [diff] [blame] | 1332 | /* |
 | 1333 |  * Get the DMA bus width field of the network configuration register that we |
 | 1334 |  * should program. We determine the width by decoding the design configuration |
 | 1335 |  * register, which reports the maximum supported data bus width. |
| 1336 | */ |
| 1337 | static u32 macb_dbw(struct macb *bp) |
| 1338 | { |
| 1339 | if (!macb_is_gem(bp)) |
| 1340 | return 0; |
| 1341 | |
| 1342 | switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { |
| 1343 | case 4: |
| 1344 | return GEM_BF(DBW, GEM_DBW128); |
| 1345 | case 2: |
| 1346 | return GEM_BF(DBW, GEM_DBW64); |
| 1347 | case 1: |
| 1348 | default: |
| 1349 | return GEM_BF(DBW, GEM_DBW32); |
| 1350 | } |
| 1351 | } |
| 1352 | |
Jamie Iles | 0116da4 | 2011-03-14 17:38:30 +0000 | [diff] [blame] | 1353 | /* |
Nicolas Ferre | b3e3bd71 | 2012-11-23 03:49:01 +0000 | [diff] [blame] | 1354 | * Configure the receive DMA engine |
| 1355 | * - use the correct receive buffer size |
 | 1356 |  * - enable the use of INCR16 bursts |
 | 1357 |  *   (if the FIFO does not support them, the hardware falls back to the default) |
| 1358 | * - set both rx/tx packet buffers to full memory size |
| 1359 | * These are configurable parameters for GEM. |
Jamie Iles | 0116da4 | 2011-03-14 17:38:30 +0000 | [diff] [blame] | 1360 | */ |
| 1361 | static void macb_configure_dma(struct macb *bp) |
| 1362 | { |
| 1363 | u32 dmacfg; |
| 1364 | |
| 1365 | if (macb_is_gem(bp)) { |
| 1366 | dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1367 | dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); |
Nicolas Ferre | b3e3bd71 | 2012-11-23 03:49:01 +0000 | [diff] [blame] | 1368 | dmacfg |= GEM_BF(FBLDO, 16); |
| 1369 | dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); |
Steffen Trumtrar | a1ae385 | 2013-03-27 23:07:06 +0000 | [diff] [blame] | 1370 | dmacfg &= ~GEM_BIT(ENDIA); |
Jamie Iles | 0116da4 | 2011-03-14 17:38:30 +0000 | [diff] [blame] | 1371 | gem_writel(bp, DMACFG, dmacfg); |
| 1372 | } |
| 1373 | } |
| 1374 | |
Nicolas Ferre | 581df9e | 2013-05-14 03:00:16 +0000 | [diff] [blame] | 1375 | /* |
 | 1376 |  * Configure peripheral capabilities according to the integration options used |
| 1377 | */ |
| 1378 | static void macb_configure_caps(struct macb *bp) |
| 1379 | { |
| 1380 | if (macb_is_gem(bp)) { |
Jongsung Kim | 01276ed | 2013-07-09 17:36:00 +0900 | [diff] [blame] | 1381 | if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0) |
Nicolas Ferre | 581df9e | 2013-05-14 03:00:16 +0000 | [diff] [blame] | 1382 | bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; |
| 1383 | } |
| 1384 | } |
| 1385 | |
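/*
 * Bring the controller to a known state: reset it, program the MAC
 * address, NCFGR, DMA configuration and descriptor queue pointers,
 * then enable the transmitter, receiver and interrupts.
 */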
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1386 | static void macb_init_hw(struct macb *bp) |
| 1387 | { |
| 1388 | u32 config; |
| 1389 | |
| 1390 | macb_reset_hw(bp); |
Joachim Eastwood | 314bccc | 2012-11-07 08:14:52 +0000 | [diff] [blame] | 1391 | macb_set_hwaddr(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1392 | |
Jamie Iles | 70c9f3d | 2011-03-09 16:22:54 +0000 | [diff] [blame] | 1393 | config = macb_mdc_clk_div(bp); |
Havard Skinnemoen | 29bc2e1 | 2012-10-31 06:04:58 +0000 | [diff] [blame] | 1394 | 	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Offset RX data so the IP header is word-aligned */ |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1395 | config |= MACB_BIT(PAE); /* PAuse Enable */ |
| 1396 | config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ |
Peter Korsgaard | 8dd4bd0 | 2010-04-07 21:53:41 -0700 | [diff] [blame] | 1397 | config |= MACB_BIT(BIG); /* Receive oversized frames */ |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1398 | if (bp->dev->flags & IFF_PROMISC) |
| 1399 | config |= MACB_BIT(CAF); /* Copy All Frames */ |
| 1400 | if (!(bp->dev->flags & IFF_BROADCAST)) |
| 1401 | config |= MACB_BIT(NBC); /* No BroadCast */ |
Jamie Iles | 757a03c | 2011-03-09 16:29:59 +0000 | [diff] [blame] | 1402 | config |= macb_dbw(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1403 | macb_writel(bp, NCFGR, config); |
Vitalii Demianets | 26cdfb4 | 2012-11-02 07:09:24 +0000 | [diff] [blame] | 1404 | bp->speed = SPEED_10; |
| 1405 | bp->duplex = DUPLEX_HALF; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1406 | |
Jamie Iles | 0116da4 | 2011-03-14 17:38:30 +0000 | [diff] [blame] | 1407 | macb_configure_dma(bp); |
Nicolas Ferre | 581df9e | 2013-05-14 03:00:16 +0000 | [diff] [blame] | 1408 | macb_configure_caps(bp); |
Jamie Iles | 0116da4 | 2011-03-14 17:38:30 +0000 | [diff] [blame] | 1409 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1410 | 	/* Initialize TX and RX descriptor queue base pointers */ |
| 1411 | macb_writel(bp, RBQP, bp->rx_ring_dma); |
| 1412 | macb_writel(bp, TBQP, bp->tx_ring_dma); |
| 1413 | |
| 1414 | /* Enable TX and RX */ |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1415 | macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1416 | |
| 1417 | /* Enable interrupts */ |
Nicolas Ferre | e86cd53 | 2012-10-31 06:04:57 +0000 | [diff] [blame] | 1418 | macb_writel(bp, IER, (MACB_RX_INT_FLAGS |
| 1419 | | MACB_TX_INT_FLAGS |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1420 | | MACB_BIT(HRESP))); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1421 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1422 | } |
| 1423 | |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1424 | /* |
| 1425 | * The hash address register is 64 bits long and takes up two |
| 1426 | * locations in the memory map. The least significant bits are stored |
| 1427 | * in EMAC_HSL and the most significant bits in EMAC_HSH. |
| 1428 | * |
| 1429 | * The unicast hash enable and the multicast hash enable bits in the |
| 1430 | * network configuration register enable the reception of hash matched |
| 1431 | * frames. The destination address is reduced to a 6 bit index into |
| 1432 | * the 64 bit hash register using the following hash function. The |
| 1433 | * hash function is an exclusive or of every sixth bit of the |
| 1434 | * destination address. |
| 1435 | * |
| 1436 | * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] |
| 1437 | * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] |
| 1438 | * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] |
| 1439 | * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] |
| 1440 | * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] |
| 1441 | * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] |
| 1442 | * |
| 1443 | * da[0] represents the least significant bit of the first byte |
| 1444 | * received, that is, the multicast/unicast indicator, and da[47] |
| 1445 | * represents the most significant bit of the last byte received. If |
| 1446 | * the hash index, hi[n], points to a bit that is set in the hash |
| 1447 | * register then the frame will be matched according to whether the |
| 1448 | * frame is multicast or unicast. A multicast match will be signalled |
| 1449 | * if the multicast hash enable bit is set, da[0] is 1 and the hash |
| 1450 | * index points to a bit set in the hash register. A unicast match |
| 1451 | * will be signalled if the unicast hash enable bit is set, da[0] is 0 |
| 1452 | * and the hash index points to a bit set in the hash register. To |
| 1453 | * receive all multicast frames, the hash register should be set with |
| 1454 | * all ones and the multicast hash enable bit should be set in the |
| 1455 | * network configuration register. |
| 1456 | */ |
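/*
 * Worked example of the mapping (illustrative only): index bit hi[j] is
 * the XOR of da[j], da[j+6], da[j+12], ..., da[j+42].  hash_get_index()
 * below computes exactly this by XOR-ing hash_bit_value(i * 6 + j) for
 * i = 0..7, and macb_sethashtable() then sets bit hi[5:0] in the 64-bit
 * HRB/HRT register pair.
 */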
| 1457 | |
| 1458 | static inline int hash_bit_value(int bitnr, __u8 *addr) |
| 1459 | { |
| 1460 | if (addr[bitnr / 8] & (1 << (bitnr % 8))) |
| 1461 | return 1; |
| 1462 | return 0; |
| 1463 | } |
| 1464 | |
| 1465 | /* |
| 1466 | * Return the hash index value for the specified address. |
| 1467 | */ |
| 1468 | static int hash_get_index(__u8 *addr) |
| 1469 | { |
| 1470 | int i, j, bitval; |
| 1471 | int hash_index = 0; |
| 1472 | |
| 1473 | for (j = 0; j < 6; j++) { |
| 1474 | for (i = 0, bitval = 0; i < 8; i++) |
| 1475 | bitval ^= hash_bit_value(i*6 + j, addr); |
| 1476 | |
| 1477 | hash_index |= (bitval << j); |
| 1478 | } |
| 1479 | |
| 1480 | return hash_index; |
| 1481 | } |
| 1482 | |
| 1483 | /* |
| 1484 | * Add multicast addresses to the internal multicast-hash table. |
| 1485 | */ |
| 1486 | static void macb_sethashtable(struct net_device *dev) |
| 1487 | { |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 1488 | struct netdev_hw_addr *ha; |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1489 | unsigned long mc_filter[2]; |
Jiri Pirko | f9dcbcc | 2010-02-23 09:19:49 +0000 | [diff] [blame] | 1490 | unsigned int bitnr; |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1491 | struct macb *bp = netdev_priv(dev); |
| 1492 | |
| 1493 | mc_filter[0] = mc_filter[1] = 0; |
| 1494 | |
Jiri Pirko | 22bedad3 | 2010-04-01 21:22:57 +0000 | [diff] [blame] | 1495 | netdev_for_each_mc_addr(ha, dev) { |
| 1496 | bitnr = hash_get_index(ha->addr); |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1497 | mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); |
| 1498 | } |
| 1499 | |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1500 | macb_or_gem_writel(bp, HRB, mc_filter[0]); |
| 1501 | macb_or_gem_writel(bp, HRT, mc_filter[1]); |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1502 | } |
| 1503 | |
| 1504 | /* |
| 1505 | * Enable/Disable promiscuous and multicast modes. |
| 1506 | */ |
Joachim Eastwood | e0da1f1 | 2012-10-18 11:01:15 +0000 | [diff] [blame] | 1507 | void macb_set_rx_mode(struct net_device *dev) |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1508 | { |
| 1509 | unsigned long cfg; |
| 1510 | struct macb *bp = netdev_priv(dev); |
| 1511 | |
| 1512 | cfg = macb_readl(bp, NCFGR); |
| 1513 | |
| 1514 | if (dev->flags & IFF_PROMISC) |
| 1515 | /* Enable promiscuous mode */ |
| 1516 | cfg |= MACB_BIT(CAF); |
| 1517 | else if (dev->flags & (~IFF_PROMISC)) |
| 1518 | /* Disable promiscuous mode */ |
| 1519 | cfg &= ~MACB_BIT(CAF); |
| 1520 | |
| 1521 | if (dev->flags & IFF_ALLMULTI) { |
| 1522 | /* Enable all multicast mode */ |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1523 | macb_or_gem_writel(bp, HRB, -1); |
| 1524 | macb_or_gem_writel(bp, HRT, -1); |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1525 | cfg |= MACB_BIT(NCFGR_MTI); |
Jiri Pirko | 4cd24ea | 2010-02-08 04:30:35 +0000 | [diff] [blame] | 1526 | } else if (!netdev_mc_empty(dev)) { |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1527 | /* Enable specific multicasts */ |
| 1528 | macb_sethashtable(dev); |
| 1529 | cfg |= MACB_BIT(NCFGR_MTI); |
| 1530 | } else if (dev->flags & (~IFF_ALLMULTI)) { |
| 1531 | /* Disable all multicast mode */ |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1532 | macb_or_gem_writel(bp, HRB, 0); |
| 1533 | macb_or_gem_writel(bp, HRT, 0); |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1534 | cfg &= ~MACB_BIT(NCFGR_MTI); |
| 1535 | } |
| 1536 | |
| 1537 | macb_writel(bp, NCFGR, cfg); |
| 1538 | } |
Joachim Eastwood | e0da1f1 | 2012-10-18 11:01:15 +0000 | [diff] [blame] | 1539 | EXPORT_SYMBOL_GPL(macb_set_rx_mode); |
Patrice Vilchez | 446ebd0 | 2007-07-12 19:07:25 +0200 | [diff] [blame] | 1540 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1541 | static int macb_open(struct net_device *dev) |
| 1542 | { |
| 1543 | struct macb *bp = netdev_priv(dev); |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1544 | size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1545 | int err; |
| 1546 | |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1547 | netdev_dbg(bp->dev, "open\n"); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1548 | |
Nicolas Ferre | 03fc472 | 2012-07-03 23:14:13 +0000 | [diff] [blame] | 1549 | /* carrier starts down */ |
| 1550 | netif_carrier_off(dev); |
| 1551 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1552 | 	/* if the PHY is not yet registered, retry later */ |
| 1553 | if (!bp->phy_dev) |
| 1554 | return -EAGAIN; |
| 1555 | |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1556 | 	/* RX buffer size initialization */ |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1557 | macb_init_rx_buffer_size(bp, bufsz); |
Nicolas Ferre | 1b44791 | 2013-06-04 21:57:11 +0000 | [diff] [blame] | 1558 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1559 | err = macb_alloc_consistent(bp); |
| 1560 | if (err) { |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1561 | netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", |
| 1562 | err); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1563 | return err; |
| 1564 | } |
| 1565 | |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1566 | napi_enable(&bp->napi); |
| 1567 | |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1568 | bp->macbgem_ops.mog_init_rings(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1569 | macb_init_hw(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1570 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1571 | /* schedule a link state check */ |
| 1572 | phy_start(bp->phy_dev); |
| 1573 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1574 | netif_start_queue(dev); |
| 1575 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1576 | return 0; |
| 1577 | } |
| 1578 | |
| 1579 | static int macb_close(struct net_device *dev) |
| 1580 | { |
| 1581 | struct macb *bp = netdev_priv(dev); |
| 1582 | unsigned long flags; |
| 1583 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1584 | netif_stop_queue(dev); |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1585 | napi_disable(&bp->napi); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1586 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1587 | if (bp->phy_dev) |
| 1588 | phy_stop(bp->phy_dev); |
| 1589 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1590 | spin_lock_irqsave(&bp->lock, flags); |
| 1591 | macb_reset_hw(bp); |
| 1592 | netif_carrier_off(dev); |
| 1593 | spin_unlock_irqrestore(&bp->lock, flags); |
| 1594 | |
| 1595 | macb_free_consistent(bp); |
| 1596 | |
| 1597 | return 0; |
| 1598 | } |
| 1599 | |
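/*
 * Walk the block of GEM statistics registers starting at GEM_OTX and
 * accumulate each counter into the matching field of hw_stats.gem.
 */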
Jamie Iles | a494ed8 | 2011-03-09 16:26:35 +0000 | [diff] [blame] | 1600 | static void gem_update_stats(struct macb *bp) |
| 1601 | { |
| 1602 | u32 __iomem *reg = bp->regs + GEM_OTX; |
| 1603 | u32 *p = &bp->hw_stats.gem.tx_octets_31_0; |
| 1604 | u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1; |
| 1605 | |
| 1606 | for (; p < end; p++, reg++) |
| 1607 | *p += __raw_readl(reg); |
| 1608 | } |
| 1609 | |
| 1610 | static struct net_device_stats *gem_get_stats(struct macb *bp) |
| 1611 | { |
| 1612 | struct gem_stats *hwstat = &bp->hw_stats.gem; |
| 1613 | struct net_device_stats *nstat = &bp->stats; |
| 1614 | |
| 1615 | gem_update_stats(bp); |
| 1616 | |
| 1617 | nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + |
| 1618 | hwstat->rx_alignment_errors + |
| 1619 | hwstat->rx_resource_errors + |
| 1620 | hwstat->rx_overruns + |
| 1621 | hwstat->rx_oversize_frames + |
| 1622 | hwstat->rx_jabbers + |
| 1623 | hwstat->rx_undersized_frames + |
| 1624 | hwstat->rx_length_field_frame_errors); |
| 1625 | nstat->tx_errors = (hwstat->tx_late_collisions + |
| 1626 | hwstat->tx_excessive_collisions + |
| 1627 | hwstat->tx_underrun + |
| 1628 | hwstat->tx_carrier_sense_errors); |
| 1629 | nstat->multicast = hwstat->rx_multicast_frames; |
| 1630 | nstat->collisions = (hwstat->tx_single_collision_frames + |
| 1631 | hwstat->tx_multiple_collision_frames + |
| 1632 | hwstat->tx_excessive_collisions); |
| 1633 | nstat->rx_length_errors = (hwstat->rx_oversize_frames + |
| 1634 | hwstat->rx_jabbers + |
| 1635 | hwstat->rx_undersized_frames + |
| 1636 | hwstat->rx_length_field_frame_errors); |
| 1637 | nstat->rx_over_errors = hwstat->rx_resource_errors; |
| 1638 | nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; |
| 1639 | nstat->rx_frame_errors = hwstat->rx_alignment_errors; |
| 1640 | nstat->rx_fifo_errors = hwstat->rx_overruns; |
| 1641 | nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; |
| 1642 | nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; |
| 1643 | nstat->tx_fifo_errors = hwstat->tx_underrun; |
| 1644 | |
| 1645 | return nstat; |
| 1646 | } |
| 1647 | |
Joachim Eastwood | 2ea32ee | 2012-11-07 08:14:54 +0000 | [diff] [blame] | 1648 | struct net_device_stats *macb_get_stats(struct net_device *dev) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1649 | { |
| 1650 | struct macb *bp = netdev_priv(dev); |
| 1651 | struct net_device_stats *nstat = &bp->stats; |
Jamie Iles | a494ed8 | 2011-03-09 16:26:35 +0000 | [diff] [blame] | 1652 | struct macb_stats *hwstat = &bp->hw_stats.macb; |
| 1653 | |
| 1654 | if (macb_is_gem(bp)) |
| 1655 | return gem_get_stats(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1656 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1657 | /* read stats from hardware */ |
| 1658 | macb_update_stats(bp); |
| 1659 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1660 | /* Convert HW stats into netdevice stats */ |
| 1661 | nstat->rx_errors = (hwstat->rx_fcs_errors + |
| 1662 | hwstat->rx_align_errors + |
| 1663 | hwstat->rx_resource_errors + |
| 1664 | hwstat->rx_overruns + |
| 1665 | hwstat->rx_oversize_pkts + |
| 1666 | hwstat->rx_jabbers + |
| 1667 | hwstat->rx_undersize_pkts + |
| 1668 | hwstat->sqe_test_errors + |
| 1669 | hwstat->rx_length_mismatch); |
| 1670 | nstat->tx_errors = (hwstat->tx_late_cols + |
| 1671 | hwstat->tx_excessive_cols + |
| 1672 | hwstat->tx_underruns + |
| 1673 | hwstat->tx_carrier_errors); |
| 1674 | nstat->collisions = (hwstat->tx_single_cols + |
| 1675 | hwstat->tx_multiple_cols + |
| 1676 | hwstat->tx_excessive_cols); |
| 1677 | nstat->rx_length_errors = (hwstat->rx_oversize_pkts + |
| 1678 | hwstat->rx_jabbers + |
| 1679 | hwstat->rx_undersize_pkts + |
| 1680 | hwstat->rx_length_mismatch); |
Alexander Stein | b19f7f7 | 2011-04-13 05:03:24 +0000 | [diff] [blame] | 1681 | nstat->rx_over_errors = hwstat->rx_resource_errors + |
| 1682 | hwstat->rx_overruns; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1683 | nstat->rx_crc_errors = hwstat->rx_fcs_errors; |
| 1684 | nstat->rx_frame_errors = hwstat->rx_align_errors; |
| 1685 | nstat->rx_fifo_errors = hwstat->rx_overruns; |
| 1686 | /* XXX: What does "missed" mean? */ |
| 1687 | nstat->tx_aborted_errors = hwstat->tx_excessive_cols; |
| 1688 | nstat->tx_carrier_errors = hwstat->tx_carrier_errors; |
| 1689 | nstat->tx_fifo_errors = hwstat->tx_underruns; |
| 1690 | /* Don't know about heartbeat or window errors... */ |
| 1691 | |
| 1692 | return nstat; |
| 1693 | } |
Joachim Eastwood | 2ea32ee | 2012-11-07 08:14:54 +0000 | [diff] [blame] | 1694 | EXPORT_SYMBOL_GPL(macb_get_stats); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1695 | |
| 1696 | static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
| 1697 | { |
| 1698 | struct macb *bp = netdev_priv(dev); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1699 | struct phy_device *phydev = bp->phy_dev; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1700 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1701 | if (!phydev) |
| 1702 | return -ENODEV; |
| 1703 | |
| 1704 | return phy_ethtool_gset(phydev, cmd); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1705 | } |
| 1706 | |
| 1707 | static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
| 1708 | { |
| 1709 | struct macb *bp = netdev_priv(dev); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1710 | struct phy_device *phydev = bp->phy_dev; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1711 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1712 | if (!phydev) |
| 1713 | return -ENODEV; |
| 1714 | |
| 1715 | return phy_ethtool_sset(phydev, cmd); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1716 | } |
| 1717 | |
Nicolas Ferre | d1d1b53 | 2012-10-31 06:04:56 +0000 | [diff] [blame] | 1718 | static int macb_get_regs_len(struct net_device *netdev) |
| 1719 | { |
| 1720 | return MACB_GREGS_NBR * sizeof(u32); |
| 1721 | } |
| 1722 | |
| 1723 | static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, |
| 1724 | void *p) |
| 1725 | { |
| 1726 | struct macb *bp = netdev_priv(dev); |
| 1727 | unsigned int tail, head; |
| 1728 | u32 *regs_buff = p; |
| 1729 | |
| 1730 | regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) |
| 1731 | | MACB_GREGS_VERSION; |
| 1732 | |
| 1733 | tail = macb_tx_ring_wrap(bp->tx_tail); |
| 1734 | head = macb_tx_ring_wrap(bp->tx_head); |
| 1735 | |
| 1736 | regs_buff[0] = macb_readl(bp, NCR); |
| 1737 | regs_buff[1] = macb_or_gem_readl(bp, NCFGR); |
| 1738 | regs_buff[2] = macb_readl(bp, NSR); |
| 1739 | regs_buff[3] = macb_readl(bp, TSR); |
| 1740 | regs_buff[4] = macb_readl(bp, RBQP); |
| 1741 | regs_buff[5] = macb_readl(bp, TBQP); |
| 1742 | regs_buff[6] = macb_readl(bp, RSR); |
| 1743 | regs_buff[7] = macb_readl(bp, IMR); |
| 1744 | |
| 1745 | regs_buff[8] = tail; |
| 1746 | regs_buff[9] = head; |
| 1747 | regs_buff[10] = macb_tx_dma(bp, tail); |
| 1748 | regs_buff[11] = macb_tx_dma(bp, head); |
| 1749 | |
| 1750 | if (macb_is_gem(bp)) { |
| 1751 | regs_buff[12] = gem_readl(bp, USRIO); |
| 1752 | regs_buff[13] = gem_readl(bp, DMACFG); |
| 1753 | } |
| 1754 | } |
| 1755 | |
Joachim Eastwood | 0005f54 | 2012-10-18 11:01:12 +0000 | [diff] [blame] | 1756 | const struct ethtool_ops macb_ethtool_ops = { |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1757 | .get_settings = macb_get_settings, |
| 1758 | .set_settings = macb_set_settings, |
Nicolas Ferre | d1d1b53 | 2012-10-31 06:04:56 +0000 | [diff] [blame] | 1759 | .get_regs_len = macb_get_regs_len, |
| 1760 | .get_regs = macb_get_regs, |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1761 | .get_link = ethtool_op_get_link, |
Richard Cochran | 17f393e | 2012-04-03 22:59:31 +0000 | [diff] [blame] | 1762 | .get_ts_info = ethtool_op_get_ts_info, |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1763 | }; |
Joachim Eastwood | 0005f54 | 2012-10-18 11:01:12 +0000 | [diff] [blame] | 1764 | EXPORT_SYMBOL_GPL(macb_ethtool_ops); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1765 | |
Joachim Eastwood | 0005f54 | 2012-10-18 11:01:12 +0000 | [diff] [blame] | 1766 | int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1767 | { |
| 1768 | struct macb *bp = netdev_priv(dev); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1769 | struct phy_device *phydev = bp->phy_dev; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1770 | |
| 1771 | if (!netif_running(dev)) |
| 1772 | return -EINVAL; |
| 1773 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1774 | if (!phydev) |
| 1775 | return -ENODEV; |
| 1776 | |
Richard Cochran | 28b0411 | 2010-07-17 08:48:55 +0000 | [diff] [blame] | 1777 | return phy_mii_ioctl(phydev, rq, cmd); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1778 | } |
Joachim Eastwood | 0005f54 | 2012-10-18 11:01:12 +0000 | [diff] [blame] | 1779 | EXPORT_SYMBOL_GPL(macb_ioctl); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1780 | |
Alexander Beregalov | 5f1fa99 | 2009-04-11 07:42:26 +0000 | [diff] [blame] | 1781 | static const struct net_device_ops macb_netdev_ops = { |
| 1782 | .ndo_open = macb_open, |
| 1783 | .ndo_stop = macb_close, |
| 1784 | .ndo_start_xmit = macb_start_xmit, |
Jiri Pirko | afc4b13 | 2011-08-16 06:29:01 +0000 | [diff] [blame] | 1785 | .ndo_set_rx_mode = macb_set_rx_mode, |
Alexander Beregalov | 5f1fa99 | 2009-04-11 07:42:26 +0000 | [diff] [blame] | 1786 | .ndo_get_stats = macb_get_stats, |
| 1787 | .ndo_do_ioctl = macb_ioctl, |
| 1788 | .ndo_validate_addr = eth_validate_addr, |
| 1789 | .ndo_change_mtu = eth_change_mtu, |
| 1790 | .ndo_set_mac_address = eth_mac_addr, |
Thomas Petazzoni | 6e8cf5c | 2009-05-04 11:08:41 -0700 | [diff] [blame] | 1791 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 1792 | .ndo_poll_controller = macb_poll_controller, |
| 1793 | #endif |
Alexander Beregalov | 5f1fa99 | 2009-04-11 07:42:26 +0000 | [diff] [blame] | 1794 | }; |
| 1795 | |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 1796 | #if defined(CONFIG_OF) |
| 1797 | static const struct of_device_id macb_dt_ids[] = { |
| 1798 | { .compatible = "cdns,at32ap7000-macb" }, |
| 1799 | { .compatible = "cdns,at91sam9260-macb" }, |
| 1800 | { .compatible = "cdns,macb" }, |
| 1801 | { .compatible = "cdns,pc302-gem" }, |
| 1802 | { .compatible = "cdns,gem" }, |
| 1803 | { /* sentinel */ } |
| 1804 | }; |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 1805 | MODULE_DEVICE_TABLE(of, macb_dt_ids); |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 1806 | #endif |
| 1807 | |
Haavard Skinnemoen | 06c3fd6 | 2008-01-31 13:10:22 +0100 | [diff] [blame] | 1808 | static int __init macb_probe(struct platform_device *pdev) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1809 | { |
Jamie Iles | 84e0cdb | 2011-03-08 20:17:06 +0000 | [diff] [blame] | 1810 | struct macb_platform_data *pdata; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1811 | struct resource *regs; |
| 1812 | struct net_device *dev; |
| 1813 | struct macb *bp; |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1814 | struct phy_device *phydev; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1815 | u32 config; |
| 1816 | int err = -ENXIO; |
Jean-Christophe PLAGNIOL-VILLARD | 8ef29f8a | 2012-10-31 06:04:59 +0000 | [diff] [blame] | 1817 | struct pinctrl *pinctrl; |
Guenter Roeck | 5090704 | 2013-04-02 09:35:09 +0000 | [diff] [blame] | 1818 | const char *mac; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1819 | |
| 1820 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 1821 | if (!regs) { |
| 1822 | dev_err(&pdev->dev, "no mmio resource defined\n"); |
| 1823 | goto err_out; |
| 1824 | } |
| 1825 | |
Jean-Christophe PLAGNIOL-VILLARD | 8ef29f8a | 2012-10-31 06:04:59 +0000 | [diff] [blame] | 1826 | pinctrl = devm_pinctrl_get_select_default(&pdev->dev); |
| 1827 | if (IS_ERR(pinctrl)) { |
| 1828 | err = PTR_ERR(pinctrl); |
| 1829 | if (err == -EPROBE_DEFER) |
| 1830 | goto err_out; |
| 1831 | |
| 1832 | dev_warn(&pdev->dev, "No pinctrl provided\n"); |
| 1833 | } |
| 1834 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1835 | err = -ENOMEM; |
| 1836 | dev = alloc_etherdev(sizeof(*bp)); |
Joe Perches | 41de8d4 | 2012-01-29 13:47:52 +0000 | [diff] [blame] | 1837 | if (!dev) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1838 | goto err_out; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1839 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1840 | SET_NETDEV_DEV(dev, &pdev->dev); |
| 1841 | |
| 1842 | /* TODO: Actually, we have some interesting features... */ |
| 1843 | dev->features |= 0; |
| 1844 | |
| 1845 | bp = netdev_priv(dev); |
| 1846 | bp->pdev = pdev; |
| 1847 | bp->dev = dev; |
| 1848 | |
| 1849 | spin_lock_init(&bp->lock); |
Nicolas Ferre | e86cd53 | 2012-10-31 06:04:57 +0000 | [diff] [blame] | 1850 | INIT_WORK(&bp->tx_error_task, macb_tx_error_task); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1851 | |
Soren Brinkmann | b48e0ba | 2013-12-10 16:07:20 -0800 | [diff] [blame] | 1852 | bp->pclk = devm_clk_get(&pdev->dev, "pclk"); |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1853 | if (IS_ERR(bp->pclk)) { |
Soren Brinkmann | b48e0ba | 2013-12-10 16:07:20 -0800 | [diff] [blame] | 1854 | err = PTR_ERR(bp->pclk); |
| 1855 | dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1856 | goto err_out_free_dev; |
| 1857 | } |
Jamie Iles | 461845d | 2011-03-08 20:19:23 +0000 | [diff] [blame] | 1858 | |
Soren Brinkmann | b48e0ba | 2013-12-10 16:07:20 -0800 | [diff] [blame] | 1859 | bp->hclk = devm_clk_get(&pdev->dev, "hclk"); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1860 | if (IS_ERR(bp->hclk)) { |
Soren Brinkmann | b48e0ba | 2013-12-10 16:07:20 -0800 | [diff] [blame] | 1861 | err = PTR_ERR(bp->hclk); |
| 1862 | dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); |
| 1863 | goto err_out_free_dev; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1864 | } |
Soren Brinkmann | b48e0ba | 2013-12-10 16:07:20 -0800 | [diff] [blame] | 1865 | |
Soren Brinkmann | e1824df | 2013-12-10 16:07:23 -0800 | [diff] [blame] | 1866 | bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk"); |
| 1867 | |
Soren Brinkmann | b48e0ba | 2013-12-10 16:07:20 -0800 | [diff] [blame] | 1868 | err = clk_prepare_enable(bp->pclk); |
| 1869 | if (err) { |
| 1870 | dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); |
| 1871 | goto err_out_free_dev; |
| 1872 | } |
| 1873 | |
| 1874 | err = clk_prepare_enable(bp->hclk); |
| 1875 | if (err) { |
| 1876 | dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); |
| 1877 | goto err_out_disable_pclk; |
| 1878 | } |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1879 | |
Soren Brinkmann | e1824df | 2013-12-10 16:07:23 -0800 | [diff] [blame] | 1880 | if (!IS_ERR(bp->tx_clk)) { |
| 1881 | err = clk_prepare_enable(bp->tx_clk); |
| 1882 | if (err) { |
| 1883 | dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", |
| 1884 | err); |
| 1885 | goto err_out_disable_hclk; |
| 1886 | } |
| 1887 | } |
| 1888 | |
Soren Brinkmann | 60fe716 | 2013-12-10 16:07:21 -0800 | [diff] [blame] | 1889 | bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1890 | if (!bp->regs) { |
| 1891 | dev_err(&pdev->dev, "failed to map registers, aborting.\n"); |
| 1892 | err = -ENOMEM; |
| 1893 | goto err_out_disable_clocks; |
| 1894 | } |
| 1895 | |
| 1896 | dev->irq = platform_get_irq(pdev, 0); |
Soren Brinkmann | 0a4acf0 | 2013-12-10 16:07:22 -0800 | [diff] [blame] | 1897 | err = devm_request_irq(&pdev->dev, dev->irq, macb_interrupt, 0, |
| 1898 | dev->name, dev); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1899 | if (err) { |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1900 | dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", |
| 1901 | dev->irq, err); |
Soren Brinkmann | 60fe716 | 2013-12-10 16:07:21 -0800 | [diff] [blame] | 1902 | goto err_out_disable_clocks; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1903 | } |
| 1904 | |
Alexander Beregalov | 5f1fa99 | 2009-04-11 07:42:26 +0000 | [diff] [blame] | 1905 | dev->netdev_ops = &macb_netdev_ops; |
Stephen Hemminger | bea3348 | 2007-10-03 16:41:36 -0700 | [diff] [blame] | 1906 | netif_napi_add(dev, &bp->napi, macb_poll, 64); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1907 | dev->ethtool_ops = &macb_ethtool_ops; |
| 1908 | |
| 1909 | dev->base_addr = regs->start; |
| 1910 | |
Nicolas Ferre | 4df9513 | 2013-06-04 21:57:12 +0000 | [diff] [blame] | 1911 | 	/* set up the appropriate routines according to the adapter type */ |
| 1912 | if (macb_is_gem(bp)) { |
| 1913 | bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; |
| 1914 | bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; |
| 1915 | bp->macbgem_ops.mog_init_rings = gem_init_rings; |
| 1916 | bp->macbgem_ops.mog_rx = gem_rx; |
| 1917 | } else { |
| 1918 | bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; |
| 1919 | bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; |
| 1920 | bp->macbgem_ops.mog_init_rings = macb_init_rings; |
| 1921 | bp->macbgem_ops.mog_rx = macb_rx; |
| 1922 | } |
| 1923 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1924 | /* Set MII management clock divider */ |
Jamie Iles | 70c9f3d | 2011-03-09 16:22:54 +0000 | [diff] [blame] | 1925 | config = macb_mdc_clk_div(bp); |
Jamie Iles | 757a03c | 2011-03-09 16:29:59 +0000 | [diff] [blame] | 1926 | config |= macb_dbw(bp); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1927 | macb_writel(bp, NCFGR, config); |
| 1928 | |
Guenter Roeck | 5090704 | 2013-04-02 09:35:09 +0000 | [diff] [blame] | 1929 | mac = of_get_mac_address(pdev->dev.of_node); |
| 1930 | if (mac) |
| 1931 | memcpy(bp->dev->dev_addr, mac, ETH_ALEN); |
| 1932 | else |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 1933 | macb_get_hwaddr(bp); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1934 | |
Guenter Roeck | 5090704 | 2013-04-02 09:35:09 +0000 | [diff] [blame] | 1935 | err = of_get_phy_mode(pdev->dev.of_node); |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 1936 | if (err < 0) { |
Jingoo Han | c607a0d | 2013-08-30 14:12:21 +0900 | [diff] [blame] | 1937 | pdata = dev_get_platdata(&pdev->dev); |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 1938 | if (pdata && pdata->is_rmii) |
| 1939 | bp->phy_interface = PHY_INTERFACE_MODE_RMII; |
| 1940 | else |
| 1941 | bp->phy_interface = PHY_INTERFACE_MODE_MII; |
| 1942 | } else { |
| 1943 | bp->phy_interface = err; |
| 1944 | } |
| 1945 | |
Patrice Vilchez | 140b755 | 2012-10-31 06:04:50 +0000 | [diff] [blame] | 1946 | if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) |
| 1947 | macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII)); |
| 1948 | else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1949 | #if defined(CONFIG_ARCH_AT91) |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1950 | macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) | |
| 1951 | MACB_BIT(CLKEN))); |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1952 | #else |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1953 | macb_or_gem_writel(bp, USRIO, 0); |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1954 | #endif |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1955 | else |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1956 | #if defined(CONFIG_ARCH_AT91) |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1957 | macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN)); |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1958 | #else |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1959 | macb_or_gem_writel(bp, USRIO, MACB_BIT(MII)); |
Andrew Victor | 0cc8674 | 2007-02-07 16:40:44 +0100 | [diff] [blame] | 1960 | #endif |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1961 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1962 | err = register_netdev(dev); |
| 1963 | if (err) { |
| 1964 | dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); |
Soren Brinkmann | 0a4acf0 | 2013-12-10 16:07:22 -0800 | [diff] [blame] | 1965 | goto err_out_disable_clocks; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1966 | } |
| 1967 | |
Nicolas Ferre | 72ca820 | 2013-04-14 22:04:33 +0000 | [diff] [blame] | 1968 | err = macb_mii_init(bp); |
| 1969 | if (err) |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1970 | goto err_out_unregister_netdev; |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1971 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1972 | platform_set_drvdata(pdev, dev); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1973 | |
Nicolas Ferre | 03fc472 | 2012-07-03 23:14:13 +0000 | [diff] [blame] | 1974 | netif_carrier_off(dev); |
| 1975 | |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 1976 | netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n", |
| 1977 | macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr, |
| 1978 | dev->irq, dev->dev_addr); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1979 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1980 | phydev = bp->phy_dev; |
Jamie Iles | c220f8c | 2011-03-08 20:27:08 +0000 | [diff] [blame] | 1981 | netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", |
| 1982 | phydev->drv->name, dev_name(&phydev->dev), phydev->irq); |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1983 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1984 | return 0; |
| 1985 | |
frederic RODO | 6c36a70 | 2007-07-12 19:07:24 +0200 | [diff] [blame] | 1986 | err_out_unregister_netdev: |
| 1987 | unregister_netdev(dev); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1988 | err_out_disable_clocks: |
Soren Brinkmann | e1824df | 2013-12-10 16:07:23 -0800 | [diff] [blame] | 1989 | if (!IS_ERR(bp->tx_clk)) |
| 1990 | clk_disable_unprepare(bp->tx_clk); |
| 1991 | err_out_disable_hclk: |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 1992 | clk_disable_unprepare(bp->hclk); |
Soren Brinkmann | b48e0ba | 2013-12-10 16:07:20 -0800 | [diff] [blame] | 1993 | err_out_disable_pclk: |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 1994 | clk_disable_unprepare(bp->pclk); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1995 | err_out_free_dev: |
| 1996 | free_netdev(dev); |
| 1997 | err_out: |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 1998 | return err; |
| 1999 | } |
| 2000 | |
Haavard Skinnemoen | 06c3fd6 | 2008-01-31 13:10:22 +0100 | [diff] [blame] | 2001 | static int __exit macb_remove(struct platform_device *pdev) |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 2002 | { |
| 2003 | struct net_device *dev; |
| 2004 | struct macb *bp; |
| 2005 | |
| 2006 | dev = platform_get_drvdata(pdev); |
| 2007 | |
| 2008 | if (dev) { |
| 2009 | bp = netdev_priv(dev); |
Atsushi Nemoto | 84b7901 | 2008-04-10 23:30:07 +0900 | [diff] [blame] | 2010 | if (bp->phy_dev) |
| 2011 | phy_disconnect(bp->phy_dev); |
Lennert Buytenhek | 298cf9b | 2008-10-08 16:29:57 -0700 | [diff] [blame] | 2012 | mdiobus_unregister(bp->mii_bus); |
| 2013 | kfree(bp->mii_bus->irq); |
| 2014 | mdiobus_free(bp->mii_bus); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 2015 | unregister_netdev(dev); |
Soren Brinkmann | e1824df | 2013-12-10 16:07:23 -0800 | [diff] [blame] | 2016 | if (!IS_ERR(bp->tx_clk)) |
| 2017 | clk_disable_unprepare(bp->tx_clk); |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 2018 | clk_disable_unprepare(bp->hclk); |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 2019 | clk_disable_unprepare(bp->pclk); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 2020 | free_netdev(dev); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 2021 | } |
| 2022 | |
| 2023 | return 0; |
| 2024 | } |
| 2025 | |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 2026 | #ifdef CONFIG_PM |
Soren Brinkmann | 0dfc3e1 | 2013-12-10 16:07:19 -0800 | [diff] [blame] | 2027 | static int macb_suspend(struct device *dev) |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 2028 | { |
Soren Brinkmann | 0dfc3e1 | 2013-12-10 16:07:19 -0800 | [diff] [blame] | 2029 | struct platform_device *pdev = to_platform_device(dev); |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 2030 | struct net_device *netdev = platform_get_drvdata(pdev); |
| 2031 | struct macb *bp = netdev_priv(netdev); |
| 2032 | |
Nicolas Ferre | 03fc472 | 2012-07-03 23:14:13 +0000 | [diff] [blame] | 2033 | netif_carrier_off(netdev); |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 2034 | netif_device_detach(netdev); |
| 2035 | |
Soren Brinkmann | e1824df | 2013-12-10 16:07:23 -0800 | [diff] [blame] | 2036 | if (!IS_ERR(bp->tx_clk)) |
| 2037 | clk_disable_unprepare(bp->tx_clk); |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 2038 | clk_disable_unprepare(bp->hclk); |
| 2039 | clk_disable_unprepare(bp->pclk); |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 2040 | |
| 2041 | return 0; |
| 2042 | } |
| 2043 | |
Soren Brinkmann | 0dfc3e1 | 2013-12-10 16:07:19 -0800 | [diff] [blame] | 2044 | static int macb_resume(struct device *dev) |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 2045 | { |
Soren Brinkmann | 0dfc3e1 | 2013-12-10 16:07:19 -0800 | [diff] [blame] | 2046 | struct platform_device *pdev = to_platform_device(dev); |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 2047 | struct net_device *netdev = platform_get_drvdata(pdev); |
| 2048 | struct macb *bp = netdev_priv(netdev); |
| 2049 | |
Steffen Trumtrar | ace5801 | 2013-03-27 23:07:07 +0000 | [diff] [blame] | 2050 | clk_prepare_enable(bp->pclk); |
| 2051 | clk_prepare_enable(bp->hclk); |
Soren Brinkmann | e1824df | 2013-12-10 16:07:23 -0800 | [diff] [blame] | 2052 | if (!IS_ERR(bp->tx_clk)) |
| 2053 | clk_prepare_enable(bp->tx_clk); |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 2054 | |
| 2055 | netif_device_attach(netdev); |
| 2056 | |
| 2057 | return 0; |
| 2058 | } |
Haavard Skinnemoen | c1f598f | 2008-03-04 13:39:29 +0100 | [diff] [blame] | 2059 | #endif |
| 2060 | |
Soren Brinkmann | 0dfc3e1 | 2013-12-10 16:07:19 -0800 | [diff] [blame] | 2061 | static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume); |
| 2062 | |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 2063 | static struct platform_driver macb_driver = { |
Haavard Skinnemoen | 06c3fd6 | 2008-01-31 13:10:22 +0100 | [diff] [blame] | 2064 | .remove = __exit_p(macb_remove), |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 2065 | .driver = { |
| 2066 | .name = "macb", |
Kay Sievers | 72abb46 | 2008-04-18 13:50:44 -0700 | [diff] [blame] | 2067 | .owner = THIS_MODULE, |
Jean-Christophe PLAGNIOL-VILLARD | fb97a84 | 2011-11-18 15:29:25 +0100 | [diff] [blame] | 2068 | .of_match_table = of_match_ptr(macb_dt_ids), |
Soren Brinkmann | 0dfc3e1 | 2013-12-10 16:07:19 -0800 | [diff] [blame] | 2069 | .pm = &macb_pm_ops, |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 2070 | }, |
| 2071 | }; |
| 2072 | |
Jingoo Han | b543a8d | 2013-03-04 16:43:18 +0000 | [diff] [blame] | 2073 | module_platform_driver_probe(macb_driver, macb_probe); |
Haavard Skinnemoen | 89e5785 | 2006-11-09 14:51:17 +0100 | [diff] [blame] | 2074 | |
| 2075 | MODULE_LICENSE("GPL"); |
Jamie Iles | f75ba50 | 2011-11-08 10:12:32 +0000 | [diff] [blame] | 2076 | MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); |
Jean Delvare | e05503e | 2011-05-18 16:49:24 +0200 | [diff] [blame] | 2077 | MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); |
Kay Sievers | 72abb46 | 2008-04-18 13:50:44 -0700 | [diff] [blame] | 2078 | MODULE_ALIAS("platform:macb"); |