blob: 3d3ead4a0f6f976a4e13a650b22ba1e5e3b2d6ab [file] [log] [blame]
Matteo Croced95b39c2007-10-14 18:10:13 +02001/*
2 * Copyright (C) 2006, 2007 Eugene Konev
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19#include <linux/module.h>
Alexey Dobriyan539d3ee2011-06-10 03:36:43 +000020#include <linux/interrupt.h>
Matteo Croced95b39c2007-10-14 18:10:13 +020021#include <linux/moduleparam.h>
22
23#include <linux/sched.h>
24#include <linux/kernel.h>
25#include <linux/slab.h>
26#include <linux/errno.h>
27#include <linux/types.h>
28#include <linux/delay.h>
Matteo Croced95b39c2007-10-14 18:10:13 +020029
30#include <linux/netdevice.h>
Florian Fainelli30765d02010-03-07 00:55:26 +000031#include <linux/if_vlan.h>
Matteo Croced95b39c2007-10-14 18:10:13 +020032#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/skbuff.h>
35#include <linux/mii.h>
36#include <linux/phy.h>
Eugene Konevb88219f2007-10-24 10:42:03 +080037#include <linux/phy_fixed.h>
Matteo Croced95b39c2007-10-14 18:10:13 +020038#include <linux/platform_device.h>
39#include <linux/dma-mapping.h>
Florian Fainelli780019d2010-01-27 09:10:06 +010040#include <linux/clk.h>
Florian Fainelli559764d2010-08-08 10:09:39 +000041#include <linux/gpio.h>
Arun Sharma600634972011-07-26 16:09:06 -070042#include <linux/atomic.h>
Matteo Croced95b39c2007-10-14 18:10:13 +020043
44MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
45MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
46MODULE_LICENSE("GPL");
Kay Sievers72abb462008-04-18 13:50:44 -070047MODULE_ALIAS("platform:cpmac");
Matteo Croced95b39c2007-10-14 18:10:13 +020048
49static int debug_level = 8;
50static int dumb_switch;
51
52/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
53module_param(debug_level, int, 0444);
54module_param(dumb_switch, int, 0444);
55
56MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
57MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
58
Florian Fainelli25dc27d2010-03-07 00:55:50 +000059#define CPMAC_VERSION "0.5.2"
Florian Fainelli30765d02010-03-07 00:55:26 +000060/* frame size + 802.1q tag + FCS size */
61#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
Matteo Croced95b39c2007-10-14 18:10:13 +020062#define CPMAC_QUEUES 8
63
64/* Ethernet registers */
65#define CPMAC_TX_CONTROL 0x0004
66#define CPMAC_TX_TEARDOWN 0x0008
67#define CPMAC_RX_CONTROL 0x0014
68#define CPMAC_RX_TEARDOWN 0x0018
69#define CPMAC_MBP 0x0100
Varka Bhadramaf595152014-07-10 11:05:39 +053070#define MBP_RXPASSCRC 0x40000000
71#define MBP_RXQOS 0x20000000
72#define MBP_RXNOCHAIN 0x10000000
73#define MBP_RXCMF 0x01000000
74#define MBP_RXSHORT 0x00800000
75#define MBP_RXCEF 0x00400000
76#define MBP_RXPROMISC 0x00200000
77#define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
78#define MBP_RXBCAST 0x00002000
79#define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
80#define MBP_RXMCAST 0x00000020
81#define MBP_MCASTCHAN(channel) ((channel) & 0x7)
Matteo Croced95b39c2007-10-14 18:10:13 +020082#define CPMAC_UNICAST_ENABLE 0x0104
83#define CPMAC_UNICAST_CLEAR 0x0108
84#define CPMAC_MAX_LENGTH 0x010c
85#define CPMAC_BUFFER_OFFSET 0x0110
86#define CPMAC_MAC_CONTROL 0x0160
Varka Bhadramaf595152014-07-10 11:05:39 +053087#define MAC_TXPTYPE 0x00000200
88#define MAC_TXPACE 0x00000040
89#define MAC_MII 0x00000020
90#define MAC_TXFLOW 0x00000010
91#define MAC_RXFLOW 0x00000008
92#define MAC_MTEST 0x00000004
93#define MAC_LOOPBACK 0x00000002
94#define MAC_FDX 0x00000001
Matteo Croced95b39c2007-10-14 18:10:13 +020095#define CPMAC_MAC_STATUS 0x0164
Varka Bhadramaf595152014-07-10 11:05:39 +053096#define MAC_STATUS_QOS 0x00000004
97#define MAC_STATUS_RXFLOW 0x00000002
98#define MAC_STATUS_TXFLOW 0x00000001
Matteo Croced95b39c2007-10-14 18:10:13 +020099#define CPMAC_TX_INT_ENABLE 0x0178
100#define CPMAC_TX_INT_CLEAR 0x017c
101#define CPMAC_MAC_INT_VECTOR 0x0180
Varka Bhadramaf595152014-07-10 11:05:39 +0530102#define MAC_INT_STATUS 0x00080000
103#define MAC_INT_HOST 0x00040000
104#define MAC_INT_RX 0x00020000
105#define MAC_INT_TX 0x00010000
Matteo Croced95b39c2007-10-14 18:10:13 +0200106#define CPMAC_MAC_EOI_VECTOR 0x0184
107#define CPMAC_RX_INT_ENABLE 0x0198
108#define CPMAC_RX_INT_CLEAR 0x019c
109#define CPMAC_MAC_INT_ENABLE 0x01a8
110#define CPMAC_MAC_INT_CLEAR 0x01ac
Florian Fainelli559764d2010-08-08 10:09:39 +0000111#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
Matteo Croced95b39c2007-10-14 18:10:13 +0200112#define CPMAC_MAC_ADDR_MID 0x01d0
113#define CPMAC_MAC_ADDR_HI 0x01d4
114#define CPMAC_MAC_HASH_LO 0x01d8
115#define CPMAC_MAC_HASH_HI 0x01dc
116#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
117#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
118#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
119#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
120#define CPMAC_REG_END 0x0680
Varka Bhadram8bcd5c62014-07-10 11:05:40 +0530121
122/* Rx/Tx statistics
Matteo Croced95b39c2007-10-14 18:10:13 +0200123 * TODO: use some of them to fill stats in cpmac_stats()
124 */
125#define CPMAC_STATS_RX_GOOD 0x0200
126#define CPMAC_STATS_RX_BCAST 0x0204
127#define CPMAC_STATS_RX_MCAST 0x0208
128#define CPMAC_STATS_RX_PAUSE 0x020c
129#define CPMAC_STATS_RX_CRC 0x0210
130#define CPMAC_STATS_RX_ALIGN 0x0214
131#define CPMAC_STATS_RX_OVER 0x0218
132#define CPMAC_STATS_RX_JABBER 0x021c
133#define CPMAC_STATS_RX_UNDER 0x0220
134#define CPMAC_STATS_RX_FRAG 0x0224
135#define CPMAC_STATS_RX_FILTER 0x0228
136#define CPMAC_STATS_RX_QOSFILTER 0x022c
137#define CPMAC_STATS_RX_OCTETS 0x0230
138
139#define CPMAC_STATS_TX_GOOD 0x0234
140#define CPMAC_STATS_TX_BCAST 0x0238
141#define CPMAC_STATS_TX_MCAST 0x023c
142#define CPMAC_STATS_TX_PAUSE 0x0240
143#define CPMAC_STATS_TX_DEFER 0x0244
144#define CPMAC_STATS_TX_COLLISION 0x0248
145#define CPMAC_STATS_TX_SINGLECOLL 0x024c
146#define CPMAC_STATS_TX_MULTICOLL 0x0250
147#define CPMAC_STATS_TX_EXCESSCOLL 0x0254
148#define CPMAC_STATS_TX_LATECOLL 0x0258
149#define CPMAC_STATS_TX_UNDERRUN 0x025c
150#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
151#define CPMAC_STATS_TX_OCTETS 0x0264
152
153#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg)))
154#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
155 (reg)))
156
157/* MDIO bus */
158#define CPMAC_MDIO_VERSION 0x0000
159#define CPMAC_MDIO_CONTROL 0x0004
Varka Bhadramaf595152014-07-10 11:05:39 +0530160#define MDIOC_IDLE 0x80000000
161#define MDIOC_ENABLE 0x40000000
162#define MDIOC_PREAMBLE 0x00100000
163#define MDIOC_FAULT 0x00080000
164#define MDIOC_FAULTDETECT 0x00040000
165#define MDIOC_INTTEST 0x00020000
166#define MDIOC_CLKDIV(div) ((div) & 0xff)
Matteo Croced95b39c2007-10-14 18:10:13 +0200167#define CPMAC_MDIO_ALIVE 0x0008
168#define CPMAC_MDIO_LINK 0x000c
169#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
Varka Bhadramaf595152014-07-10 11:05:39 +0530170#define MDIO_BUSY 0x80000000
171#define MDIO_WRITE 0x40000000
172#define MDIO_REG(reg) (((reg) & 0x1f) << 21)
173#define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
174#define MDIO_DATA(data) ((data) & 0xffff)
Matteo Croced95b39c2007-10-14 18:10:13 +0200175#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
Varka Bhadramaf595152014-07-10 11:05:39 +0530176#define PHYSEL_LINKSEL 0x00000040
177#define PHYSEL_LINKINT 0x00000020
Matteo Croced95b39c2007-10-14 18:10:13 +0200178
/* One hardware DMA descriptor, shared with the CPMAC engine.
 * hw_next/hw_data are the physical-address fields read by the hardware;
 * next/prev/mapping/data_mapping are driver-side bookkeeping.
 */
struct cpmac_desc {
	u32 hw_next;		/* phys addr of next descriptor (0 = end of chain) */
	u32 hw_data;		/* phys addr of the packet buffer */
	u16 buflen;
	u16 bufflags;
	u16 datalen;
	u16 dataflags;		/* SOP/EOP/OWN/EOQ status bits below */
#define CPMAC_SOP			0x8000
#define CPMAC_EOP			0x4000
#define CPMAC_OWN			0x2000
#define CPMAC_EOQ			0x1000
	struct sk_buff *skb;	/* skb backing hw_data */
	struct cpmac_desc *next;
	struct cpmac_desc *prev;
	dma_addr_t mapping;	/* DMA address of this descriptor itself */
	dma_addr_t data_mapping;	/* DMA address of skb->data */
};
196
/* Per-device driver state, stored in netdev_priv(dev). */
struct cpmac_priv {
	spinlock_t lock;		/* protects tx stats updates */
	spinlock_t rx_lock;		/* protects the rx descriptor ring */
	struct cpmac_desc *rx_head;	/* next rx descriptor to process */
	int ring_size;			/* number of rx descriptors */
	struct cpmac_desc *desc_ring;	/* tx descriptors + rx ring storage */
	dma_addr_t dma_ring;		/* DMA address of desc_ring */
	void __iomem *regs;		/* mapped CPMAC register window */
	struct mii_bus *mii_bus;
	struct phy_device *phy;
	char phy_name[MII_BUS_ID_SIZE + 3];
	int oldlink, oldspeed, oldduplex;	/* cached PHY state for change detection */
	u32 msg_enable;			/* netif_msg_* verbosity bitmap */
	struct net_device *dev;
	struct work_struct reset_work;	/* deferred hardware error recovery */
	struct platform_device *pdev;
	struct napi_struct napi;
	atomic_t reset_pending;		/* nonzero while a reset is queued/running */
};
216
217static irqreturn_t cpmac_irq(int, void *);
218static void cpmac_hw_start(struct net_device *dev);
219static void cpmac_hw_stop(struct net_device *dev);
220static int cpmac_stop(struct net_device *dev);
221static int cpmac_open(struct net_device *dev);
222
223static void cpmac_dump_regs(struct net_device *dev)
224{
225 int i;
226 struct cpmac_priv *priv = netdev_priv(dev);
Varka Bhadram59329d82014-07-10 11:05:43 +0530227
Matteo Croced95b39c2007-10-14 18:10:13 +0200228 for (i = 0; i < CPMAC_REG_END; i += 4) {
229 if (i % 16 == 0) {
230 if (i)
Florian Fainelli559764d2010-08-08 10:09:39 +0000231 pr_cont("\n");
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530232 netdev_dbg(dev, "reg[%p]:", priv->regs + i);
Matteo Croced95b39c2007-10-14 18:10:13 +0200233 }
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530234 pr_debug(" %08x", cpmac_read(priv->regs, i));
Matteo Croced95b39c2007-10-14 18:10:13 +0200235 }
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530236 pr_debug("\n");
Matteo Croced95b39c2007-10-14 18:10:13 +0200237}
238
239static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
240{
241 int i;
Varka Bhadram59329d82014-07-10 11:05:43 +0530242
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530243 netdev_dbg(dev, "desc[%p]:", desc);
Matteo Croced95b39c2007-10-14 18:10:13 +0200244 for (i = 0; i < sizeof(*desc) / 4; i++)
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530245 pr_debug(" %08x", ((u32 *)desc)[i]);
246 pr_debug("\n");
Matteo Croced95b39c2007-10-14 18:10:13 +0200247}
248
Matteo Crocef917d582008-05-14 00:58:32 +0200249static void cpmac_dump_all_desc(struct net_device *dev)
250{
251 struct cpmac_priv *priv = netdev_priv(dev);
252 struct cpmac_desc *dump = priv->rx_head;
Varka Bhadram59329d82014-07-10 11:05:43 +0530253
Matteo Crocef917d582008-05-14 00:58:32 +0200254 do {
255 cpmac_dump_desc(dev, dump);
256 dump = dump->next;
257 } while (dump != priv->rx_head);
258}
259
Matteo Croced95b39c2007-10-14 18:10:13 +0200260static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
261{
262 int i;
Varka Bhadram59329d82014-07-10 11:05:43 +0530263
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530264 netdev_dbg(dev, "skb 0x%p, len=%d\n", skb, skb->len);
Matteo Croced95b39c2007-10-14 18:10:13 +0200265 for (i = 0; i < skb->len; i++) {
266 if (i % 16 == 0) {
267 if (i)
Florian Fainelli559764d2010-08-08 10:09:39 +0000268 pr_cont("\n");
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530269 netdev_dbg(dev, "data[%p]:", skb->data + i);
Matteo Croced95b39c2007-10-14 18:10:13 +0200270 }
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530271 pr_debug(" %02x", ((u8 *)skb->data)[i]);
Matteo Croced95b39c2007-10-14 18:10:13 +0200272 }
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530273 pr_debug("\n");
Matteo Croced95b39c2007-10-14 18:10:13 +0200274}
275
276static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
277{
278 u32 val;
279
280 while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
281 cpu_relax();
282 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
283 MDIO_PHY(phy_id));
284 while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
285 cpu_relax();
286 return MDIO_DATA(val);
287}
288
289static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
290 int reg, u16 val)
291{
292 while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
293 cpu_relax();
294 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
295 MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
296 return 0;
297}
298
299static int cpmac_mdio_reset(struct mii_bus *bus)
300{
Florian Fainelli780019d2010-01-27 09:10:06 +0100301 struct clk *cpmac_clk;
302
303 cpmac_clk = clk_get(&bus->dev, "cpmac");
304 if (IS_ERR(cpmac_clk)) {
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530305 pr_err("unable to get cpmac clock\n");
Florian Fainelli780019d2010-01-27 09:10:06 +0100306 return -1;
307 }
Matteo Croced95b39c2007-10-14 18:10:13 +0200308 ar7_device_reset(AR7_RESET_BIT_MDIO);
309 cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
Florian Fainelli780019d2010-01-27 09:10:06 +0100310 MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
Matteo Croced95b39c2007-10-14 18:10:13 +0200311 return 0;
312}
313
314static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
315
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -0700316static struct mii_bus *cpmac_mii;
Matteo Croced95b39c2007-10-14 18:10:13 +0200317
Matteo Croced95b39c2007-10-14 18:10:13 +0200318static void cpmac_set_multicast_list(struct net_device *dev)
319{
Jiri Pirko22bedad32010-04-01 21:22:57 +0000320 struct netdev_hw_addr *ha;
Matteo Croced95b39c2007-10-14 18:10:13 +0200321 u8 tmp;
322 u32 mbp, bit, hash[2] = { 0, };
323 struct cpmac_priv *priv = netdev_priv(dev);
324
325 mbp = cpmac_read(priv->regs, CPMAC_MBP);
326 if (dev->flags & IFF_PROMISC) {
327 cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
328 MBP_RXPROMISC);
329 } else {
330 cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
331 if (dev->flags & IFF_ALLMULTI) {
332 /* enable all multicast mode */
333 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
334 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
335 } else {
Varka Bhadram8bcd5c62014-07-10 11:05:40 +0530336 /* cpmac uses some strange mac address hashing
Matteo Croced95b39c2007-10-14 18:10:13 +0200337 * (not crc32)
338 */
Jiri Pirko22bedad32010-04-01 21:22:57 +0000339 netdev_for_each_mc_addr(ha, dev) {
Matteo Croced95b39c2007-10-14 18:10:13 +0200340 bit = 0;
Jiri Pirko22bedad32010-04-01 21:22:57 +0000341 tmp = ha->addr[0];
Matteo Croced95b39c2007-10-14 18:10:13 +0200342 bit ^= (tmp >> 2) ^ (tmp << 4);
Jiri Pirko22bedad32010-04-01 21:22:57 +0000343 tmp = ha->addr[1];
Matteo Croced95b39c2007-10-14 18:10:13 +0200344 bit ^= (tmp >> 4) ^ (tmp << 2);
Jiri Pirko22bedad32010-04-01 21:22:57 +0000345 tmp = ha->addr[2];
Matteo Croced95b39c2007-10-14 18:10:13 +0200346 bit ^= (tmp >> 6) ^ tmp;
Jiri Pirko22bedad32010-04-01 21:22:57 +0000347 tmp = ha->addr[3];
Matteo Croced95b39c2007-10-14 18:10:13 +0200348 bit ^= (tmp >> 2) ^ (tmp << 4);
Jiri Pirko22bedad32010-04-01 21:22:57 +0000349 tmp = ha->addr[4];
Matteo Croced95b39c2007-10-14 18:10:13 +0200350 bit ^= (tmp >> 4) ^ (tmp << 2);
Jiri Pirko22bedad32010-04-01 21:22:57 +0000351 tmp = ha->addr[5];
Matteo Croced95b39c2007-10-14 18:10:13 +0200352 bit ^= (tmp >> 6) ^ tmp;
353 bit &= 0x3f;
354 hash[bit / 32] |= 1 << (bit % 32);
355 }
356
357 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
358 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
359 }
360 }
361}
362
/* Harvest one received packet from @desc and re-arm the descriptor.
 *
 * On success returns the filled skb (now unmapped and ready for the
 * stack) and installs a freshly allocated replacement skb in the
 * descriptor. On allocation failure the old skb is kept in place and
 * the packet is dropped. Returns NULL for spurious (zero-length)
 * completions and on drop. Called under priv->rx_lock from cpmac_poll.
 */
static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	/* ack this descriptor to the hardware before touching it */
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev, "rx: spurious interrupt\n");

		return NULL;
	}

	/* allocate the replacement first; only then hand off the old skb */
	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		skb_checksum_none_assert(desc->skb);
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		/* unmap the old buffer, then map the replacement for DMA */
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			netdev_dbg(priv->dev, "received packet:\n");
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev,
				    "low on skbs, dropping packet\n");

		priv->dev->stats.rx_dropped++;
	}

	/* re-arm: give the descriptor back to the hardware */
	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}
410
/* NAPI poll: drain completed rx descriptors (up to @budget), return
 * freed descriptors to the hardware, and restart the receiver if it
 * stalled on an end-of-queue (EOQ) condition. On unrecoverable ring
 * corruption, schedules a full hardware reset via reset_work.
 *
 * Returns 0 when polling is complete (NAPI re-enabled), 1 to be polled
 * again.
 */
static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev, "rx: polling, but no queue\n");

		spin_unlock(&priv->rx_lock);
		napi_complete(napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				/* two EOQs in one pass means the ring
				 * state is inconsistent - bail out
				 */
				if (netif_msg_rx_err(priv))
					netdev_err(priv->dev, "poll found a"
						   " duplicate EOQ: %p and %p\n",
						   restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list
		 */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size)
	 */

	if (!restart &&
	    (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
	    == CPMAC_EOQ &&
	    (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev, "rx dma ring overrun\n");

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				netdev_err(priv->dev, "cpmac_poll is trying "
					   "to restart rx from a descriptor "
					   "that's not free: %p\n", restart);
			goto fatal_error;
		}

		/* kick the receiver back into motion at the restart point */
		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		netdev_dbg(priv->dev, "poll processed %d packets\n", received);

	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode
		 */
		napi_complete(napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging.
	 */
	if (netif_msg_drv(priv)) {
		netdev_err(priv->dev, "cpmac_poll is confused. "
			   "Resetting hardware\n");
		cpmac_dump_all_desc(priv->dev);
		netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
			   cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
			   cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	napi_complete(napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	/* reset_pending gates start_xmit; dropped again by cpmac_hw_error
	 * (or immediately if the work was already queued)
	 */
	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);
	return 0;

}
543
/* ndo_start_xmit: queue one skb on the tx channel selected by the skb's
 * queue mapping. Each of the CPMAC_QUEUES channels holds at most one
 * in-flight packet; the subqueue is stopped here and re-woken from
 * cpmac_end_xmit when the hardware completes it.
 */
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue, len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	/* refuse new work while a hardware reset is in progress */
	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max(skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
	netif_stop_subqueue(dev, queue);

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		/* descriptor still owned by hardware - channel is full */
		if (netif_msg_tx_err(priv) && net_ratelimit())
			netdev_warn(dev, "tx dma ring full\n");

		return NETDEV_TX_BUSY;
	}

	/* NOTE(review): empty lock/unlock pair - appears to act only as a
	 * synchronization point with the stats updates in cpmac_end_xmit;
	 * nothing is protected between acquire and release. Confirm intent
	 * before removing.
	 */
	spin_lock(&priv->lock);
	spin_unlock(&priv->lock);
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	/* hand the descriptor to the hardware - transmission starts here */
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}
587
588static void cpmac_end_xmit(struct net_device *dev, int queue)
589{
590 struct cpmac_desc *desc;
591 struct cpmac_priv *priv = netdev_priv(dev);
592
593 desc = &priv->desc_ring[queue];
594 cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
595 if (likely(desc->skb)) {
596 spin_lock(&priv->lock);
597 dev->stats.tx_packets++;
598 dev->stats.tx_bytes += desc->skb->len;
599 spin_unlock(&priv->lock);
600 dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
601 DMA_TO_DEVICE);
602
603 if (unlikely(netif_msg_tx_done(priv)))
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530604 netdev_dbg(dev, "sent 0x%p, len=%d\n",
605 desc->skb, desc->skb->len);
Matteo Croced95b39c2007-10-14 18:10:13 +0200606
607 dev_kfree_skb_irq(desc->skb);
608 desc->skb = NULL;
Stefan Weil0220ff72009-05-31 10:59:15 +0000609 if (__netif_subqueue_stopped(dev, queue))
Matteo Croced95b39c2007-10-14 18:10:13 +0200610 netif_wake_subqueue(dev, queue);
Matteo Croced95b39c2007-10-14 18:10:13 +0200611 } else {
612 if (netif_msg_tx_err(priv) && net_ratelimit())
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530613 netdev_warn(dev, "end_xmit: spurious interrupt\n");
Stefan Weil0220ff72009-05-31 10:59:15 +0000614 if (__netif_subqueue_stopped(dev, queue))
Matteo Croced95b39c2007-10-14 18:10:13 +0200615 netif_wake_subqueue(dev, queue);
Matteo Croced95b39c2007-10-14 18:10:13 +0200616 }
617}
618
/* Quiesce the MAC: reset the device via the AR7 reset line, disable
 * rx/tx DMA, zero all channel head pointers, clear pending interrupts
 * and take the MII port down. Register write order follows the
 * original bring-down sequence.
 */
static void cpmac_hw_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

	ar7_device_reset(pdata->reset_bit);
	/* clear the DMA enable bits (bit 0) for both directions */
	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	/* finally disconnect the MII port */
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}
641
/* Bring the MAC up: reset it, point rx channel 0 at the prepared ring,
 * program filtering and the station MAC address, unmask interrupts and
 * enable rx/tx DMA with the MII port in full duplex. The rx ring
 * (priv->rx_head) must already be set up.
 */
static void cpmac_hw_start(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

	ar7_device_reset(pdata->reset_bit);
	/* clear every channel head pointer, then arm rx channel 0 */
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);

	cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
		    MBP_RXMCAST);
	cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
	/* station address: byte 5 goes to every per-channel LO register,
	 * byte 4 to MID and bytes 0-3 packed into HI
	 */
	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
		    (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
		    (dev->dev_addr[3] << 24));
	cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);

	/* enable DMA (bit 0) and the MAC itself last */
	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
		    MAC_FDX);
}
682
683static void cpmac_clear_rx(struct net_device *dev)
684{
685 struct cpmac_priv *priv = netdev_priv(dev);
686 struct cpmac_desc *desc;
687 int i;
Varka Bhadram59329d82014-07-10 11:05:43 +0530688
Matteo Croced95b39c2007-10-14 18:10:13 +0200689 if (unlikely(!priv->rx_head))
690 return;
691 desc = priv->rx_head;
692 for (i = 0; i < priv->ring_size; i++) {
693 if ((desc->dataflags & CPMAC_OWN) == 0) {
694 if (netif_msg_rx_err(priv) && net_ratelimit())
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530695 netdev_warn(dev, "packet dropped\n");
Matteo Croced95b39c2007-10-14 18:10:13 +0200696 if (unlikely(netif_msg_hw(priv)))
697 cpmac_dump_desc(dev, desc);
698 desc->dataflags = CPMAC_OWN;
699 dev->stats.rx_dropped++;
700 }
Matteo Crocef917d582008-05-14 00:58:32 +0200701 desc->hw_next = desc->next->mapping;
Matteo Croced95b39c2007-10-14 18:10:13 +0200702 desc = desc->next;
703 }
Matteo Crocef917d582008-05-14 00:58:32 +0200704 priv->rx_head->prev->hw_next = 0;
Matteo Croced95b39c2007-10-14 18:10:13 +0200705}
706
707static void cpmac_clear_tx(struct net_device *dev)
708{
709 struct cpmac_priv *priv = netdev_priv(dev);
710 int i;
Varka Bhadram59329d82014-07-10 11:05:43 +0530711
Matteo Croced95b39c2007-10-14 18:10:13 +0200712 if (unlikely(!priv->desc_ring))
713 return;
Matteo Croce6cd043d2007-10-23 19:12:22 +0200714 for (i = 0; i < CPMAC_QUEUES; i++) {
715 priv->desc_ring[i].dataflags = 0;
Matteo Croced95b39c2007-10-14 18:10:13 +0200716 if (priv->desc_ring[i].skb) {
717 dev_kfree_skb_any(priv->desc_ring[i].skb);
Matteo Crocef917d582008-05-14 00:58:32 +0200718 priv->desc_ring[i].skb = NULL;
Matteo Croced95b39c2007-10-14 18:10:13 +0200719 }
Matteo Croce6cd043d2007-10-23 19:12:22 +0200720 }
Matteo Croced95b39c2007-10-14 18:10:13 +0200721}
722
/*
 * Deferred reset handler, scheduled from cpmac_check_status() when the
 * MAC reports a host error.  Flushes both rings, restarts the MAC,
 * drops the reset_pending count taken at schedule time, then wakes the
 * stopped TX queues and re-enables the MAC status interrupts.
 */
static void cpmac_hw_error(struct work_struct *work)
{
	struct cpmac_priv *priv =
		container_of(work, struct cpmac_priv, reset_work);

	spin_lock(&priv->rx_lock);
	cpmac_clear_rx(priv->dev);
	spin_unlock(&priv->rx_lock);
	cpmac_clear_tx(priv->dev);
	cpmac_hw_start(priv->dev);
	/* ensure the restart is complete before the flag is cleared */
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}
739
740static void cpmac_check_status(struct net_device *dev)
741{
742 struct cpmac_priv *priv = netdev_priv(dev);
743
744 u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
745 int rx_channel = (macstatus >> 8) & 7;
746 int rx_code = (macstatus >> 12) & 15;
747 int tx_channel = (macstatus >> 16) & 7;
748 int tx_code = (macstatus >> 20) & 15;
749
750 if (rx_code || tx_code) {
751 if (netif_msg_drv(priv) && net_ratelimit()) {
752 /* Can't find any documentation on what these
Varka Bhadram8bcd5c62014-07-10 11:05:40 +0530753 * error codes actually are. So just log them and hope..
Matteo Crocef917d582008-05-14 00:58:32 +0200754 */
755 if (rx_code)
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530756 netdev_warn(dev, "host error %d on rx "
757 "channel %d (macstatus %08x), resetting\n",
758 rx_code, rx_channel, macstatus);
Matteo Crocef917d582008-05-14 00:58:32 +0200759 if (tx_code)
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530760 netdev_warn(dev, "host error %d on tx "
761 "channel %d (macstatus %08x), resetting\n",
762 tx_code, tx_channel, macstatus);
Matteo Crocef917d582008-05-14 00:58:32 +0200763 }
764
David S. Millerfd2ea0a2008-07-17 01:56:23 -0700765 netif_tx_stop_all_queues(dev);
Matteo Crocef917d582008-05-14 00:58:32 +0200766 cpmac_hw_stop(dev);
767 if (schedule_work(&priv->reset_work))
768 atomic_inc(&priv->reset_pending);
769 if (unlikely(netif_msg_hw(priv)))
770 cpmac_dump_regs(dev);
771 }
772 cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
Matteo Croced95b39c2007-10-14 18:10:13 +0200773}
774
/*
 * Shared interrupt handler for all CPMAC interrupt sources.
 *
 * Reads CPMAC_MAC_INT_VECTOR to see what fired: completed TX queues
 * are reaped immediately, RX work is deferred to NAPI (clearing the
 * queue's RX interrupt first), and host/status errors are handed to
 * cpmac_check_status().  Writing CPMAC_MAC_EOI_VECTOR signals
 * end-of-interrupt to the MAC.
 */
static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cpmac_priv *priv;
	int queue;
	u32 status;

	priv = netdev_priv(dev);

	status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

	if (unlikely(netif_msg_intr(priv)))
		netdev_dbg(dev, "interrupt status: 0x%08x\n", status);

	if (status & MAC_INT_TX)
		cpmac_end_xmit(dev, (status & 7));

	if (status & MAC_INT_RX) {
		queue = (status >> 8) & 7;
		if (napi_schedule_prep(&priv->napi)) {
			/* clear this queue's RX interrupt before polling */
			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
			__napi_schedule(&priv->napi);
		}
	}

	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

	/* host/status errors are rare; checked after EOI */
	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
		cpmac_check_status(dev);

	return IRQ_HANDLED;
}
807
/*
 * net_device tx_timeout handler: count the error, drop whatever is
 * stuck in the TX descriptors and wake the queues again.
 * reset_pending is held non-zero (with compiler barriers) around the
 * cleanup so concurrent paths can observe a reset in progress.
 */
static void cpmac_tx_timeout(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	dev->stats.tx_errors++;
	spin_unlock(&priv->lock);
	if (netif_msg_tx_err(priv) && net_ratelimit())
		netdev_warn(dev, "transmit timeout\n");

	atomic_inc(&priv->reset_pending);
	barrier();
	cpmac_clear_tx(dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
}
826
827static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
828{
829 struct cpmac_priv *priv = netdev_priv(dev);
Varka Bhadram59329d82014-07-10 11:05:43 +0530830
Matteo Croced95b39c2007-10-14 18:10:13 +0200831 if (!(netif_running(dev)))
832 return -EINVAL;
833 if (!priv->phy)
834 return -EINVAL;
Matteo Croced95b39c2007-10-14 18:10:13 +0200835
Richard Cochran28b04112010-07-17 08:48:55 +0000836 return phy_mii_ioctl(priv->phy, ifr, cmd);
Matteo Croced95b39c2007-10-14 18:10:13 +0200837}
838
839static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
840{
841 struct cpmac_priv *priv = netdev_priv(dev);
842
843 if (priv->phy)
844 return phy_ethtool_gset(priv->phy, cmd);
845
846 return -EINVAL;
847}
848
849static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
850{
851 struct cpmac_priv *priv = netdev_priv(dev);
852
853 if (!capable(CAP_NET_ADMIN))
854 return -EPERM;
855
856 if (priv->phy)
857 return phy_ethtool_sset(priv->phy, cmd);
858
859 return -EINVAL;
860}
861
Florian Fainelli559764d2010-08-08 10:09:39 +0000862static void cpmac_get_ringparam(struct net_device *dev,
863 struct ethtool_ringparam *ring)
Matteo Croced95b39c2007-10-14 18:10:13 +0200864{
865 struct cpmac_priv *priv = netdev_priv(dev);
866
867 ring->rx_max_pending = 1024;
868 ring->rx_mini_max_pending = 1;
869 ring->rx_jumbo_max_pending = 1;
870 ring->tx_max_pending = 1;
871
872 ring->rx_pending = priv->ring_size;
873 ring->rx_mini_pending = 1;
874 ring->rx_jumbo_pending = 1;
875 ring->tx_pending = 1;
876}
877
Florian Fainelli559764d2010-08-08 10:09:39 +0000878static int cpmac_set_ringparam(struct net_device *dev,
879 struct ethtool_ringparam *ring)
Matteo Croced95b39c2007-10-14 18:10:13 +0200880{
881 struct cpmac_priv *priv = netdev_priv(dev);
882
Matteo Croce6cd043d2007-10-23 19:12:22 +0200883 if (netif_running(dev))
Matteo Croced95b39c2007-10-14 18:10:13 +0200884 return -EBUSY;
885 priv->ring_size = ring->rx_pending;
886 return 0;
887}
888
889static void cpmac_get_drvinfo(struct net_device *dev,
890 struct ethtool_drvinfo *info)
891{
Jiri Pirko7826d432013-01-06 00:44:26 +0000892 strlcpy(info->driver, "cpmac", sizeof(info->driver));
893 strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
894 snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
Matteo Croced95b39c2007-10-14 18:10:13 +0200895 info->regdump_len = 0;
896}
897
/* ethtool callbacks; link state comes from the generic helper */
static const struct ethtool_ops cpmac_ethtool_ops = {
	.get_settings = cpmac_get_settings,
	.set_settings = cpmac_set_settings,
	.get_drvinfo = cpmac_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = cpmac_get_ringparam,
	.set_ringparam = cpmac_set_ringparam,
};
906
/*
 * PHY state-change callback (registered via phy_connect() in
 * cpmac_probe()).  Caches link/speed/duplex in priv->old* so the
 * status line is only printed when something actually changed; TX
 * queues are started whenever the link is up.
 */
static void cpmac_adjust_link(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	int new_state = 0;

	spin_lock(&priv->lock);
	if (priv->phy->link) {
		netif_tx_start_all_queues(dev);
		if (priv->phy->duplex != priv->oldduplex) {
			new_state = 1;
			priv->oldduplex = priv->phy->duplex;
		}

		if (priv->phy->speed != priv->oldspeed) {
			new_state = 1;
			priv->oldspeed = priv->phy->speed;
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		/* link went down: reset the cached values */
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv) && net_ratelimit())
		phy_print_status(priv->phy);

	spin_unlock(&priv->lock);
}
941
942static int cpmac_open(struct net_device *dev)
943{
944 int i, size, res;
945 struct cpmac_priv *priv = netdev_priv(dev);
946 struct resource *mem;
947 struct cpmac_desc *desc;
948 struct sk_buff *skb;
949
Matteo Croced95b39c2007-10-14 18:10:13 +0200950 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
Dan Carpenter7e307c72010-06-30 13:12:01 -0700951 if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
Matteo Croced95b39c2007-10-14 18:10:13 +0200952 if (netif_msg_drv(priv))
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530953 netdev_err(dev, "failed to request registers\n");
954
Matteo Croced95b39c2007-10-14 18:10:13 +0200955 res = -ENXIO;
956 goto fail_reserve;
957 }
958
Dan Carpenter7e307c72010-06-30 13:12:01 -0700959 priv->regs = ioremap(mem->start, resource_size(mem));
Matteo Croced95b39c2007-10-14 18:10:13 +0200960 if (!priv->regs) {
961 if (netif_msg_drv(priv))
Varka Bhadramf160a2d2014-07-10 11:05:41 +0530962 netdev_err(dev, "failed to remap registers\n");
963
Matteo Croced95b39c2007-10-14 18:10:13 +0200964 res = -ENXIO;
965 goto fail_remap;
966 }
967
968 size = priv->ring_size + CPMAC_QUEUES;
969 priv->desc_ring = dma_alloc_coherent(&dev->dev,
970 sizeof(struct cpmac_desc) * size,
971 &priv->dma_ring,
972 GFP_KERNEL);
973 if (!priv->desc_ring) {
974 res = -ENOMEM;
975 goto fail_alloc;
976 }
977
978 for (i = 0; i < size; i++)
979 priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;
980
981 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
982 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
Eric Dumazet89d71a62009-10-13 05:34:20 +0000983 skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
Matteo Croced95b39c2007-10-14 18:10:13 +0200984 if (unlikely(!skb)) {
985 res = -ENOMEM;
986 goto fail_desc;
987 }
Matteo Croced95b39c2007-10-14 18:10:13 +0200988 desc->skb = skb;
989 desc->data_mapping = dma_map_single(&dev->dev, skb->data,
990 CPMAC_SKB_SIZE,
991 DMA_FROM_DEVICE);
992 desc->hw_data = (u32)desc->data_mapping;
993 desc->buflen = CPMAC_SKB_SIZE;
994 desc->dataflags = CPMAC_OWN;
995 desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
Matteo Crocef917d582008-05-14 00:58:32 +0200996 desc->next->prev = desc;
Matteo Croced95b39c2007-10-14 18:10:13 +0200997 desc->hw_next = (u32)desc->next->mapping;
998 }
999
Matteo Crocef917d582008-05-14 00:58:32 +02001000 priv->rx_head->prev->hw_next = (u32)0;
1001
Florian Fainelli559764d2010-08-08 10:09:39 +00001002 res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
1003 if (res) {
Matteo Croced95b39c2007-10-14 18:10:13 +02001004 if (netif_msg_drv(priv))
Varka Bhadramf160a2d2014-07-10 11:05:41 +05301005 netdev_err(dev, "failed to obtain irq\n");
1006
Matteo Croced95b39c2007-10-14 18:10:13 +02001007 goto fail_irq;
1008 }
1009
Matteo Crocef917d582008-05-14 00:58:32 +02001010 atomic_set(&priv->reset_pending, 0);
Matteo Croced95b39c2007-10-14 18:10:13 +02001011 INIT_WORK(&priv->reset_work, cpmac_hw_error);
1012 cpmac_hw_start(dev);
1013
Eugene Konev67d129d2007-10-24 10:42:02 +08001014 napi_enable(&priv->napi);
Matteo Croced95b39c2007-10-14 18:10:13 +02001015 priv->phy->state = PHY_CHANGELINK;
1016 phy_start(priv->phy);
1017
1018 return 0;
1019
1020fail_irq:
1021fail_desc:
1022 for (i = 0; i < priv->ring_size; i++) {
1023 if (priv->rx_head[i].skb) {
1024 dma_unmap_single(&dev->dev,
1025 priv->rx_head[i].data_mapping,
1026 CPMAC_SKB_SIZE,
1027 DMA_FROM_DEVICE);
1028 kfree_skb(priv->rx_head[i].skb);
1029 }
1030 }
1031fail_alloc:
1032 kfree(priv->desc_ring);
1033 iounmap(priv->regs);
1034
1035fail_remap:
Dan Carpenter7e307c72010-06-30 13:12:01 -07001036 release_mem_region(mem->start, resource_size(mem));
Matteo Croced95b39c2007-10-14 18:10:13 +02001037
1038fail_reserve:
Matteo Croced95b39c2007-10-14 18:10:13 +02001039 return res;
1040}
1041
/*
 * net_device stop(): the inverse of cpmac_open().  Quiesces the TX
 * queues, pending reset work, NAPI and the PHY, stops the MAC and
 * zeroes its descriptor queue pointers, then releases the irq, the
 * register mapping, the RX skbs and finally the coherent descriptor
 * ring.  Always returns 0.
 */
static int cpmac_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;

	netif_tx_stop_all_queues(dev);

	cancel_work_sync(&priv->reset_work);
	napi_disable(&priv->napi);
	phy_stop(priv->phy);

	cpmac_hw_stop(dev);

	/* clear the hardware descriptor queue head pointers */
	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
	cpmac_write(priv->regs, CPMAC_MBP, 0);

	free_irq(dev->irq, dev);
	iounmap(priv->regs);
	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	release_mem_region(mem->start, resource_size(mem));
	/* unmap and free every RX buffer still in the ring */
	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}

	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
			  (CPMAC_QUEUES + priv->ring_size),
			  priv->desc_ring, priv->dma_ring);
	return 0;
}
1081
/* net_device callbacks; MTU and MAC-address handling use the generic
 * ethernet helpers */
static const struct net_device_ops cpmac_netdev_ops = {
	.ndo_open		= cpmac_open,
	.ndo_stop		= cpmac_stop,
	.ndo_start_xmit		= cpmac_start_xmit,
	.ndo_tx_timeout		= cpmac_tx_timeout,
	.ndo_set_rx_mode	= cpmac_set_multicast_list,
	.ndo_do_ioctl		= cpmac_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
1093
/* set by cpmac_init() when the MDIO alive scan finds more than one
 * responding address, i.e. an external switch rather than a PHY */
static int external_switch;
1095
Bill Pembertonf57ae662012-12-03 09:23:43 -05001096static int cpmac_probe(struct platform_device *pdev)
Matteo Croced95b39c2007-10-14 18:10:13 +02001097{
Florian Fainelli69bd4ae2009-05-31 10:57:07 +00001098 int rc, phy_id;
Florian Fainelli762c6aa2009-09-15 21:44:22 +00001099 char mdio_bus_id[MII_BUS_ID_SIZE];
Matteo Croced95b39c2007-10-14 18:10:13 +02001100 struct resource *mem;
1101 struct cpmac_priv *priv;
1102 struct net_device *dev;
1103 struct plat_cpmac_data *pdata;
1104
Jingoo Hana0ea2ac2013-08-30 14:05:02 +09001105 pdata = dev_get_platdata(&pdev->dev);
Matteo Croced95b39c2007-10-14 18:10:13 +02001106
Florian Fainelli76e61ea2009-08-04 10:52:52 +00001107 if (external_switch || dumb_switch) {
Florian Fainellia19c5d62012-02-13 01:23:20 +00001108 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
Florian Fainelli76e61ea2009-08-04 10:52:52 +00001109 phy_id = pdev->id;
1110 } else {
1111 for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
1112 if (!(pdata->phy_mask & (1 << phy_id)))
1113 continue;
1114 if (!cpmac_mii->phy_map[phy_id])
1115 continue;
Florian Fainelli762c6aa2009-09-15 21:44:22 +00001116 strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
Florian Fainelli76e61ea2009-08-04 10:52:52 +00001117 break;
1118 }
Matteo Croced95b39c2007-10-14 18:10:13 +02001119 }
1120
1121 if (phy_id == PHY_MAX_ADDR) {
Florian Fainelli559764d2010-08-08 10:09:39 +00001122 dev_err(&pdev->dev, "no PHY present, falling back "
Varka Bhadramf160a2d2014-07-10 11:05:41 +05301123 "to switch on MDIO bus 0\n");
Florian Fainellia19c5d62012-02-13 01:23:20 +00001124 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
Florian Fainelli9fba1c32010-03-07 00:55:47 +00001125 phy_id = pdev->id;
Matteo Croced95b39c2007-10-14 18:10:13 +02001126 }
1127
1128 dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
Joe Perches41de8d42012-01-29 13:47:52 +00001129 if (!dev)
Matteo Croced95b39c2007-10-14 18:10:13 +02001130 return -ENOMEM;
Matteo Croced95b39c2007-10-14 18:10:13 +02001131
1132 platform_set_drvdata(pdev, dev);
1133 priv = netdev_priv(dev);
1134
1135 priv->pdev = pdev;
1136 mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
1137 if (!mem) {
1138 rc = -ENODEV;
1139 goto fail;
1140 }
1141
1142 dev->irq = platform_get_irq_byname(pdev, "irq");
1143
Alexander Beregalov63ef7d82009-04-15 12:52:36 +00001144 dev->netdev_ops = &cpmac_netdev_ops;
1145 dev->ethtool_ops = &cpmac_ethtool_ops;
Matteo Croced95b39c2007-10-14 18:10:13 +02001146
Eugene Konev67d129d2007-10-24 10:42:02 +08001147 netif_napi_add(dev, &priv->napi, cpmac_poll, 64);
1148
Matteo Croced95b39c2007-10-14 18:10:13 +02001149 spin_lock_init(&priv->lock);
1150 spin_lock_init(&priv->rx_lock);
1151 priv->dev = dev;
1152 priv->ring_size = 64;
1153 priv->msg_enable = netif_msg_init(debug_level, 0xff);
Julia Lawall2447f2f2009-12-13 05:35:45 +00001154 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
Eugene Konevb88219f2007-10-24 10:42:03 +08001155
Florian Fainelli559764d2010-08-08 10:09:39 +00001156 snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
1157 mdio_bus_id, phy_id);
Florian Fainelli76e61ea2009-08-04 10:52:52 +00001158
Florian Fainellif9a8f832013-01-14 00:52:52 +00001159 priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
1160 PHY_INTERFACE_MODE_MII);
Florian Fainelli76e61ea2009-08-04 10:52:52 +00001161
Eugene Konevb88219f2007-10-24 10:42:03 +08001162 if (IS_ERR(priv->phy)) {
1163 if (netif_msg_drv(priv))
Varka Bhadramf160a2d2014-07-10 11:05:41 +05301164 dev_err(&pdev->dev, "Could not attach to PHY\n");
1165
Florian Fainellied770f02010-06-20 22:07:48 +00001166 rc = PTR_ERR(priv->phy);
1167 goto fail;
Eugene Konevb88219f2007-10-24 10:42:03 +08001168 }
Matteo Croced95b39c2007-10-14 18:10:13 +02001169
Florian Fainelli559764d2010-08-08 10:09:39 +00001170 rc = register_netdev(dev);
1171 if (rc) {
Varka Bhadramf160a2d2014-07-10 11:05:41 +05301172 dev_err(&pdev->dev, "Could not register net device\n");
Matteo Croced95b39c2007-10-14 18:10:13 +02001173 goto fail;
1174 }
1175
1176 if (netif_msg_probe(priv)) {
Varka Bhadramf160a2d2014-07-10 11:05:41 +05301177 dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, "
1178 "mac: %pM\n", (void *)mem->start, dev->irq,
1179 priv->phy_name, dev->dev_addr);
Matteo Croced95b39c2007-10-14 18:10:13 +02001180 }
1181 return 0;
1182
1183fail:
1184 free_netdev(dev);
1185 return rc;
1186}
1187
/* Platform remove: unregister and release the net device. */
static int cpmac_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	unregister_netdev(netdev);
	free_netdev(netdev);

	return 0;
}
1196
/* platform driver binding for the "cpmac" device */
static struct platform_driver cpmac_driver = {
	.driver = {
		.name = "cpmac",
		.owner = THIS_MODULE,
	},
	.probe = cpmac_probe,
	.remove = cpmac_remove,
};
1205
/*
 * Module init: set up the MDIO bus (cpmac_mii), take the CPMAC blocks
 * and EPHY out of reset, scan CPMAC_MDIO_ALIVE for responding PHY
 * addresses, then register the MDIO bus and the platform driver.
 *
 * If more than one address responds, an external switch is assumed:
 * external_switch is set and the PHY mask cleared.
 *
 * Returns 0 on success or a negative errno, unwinding in reverse
 * order on failure.
 */
int cpmac_init(void)
{
	u32 mask;
	int i, res;

	cpmac_mii = mdiobus_alloc();
	if (cpmac_mii == NULL)
		return -ENOMEM;

	cpmac_mii->name = "cpmac-mii";
	cpmac_mii->read = cpmac_mdio_read;
	cpmac_mii->write = cpmac_mdio_write;
	cpmac_mii->reset = cpmac_mdio_reset;
	cpmac_mii->irq = mii_irqs;

	cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);

	if (!cpmac_mii->priv) {
		pr_err("Can't ioremap mdio registers\n");
		res = -ENXIO;
		goto fail_alloc;
	}

#warning FIXME: unhardcode gpio&reset bits
	ar7_gpio_disable(26);
	ar7_gpio_disable(27);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
	ar7_device_reset(AR7_RESET_BIT_EPHY);

	cpmac_mii->reset(cpmac_mii);

	/* poll up to ~3s for at least one PHY to report alive */
	for (i = 0; i < 300; i++) {
		mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
		if (mask)
			break;
		else
			msleep(10);
	}

	mask &= 0x7fffffff;
	if (mask & (mask - 1)) {
		/* more than one bit set: treat as an external switch */
		external_switch = 1;
		mask = 0;
	}

	cpmac_mii->phy_mask = ~(mask | 0x80000000);
	snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");

	res = mdiobus_register(cpmac_mii);
	if (res)
		goto fail_mii;

	res = platform_driver_register(&cpmac_driver);
	if (res)
		goto fail_cpmac;

	return 0;

fail_cpmac:
	mdiobus_unregister(cpmac_mii);

fail_mii:
	iounmap(cpmac_mii->priv);

fail_alloc:
	mdiobus_free(cpmac_mii);

	return res;
}
1276
/*
 * Module exit: tear down in strict reverse order of cpmac_init() —
 * platform driver, MDIO bus registration, MDIO register mapping,
 * then the bus structure itself.
 */
void cpmac_exit(void)
{
	platform_driver_unregister(&cpmac_driver);
	mdiobus_unregister(cpmac_mii);
	iounmap(cpmac_mii->priv);
	mdiobus_free(cpmac_mii);
}
1284
/* module entry and exit points */
module_init(cpmac_init);
module_exit(cpmac_exit);