/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 * Once again I am out to prove that every ethernet
 * controller out there can be most efficiently programmed
 * if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME	"sunqe"
#define DRV_VERSION	"4.1"
#define DRV_RELDATE	"August 27, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

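/* Reset the whole QEC by setting the reset bit in the global control
 * register, then poll until the hardware clears it again, giving up
 * after QEC_RESET_TRIES attempts.
 */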
static inline int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

static inline int qe_stop(struct sunqe *qep)
{
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}

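/* Reset the ring indexes and hand every RX descriptor, with its fixed
 * DVMA buffer address, back to the chip by setting RXD_OWN.
 */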
static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}

static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, neither inter-frame spacing nor throttling seems to be necessary. */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (--tries) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries == 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		dev->stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_CCOFLOW) {
		dev->stats.tx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;
		dev->stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		dev->stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct net_device *dev = qep->dev;
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
		} else {
			skb = netdev_alloc_skb(dev, len + 2);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, 2);
				skb_put(skb, len);
				skb_copy_to_linear_data(skb, this_qbuf,
							len);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
			}
		}
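		/* Recycle the buffer onto the descriptor RX_RING_SIZE slots
		 * ahead in the hardware ring, so the chip keeps a window of
		 * owned descriptors in front of it.
		 */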
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
	struct sunqec *qecp = dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
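	/* Each QE channel gets one nibble of the global status word; shift
	 * it down as we walk the four channels.
	 */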
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
		next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

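/* The netdev watchdog fired: reclaim any completed TX entries and, if the
 * ring is still full, reset the chip before waking the queue again.
 */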
static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	skb_copy_from_linear_data(skb, txbuf, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 new_mconfig = qep->mconfig;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		memset(hash_table, 0, sizeof(hash_table));
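		/* The MACE has a 64-bit logical address filter; the top six
		 * bits of the little-endian CRC of each multicast address
		 * select which of the 64 filter bits to set.
		 */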
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}

/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	const struct linux_prom_registers *regs;
	struct sunqe *qep = netdev_priv(dev);
	struct platform_device *op;

	strlcpy(info->driver, "sunqe", sizeof(info->driver));
	strlcpy(info->version, "3.0", sizeof(info->version));

	op = qep->op;
	regs = of_get_property(op->dev.of_node, "reg", NULL);
	if (regs)
		snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
			 regs->which_io);
}

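/* ethtool link detection: report the state of the link status bit in the
 * MACE PHY configuration register.
 */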
static u32 qe_get_link(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	void __iomem *mregs = qep->mregs;
	u8 phyconfig;

	spin_lock_irq(&qep->lock);
	phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
	spin_unlock_irq(&qep->lock);

	return phyconfig & MREGS_PHYCONFIG_LSTAT;
}

static const struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo		= qe_get_drvinfo,
	.get_link		= qe_get_link,
};

/* This is only called once at boot time for each card probed. */
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Packetsize only used in 100baseT BigMAC configurations,
	 * set it to zero just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((resource_size(&op->resource[1]) >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}

static u8 qec_get_burst(struct device_node *dp)
{
	u8 bsizes, bsizes_more;

	/* Find and set the burst sizes for the QEC, since it
	 * does the actual dma for all 4 channels.
	 */
	bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	return bsizes;
}

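/* Find, or allocate and set up, the QEC parent of this QE channel.  The
 * shared state hangs off the parent platform device's drvdata, so all
 * four channels end up sharing one struct sunqec.
 */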
static struct sunqec *get_qec(struct platform_device *child)
{
	struct platform_device *op = to_platform_device(child->dev.parent);
	struct sunqec *qecp;

	qecp = platform_get_drvdata(op);
	if (!qecp) {
		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
		if (qecp) {
			u32 ctrl;

			qecp->op = op;
			qecp->gregs = of_ioremap(&op->resource[0], 0,
						 GLOB_REG_SIZE,
						 "QEC Global Registers");
			if (!qecp->gregs)
				goto fail;

			/* Make sure the QEC is in MACE mode. */
			ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
			ctrl &= 0xf0000000;
			if (ctrl != GLOB_CTRL_MMODE) {
				printk(KERN_ERR "qec: Not in MACE mode!\n");
				goto fail;
			}

			if (qec_global_reset(qecp->gregs))
				goto fail;

			qecp->qec_bursts = qec_get_burst(op->dev.of_node);

			qec_init_once(qecp, op);

			if (request_irq(op->archdata.irqs[0], qec_interrupt,
					IRQF_SHARED, "qec", (void *) qecp)) {
				printk(KERN_ERR "qec: Can't register irq.\n");
				goto fail;
			}

			platform_set_drvdata(op, qecp);

			qecp->next_module = root_qec_dev;
			root_qec_dev = qecp;
		}
	}

	return qecp;

fail:
	if (qecp->gregs)
		of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
	kfree(qecp);
	return NULL;
}

static const struct net_device_ops qec_ops = {
	.ndo_open		= qe_open,
	.ndo_stop		= qe_close,
	.ndo_start_xmit		= qe_start_xmit,
	.ndo_set_rx_mode	= qe_set_multicast,
	.ndo_tx_timeout		= qe_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int qec_ether_init(struct platform_device *op)
{
	static unsigned version_printed;
	struct net_device *dev;
	struct sunqec *qecp;
	struct sunqe *qe;
	int i, res;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(struct sunqe));
	if (!dev)
		return -ENOMEM;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);

	qe = netdev_priv(dev);

	res = -ENODEV;

	i = of_getintprop_default(op->dev.of_node, "channel#", -1);
	if (i == -1)
		goto fail;
	qe->channel = i;
	spin_lock_init(&qe->lock);

	qecp = get_qec(op);
	if (!qecp)
		goto fail;

	qecp->qes[qe->channel] = qe;
	qe->dev = dev;
	qe->parent = qecp;
	qe->op = op;

	res = -ENOMEM;
	qe->qcregs = of_ioremap(&op->resource[0], 0,
				CREG_REG_SIZE, "QEC Channel Registers");
	if (!qe->qcregs) {
		printk(KERN_ERR "qe: Cannot map channel registers.\n");
		goto fail;
	}

	qe->mregs = of_ioremap(&op->resource[1], 0,
			       MREGS_REG_SIZE, "QE MACE Registers");
	if (!qe->mregs) {
		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
		goto fail;
	}

	qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
					  &qe->qblock_dvma, GFP_ATOMIC);
	qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
					 &qe->buffers_dvma, GFP_ATOMIC);
	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
	    qe->buffers == NULL || qe->buffers_dvma == 0)
		goto fail;

	/* Stop this QE. */
	qe_stop(qe);

	SET_NETDEV_DEV(dev, &op->dev);

	dev->watchdog_timeo = 5*HZ;
	dev->irq = op->archdata.irqs[0];
	dev->dma = 0;
	dev->ethtool_ops = &qe_ethtool_ops;
	dev->netdev_ops = &qec_ops;

	res = register_netdev(dev);
	if (res)
		goto fail;

	platform_set_drvdata(op, qe);

	printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
	       dev->dev_addr);
	return 0;

fail:
	if (qe->qcregs)
		of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
	if (qe->mregs)
		of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
	if (qe->qe_block)
		dma_free_coherent(&op->dev, PAGE_SIZE,
				  qe->qe_block, qe->qblock_dvma);
	if (qe->buffers)
		dma_free_coherent(&op->dev,
				  sizeof(struct sunqe_buffers),
				  qe->buffers,
				  qe->buffers_dvma);

	free_netdev(dev);

	return res;
}

static int qec_sbus_probe(struct platform_device *op)
{
	return qec_ether_init(op);
}

static int qec_sbus_remove(struct platform_device *op)
{
	struct sunqe *qp = platform_get_drvdata(op);
	struct net_device *net_dev = qp->dev;

	unregister_netdev(net_dev);

	of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
	of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
	dma_free_coherent(&op->dev, PAGE_SIZE,
			  qp->qe_block, qp->qblock_dvma);
	dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
			  qp->buffers, qp->buffers_dvma);

	free_netdev(net_dev);

	return 0;
}

static const struct of_device_id qec_sbus_match[] = {
	{
		.name = "qe",
	},
	{},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);

static struct platform_driver qec_sbus_driver = {
	.driver = {
		.name = "qec",
		.of_match_table = qec_sbus_match,
	},
	.probe		= qec_sbus_probe,
	.remove		= qec_sbus_remove,
};

static int __init qec_init(void)
{
	return platform_driver_register(&qec_sbus_driver);
}

static void __exit qec_exit(void)
{
	platform_driver_unregister(&qec_sbus_driver);

	while (root_qec_dev) {
		struct sunqec *next = root_qec_dev->next_module;
		struct platform_device *op = root_qec_dev->op;

		free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
		of_iounmap(&op->resource[0], root_qec_dev->gregs,
			   GLOB_REG_SIZE);
		kfree(root_qec_dev);

		root_qec_dev = next;
	}
}

module_init(qec_init);
module_exit(qec_exit);