/*
 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
 *
 * 2005-2010 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
 * available in the GRLIB VHDL IP core library.
 *
 * Full documentation of both cores can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * The Gigabit version supports scatter/gather DMA, any alignment of
 * buffers and checksum offloading.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors: Kristoffer Glembo
 *               Daniel Hellstrom
 *               Marko Isomaki
 */

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/byteorder.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#include "greth.h"

#define GRETH_DEF_MSG_ENABLE \
        (NETIF_MSG_DRV | \
         NETIF_MSG_PROBE | \
         NETIF_MSG_LINK | \
         NETIF_MSG_IFDOWN | \
         NETIF_MSG_IFUP | \
         NETIF_MSG_RX_ERR | \
         NETIF_MSG_TX_ERR)

static int greth_debug = -1;	/* -1 == use GRETH_DEF_MSG_ENABLE as value */
module_param(greth_debug, int, 0);
MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");

/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
static int macaddr[6];
module_param_array(macaddr, int, NULL, 0);
MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");

static int greth_edcl = 1;
module_param(greth_edcl, int, 0);
MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
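/*
 * Example module load (module name and address values are illustrative
 * only), setting the MAC address and leaving the EDCL debug link enabled:
 *
 *   insmod greth.ko macaddr=0x00,0x50,0xbd,0x01,0x02,0x03 greth_edcl=1
 */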

static int greth_open(struct net_device *dev);
static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
                                    struct net_device *dev);
static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
                                         struct net_device *dev);
static int greth_rx(struct net_device *dev, int limit);
static int greth_rx_gbit(struct net_device *dev, int limit);
static void greth_clean_tx(struct net_device *dev);
static void greth_clean_tx_gbit(struct net_device *dev);
static irqreturn_t greth_interrupt(int irq, void *dev_id);
static int greth_close(struct net_device *dev);
static int greth_set_mac_add(struct net_device *dev, void *p);
static void greth_set_multicast_list(struct net_device *dev);

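/*
 * The GRETH core's registers and buffer descriptors are big-endian, so
 * all MMIO and descriptor accesses go through byte-swapping helpers.
 */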
#define GRETH_REGLOAD(a)	(be32_to_cpu(__raw_readl(&(a))))
#define GRETH_REGSAVE(a, v)	(__raw_writel(cpu_to_be32(v), &(a)))
#define GRETH_REGORIN(a, v)	(GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
#define GRETH_REGANDIN(a, v)	(GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))

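/*
 * Ring index arithmetic. The masks (defined in greth.h) are the ring size
 * minus one, i.e. the rings are power-of-two sized, so a simple AND
 * implements the wrap-around.
 */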
#define NEXT_TX(N)	(((N) + 1) & GRETH_TXBD_NUM_MASK)
#define SKIP_TX(N, C)	(((N) + C) & GRETH_TXBD_NUM_MASK)
#define NEXT_RX(N)	(((N) + 1) & GRETH_RXBD_NUM_MASK)

static void greth_print_rx_packet(void *addr, int len)
{
        print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
                       addr, len, true);
}

static void greth_print_tx_packet(struct sk_buff *skb)
{
        int i;
        int length;

        if (skb_shinfo(skb)->nr_frags == 0)
                length = skb->len;
        else
                length = skb_headlen(skb);

        print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
                       skb->data, length, true);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {

                print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
                               phys_to_virt(page_to_phys(skb_shinfo(skb)->frags[i].page)) +
                               skb_shinfo(skb)->frags[i].page_offset,
                               skb_shinfo(skb)->frags[i].size, true);
        }
}

static inline void greth_enable_tx(struct greth_private *greth)
{
        wmb();
        GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}

static inline void greth_disable_tx(struct greth_private *greth)
{
        GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
}

static inline void greth_enable_rx(struct greth_private *greth)
{
        wmb();
        GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
}

static inline void greth_disable_rx(struct greth_private *greth)
{
        GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
}

static inline void greth_enable_irqs(struct greth_private *greth)
{
        GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
}

static inline void greth_disable_irqs(struct greth_private *greth)
{
        GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI | GRETH_TXI));
}

static inline void greth_write_bd(u32 *bd, u32 val)
{
        __raw_writel(cpu_to_be32(val), bd);
}

static inline u32 greth_read_bd(u32 *bd)
{
        return be32_to_cpu(__raw_readl(bd));
}

static void greth_clean_rings(struct greth_private *greth)
{
        int i;
        struct greth_bd *rx_bdp = greth->rx_bd_base;
        struct greth_bd *tx_bdp = greth->tx_bd_base;

        if (greth->gbit_mac) {

                /* Free and unmap RX buffers */
                for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
                        if (greth->rx_skbuff[i] != NULL) {
                                dev_kfree_skb(greth->rx_skbuff[i]);
                                dma_unmap_single(greth->dev,
                                                 greth_read_bd(&rx_bdp->addr),
                                                 MAX_FRAME_SIZE+NET_IP_ALIGN,
                                                 DMA_FROM_DEVICE);
                        }
                }

                /* TX buffers */
                while (greth->tx_free < GRETH_TXBD_NUM) {

                        struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
                        int nr_frags = skb_shinfo(skb)->nr_frags;
                        tx_bdp = greth->tx_bd_base + greth->tx_last;
                        greth->tx_last = NEXT_TX(greth->tx_last);

                        dma_unmap_single(greth->dev,
                                         greth_read_bd(&tx_bdp->addr),
                                         skb_headlen(skb),
                                         DMA_TO_DEVICE);

                        for (i = 0; i < nr_frags; i++) {
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                                tx_bdp = greth->tx_bd_base + greth->tx_last;

                                dma_unmap_page(greth->dev,
                                               greth_read_bd(&tx_bdp->addr),
                                               frag->size,
                                               DMA_TO_DEVICE);

                                greth->tx_last = NEXT_TX(greth->tx_last);
                        }
                        greth->tx_free += nr_frags + 1;
                        dev_kfree_skb(skb);
                }

        } else { /* 10/100 Mbps MAC */

                for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
                        kfree(greth->rx_bufs[i]);
                        dma_unmap_single(greth->dev,
                                         greth_read_bd(&rx_bdp->addr),
                                         MAX_FRAME_SIZE,
                                         DMA_FROM_DEVICE);
                }
                for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
                        kfree(greth->tx_bufs[i]);
                        dma_unmap_single(greth->dev,
                                         greth_read_bd(&tx_bdp->addr),
                                         MAX_FRAME_SIZE,
                                         DMA_TO_DEVICE);
                }
        }
}

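/*
 * Two buffer strategies: the gigabit MAC does true scatter/gather, so RX
 * skbs are mapped and handed to the hardware directly. The 10/100 MAC
 * instead owns a fixed set of kmalloc'ed bounce buffers that frames are
 * copied to/from, since it does not support arbitrary buffer alignment
 * (per the header comment).
 */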
static int greth_init_rings(struct greth_private *greth)
{
        struct sk_buff *skb;
        struct greth_bd *rx_bd, *tx_bd;
        u32 dma_addr;
        int i;

        rx_bd = greth->rx_bd_base;
        tx_bd = greth->tx_bd_base;

        /* Initialize descriptor rings and buffers */
        if (greth->gbit_mac) {

                for (i = 0; i < GRETH_RXBD_NUM; i++) {
                        skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
                        if (skb == NULL) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Error allocating DMA ring.\n");
                                goto cleanup;
                        }
                        skb_reserve(skb, NET_IP_ALIGN);
                        dma_addr = dma_map_single(greth->dev,
                                                  skb->data,
                                                  MAX_FRAME_SIZE+NET_IP_ALIGN,
                                                  DMA_FROM_DEVICE);

                        if (dma_mapping_error(greth->dev, dma_addr)) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Could not create initial DMA mapping\n");
                                goto cleanup;
                        }
                        greth->rx_skbuff[i] = skb;
                        greth_write_bd(&rx_bd[i].addr, dma_addr);
                        greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
                }

        } else {

                /* The 10/100 MAC uses a fixed set of buffers and copies to/from SKBs */
                for (i = 0; i < GRETH_RXBD_NUM; i++) {

                        greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

                        if (greth->rx_bufs[i] == NULL) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Error allocating DMA ring.\n");
                                goto cleanup;
                        }

                        dma_addr = dma_map_single(greth->dev,
                                                  greth->rx_bufs[i],
                                                  MAX_FRAME_SIZE,
                                                  DMA_FROM_DEVICE);

                        if (dma_mapping_error(greth->dev, dma_addr)) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Could not create initial DMA mapping\n");
                                goto cleanup;
                        }
                        greth_write_bd(&rx_bd[i].addr, dma_addr);
                        greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
                }
                for (i = 0; i < GRETH_TXBD_NUM; i++) {

                        greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

                        if (greth->tx_bufs[i] == NULL) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Error allocating DMA ring.\n");
                                goto cleanup;
                        }

                        dma_addr = dma_map_single(greth->dev,
                                                  greth->tx_bufs[i],
                                                  MAX_FRAME_SIZE,
                                                  DMA_TO_DEVICE);

                        if (dma_mapping_error(greth->dev, dma_addr)) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Could not create initial DMA mapping\n");
                                goto cleanup;
                        }
                        greth_write_bd(&tx_bd[i].addr, dma_addr);
                        greth_write_bd(&tx_bd[i].stat, 0);
                }
        }
        greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
                       greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

        /* Initialize pointers. */
        greth->rx_cur = 0;
        greth->tx_next = 0;
        greth->tx_last = 0;
        greth->tx_free = GRETH_TXBD_NUM;

        /* Initialize descriptor base address */
        GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
        GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

        return 0;

cleanup:
        greth_clean_rings(greth);
        return -ENOMEM;
}

static int greth_open(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        int err;

        err = greth_init_rings(greth);
        if (err) {
                if (netif_msg_ifup(greth))
                        dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
                return err;
        }

        err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
        if (err) {
                if (netif_msg_ifup(greth))
                        dev_err(&dev->dev, "Could not allocate interrupt %d\n", greth->irq);
                greth_clean_rings(greth);
                return err;
        }

        if (netif_msg_ifup(greth))
                dev_dbg(&dev->dev, "starting queue\n");
        netif_start_queue(dev);

        GRETH_REGSAVE(greth->regs->status, 0xFF);

        napi_enable(&greth->napi);

        greth_enable_irqs(greth);
        greth_enable_tx(greth);
        greth_enable_rx(greth);
        return 0;
}

static int greth_close(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);

        napi_disable(&greth->napi);

        greth_disable_irqs(greth);
        greth_disable_tx(greth);
        greth_disable_rx(greth);

        netif_stop_queue(dev);

        free_irq(greth->irq, (void *) dev);

        greth_clean_rings(greth);

        return 0;
}

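/*
 * 10/100 TX path: the frame is copied into the pre-mapped bounce buffer
 * whose DMA address already sits in the descriptor, so no per-packet
 * mapping is needed.
 */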
static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        struct greth_bd *bdp;
        int err = NETDEV_TX_OK;
        u32 status, dma_addr, ctrl;
        unsigned long flags;

        /* Clean TX Ring */
        greth_clean_tx(greth->netdev);

        if (unlikely(greth->tx_free <= 0)) {
                spin_lock_irqsave(&greth->devlock, flags); /* protect from poll/irq */
                ctrl = GRETH_REGLOAD(greth->regs->control);
                /* Enable TX IRQ only if not already in poll() routine */
                if (ctrl & GRETH_RXI)
                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&greth->devlock, flags);
                return NETDEV_TX_BUSY;
        }

        if (netif_msg_pktdata(greth))
                greth_print_tx_packet(skb);

        if (unlikely(skb->len > MAX_FRAME_SIZE)) {
                dev->stats.tx_errors++;
                goto out;
        }

        bdp = greth->tx_bd_base + greth->tx_next;
        dma_addr = greth_read_bd(&bdp->addr);

        memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

        dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

        status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);

        /* Wrap around descriptor ring */
        if (greth->tx_next == GRETH_TXBD_NUM_MASK)
                status |= GRETH_BD_WR;

        greth->tx_next = NEXT_TX(greth->tx_next);
        greth->tx_free--;

        /* Write descriptor control word and enable transmission */
        greth_write_bd(&bdp->stat, status);
        spin_lock_irqsave(&greth->devlock, flags); /* protect from poll/irq */
        greth_enable_tx(greth);
        spin_unlock_irqrestore(&greth->devlock, flags);

out:
        dev_kfree_skb(skb);
        return err;
}
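/*
 * Gigabit TX path: scatter/gather. The linear part and every page
 * fragment get their own descriptor; the first descriptor is enabled
 * last, so the hardware never sees a half-built chain.
 */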
static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        struct greth_bd *bdp;
        u32 status = 0, dma_addr, ctrl;
        int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
        unsigned long flags;

        nr_frags = skb_shinfo(skb)->nr_frags;

        /* Clean TX Ring */
        greth_clean_tx_gbit(dev);

        if (greth->tx_free < nr_frags + 1) {
                spin_lock_irqsave(&greth->devlock, flags); /* protect from poll/irq */
                ctrl = GRETH_REGLOAD(greth->regs->control);
                /* Enable TX IRQ only if not already in poll() routine */
                if (ctrl & GRETH_RXI)
                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&greth->devlock, flags);
                err = NETDEV_TX_BUSY;
                goto out;
        }

        if (netif_msg_pktdata(greth))
                greth_print_tx_packet(skb);

        if (unlikely(skb->len > MAX_FRAME_SIZE)) {
                dev->stats.tx_errors++;
                goto out;
        }

        /* Save skb pointer. */
        greth->tx_skbuff[greth->tx_next] = skb;

        /* Linear buf */
        if (nr_frags != 0)
                status = GRETH_TXBD_MORE;

        status |= GRETH_TXBD_CSALL;
        status |= skb_headlen(skb) & GRETH_BD_LEN;
        if (greth->tx_next == GRETH_TXBD_NUM_MASK)
                status |= GRETH_BD_WR;

        bdp = greth->tx_bd_base + greth->tx_next;
        greth_write_bd(&bdp->stat, status);
        dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
                goto map_error;

        greth_write_bd(&bdp->addr, dma_addr);

        curr_tx = NEXT_TX(greth->tx_next);

        /* Frags */
        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                greth->tx_skbuff[curr_tx] = NULL;
                bdp = greth->tx_bd_base + curr_tx;

                status = GRETH_TXBD_CSALL | GRETH_BD_EN;
                status |= frag->size & GRETH_BD_LEN;

                /* Wrap around descriptor ring */
                if (curr_tx == GRETH_TXBD_NUM_MASK)
                        status |= GRETH_BD_WR;

                /* More fragments left */
                if (i < nr_frags - 1)
                        status |= GRETH_TXBD_MORE;
                else
                        status |= GRETH_BD_IE; /* enable IRQ on last fragment */

                greth_write_bd(&bdp->stat, status);

                dma_addr = dma_map_page(greth->dev,
                                        frag->page,
                                        frag->page_offset,
                                        frag->size,
                                        DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
                        goto frag_map_error;

                greth_write_bd(&bdp->addr, dma_addr);

                curr_tx = NEXT_TX(curr_tx);
        }

        wmb();

        /* Enable the descriptor chain by enabling the first descriptor */
        bdp = greth->tx_bd_base + greth->tx_next;
        greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
        greth->tx_next = curr_tx;
        greth->tx_free -= nr_frags + 1;

        wmb();

        spin_lock_irqsave(&greth->devlock, flags); /* protect from poll/irq */
        greth_enable_tx(greth);
        spin_unlock_irqrestore(&greth->devlock, flags);

        return NETDEV_TX_OK;

frag_map_error:
        /* Unmap SKB mappings that succeeded and disable descriptor */
        for (i = 0; greth->tx_next + i != curr_tx; i++) {
                bdp = greth->tx_bd_base + greth->tx_next + i;
                dma_unmap_single(greth->dev,
                                 greth_read_bd(&bdp->addr),
                                 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
                                 DMA_TO_DEVICE);
                greth_write_bd(&bdp->stat, 0);
        }
map_error:
        if (net_ratelimit())
                dev_warn(greth->dev, "Could not create TX DMA mapping\n");
        dev_kfree_skb(skb);
out:
        return err;
}

static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct greth_private *greth;
        u32 status, ctrl;
        irqreturn_t retval = IRQ_NONE;

        greth = netdev_priv(dev);

        spin_lock(&greth->devlock);

        /* Get the interrupt events that caused us to be here. */
        status = GRETH_REGLOAD(greth->regs->status);

        /* Must also check that interrupts are enabled: the INT_TX/INT_RX
         * status flags may be set regardless of whether the IRQ is enabled.
         * This is especially important when the IRQ line is shared.
         */
        ctrl = GRETH_REGLOAD(greth->regs->control);

        /* Handle rx and tx interrupts through poll */
        if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
            ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
                retval = IRQ_HANDLED;

                /* Disable interrupts and schedule poll() */
                greth_disable_irqs(greth);
                napi_schedule(&greth->napi);
        }

        mmiowb();
        spin_unlock(&greth->devlock);

        return retval;
}

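/*
 * TX reclaim for the 10/100 MAC. Completed descriptors are harvested
 * lazily from the xmit and poll paths rather than directly in the
 * interrupt handler, which only schedules NAPI.
 */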
static void greth_clean_tx(struct net_device *dev)
{
        struct greth_private *greth;
        struct greth_bd *bdp;
        u32 stat;

        greth = netdev_priv(dev);

        while (1) {
                bdp = greth->tx_bd_base + greth->tx_last;
                GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
                mb();
                stat = greth_read_bd(&bdp->stat);

                if (unlikely(stat & GRETH_BD_EN))
                        break;

                if (greth->tx_free == GRETH_TXBD_NUM)
                        break;

                /* Check status for errors */
                if (unlikely(stat & GRETH_TXBD_STATUS)) {
                        dev->stats.tx_errors++;
                        if (stat & GRETH_TXBD_ERR_AL)
                                dev->stats.tx_aborted_errors++;
                        if (stat & GRETH_TXBD_ERR_UE)
                                dev->stats.tx_fifo_errors++;
                }
                dev->stats.tx_packets++;
                greth->tx_last = NEXT_TX(greth->tx_last);
                greth->tx_free++;
        }

        if (greth->tx_free > 0)
                netif_wake_queue(dev);
}

static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
{
        /* Check status for errors */
        if (unlikely(stat & GRETH_TXBD_STATUS)) {
                dev->stats.tx_errors++;
                if (stat & GRETH_TXBD_ERR_AL)
                        dev->stats.tx_aborted_errors++;
                if (stat & GRETH_TXBD_ERR_UE)
                        dev->stats.tx_fifo_errors++;
                if (stat & GRETH_TXBD_ERR_LC)
                        dev->stats.tx_aborted_errors++;
        }
        dev->stats.tx_packets++;
}

static void greth_clean_tx_gbit(struct net_device *dev)
{
        struct greth_private *greth;
        struct greth_bd *bdp, *bdp_last_frag;
        struct sk_buff *skb;
        u32 stat;
        int nr_frags, i;

        greth = netdev_priv(dev);

        while (greth->tx_free < GRETH_TXBD_NUM) {

                skb = greth->tx_skbuff[greth->tx_last];

                nr_frags = skb_shinfo(skb)->nr_frags;

                /* We only clean fully completed SKBs */
                bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);

                GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
                mb();
                stat = greth_read_bd(&bdp_last_frag->stat);

                if (stat & GRETH_BD_EN)
                        break;

                greth->tx_skbuff[greth->tx_last] = NULL;

                greth_update_tx_stats(dev, stat);

                bdp = greth->tx_bd_base + greth->tx_last;

                greth->tx_last = NEXT_TX(greth->tx_last);

                dma_unmap_single(greth->dev,
                                 greth_read_bd(&bdp->addr),
                                 skb_headlen(skb),
                                 DMA_TO_DEVICE);

                for (i = 0; i < nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        bdp = greth->tx_bd_base + greth->tx_last;

                        dma_unmap_page(greth->dev,
                                       greth_read_bd(&bdp->addr),
                                       frag->size,
                                       DMA_TO_DEVICE);

                        greth->tx_last = NEXT_TX(greth->tx_last);
                }
                greth->tx_free += nr_frags + 1;
                dev_kfree_skb(skb);
        }

        if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS + 1)))
                netif_wake_queue(dev);
}

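/*
 * 10/100 RX path: data is copied out of the fixed DMA buffer into a
 * freshly allocated skb, so the descriptor can be handed straight back
 * to the hardware.
 */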
static int greth_rx(struct net_device *dev, int limit)
{
        struct greth_private *greth;
        struct greth_bd *bdp;
        struct sk_buff *skb;
        int pkt_len;
        int bad, count;
        u32 status, dma_addr;
        unsigned long flags;

        greth = netdev_priv(dev);

        for (count = 0; count < limit; ++count) {

                bdp = greth->rx_bd_base + greth->rx_cur;
                GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
                mb();
                status = greth_read_bd(&bdp->stat);

                if (unlikely(status & GRETH_BD_EN))
                        break;

                dma_addr = greth_read_bd(&bdp->addr);
                bad = 0;

                /* Check status for errors. */
                if (unlikely(status & GRETH_RXBD_STATUS)) {
                        if (status & GRETH_RXBD_ERR_FT) {
                                dev->stats.rx_length_errors++;
                                bad = 1;
                        }
                        if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
                                dev->stats.rx_frame_errors++;
                                bad = 1;
                        }
                        if (status & GRETH_RXBD_ERR_CRC) {
                                dev->stats.rx_crc_errors++;
                                bad = 1;
                        }
                }
                if (unlikely(bad)) {
                        dev->stats.rx_errors++;
                } else {
                        pkt_len = status & GRETH_BD_LEN;

                        skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

                        if (unlikely(skb == NULL)) {
                                if (net_ratelimit())
                                        dev_warn(&dev->dev, "low on memory - packet dropped\n");

                                dev->stats.rx_dropped++;
                        } else {
                                skb_reserve(skb, NET_IP_ALIGN);
                                skb->dev = dev;

                                dma_sync_single_for_cpu(greth->dev,
                                                        dma_addr,
                                                        pkt_len,
                                                        DMA_FROM_DEVICE);

                                if (netif_msg_pktdata(greth))
                                        greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

                                memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);

                                skb->protocol = eth_type_trans(skb, dev);
                                dev->stats.rx_packets++;
                                netif_receive_skb(skb);
                        }
                }

                status = GRETH_BD_EN | GRETH_BD_IE;
                if (greth->rx_cur == GRETH_RXBD_NUM_MASK)
                        status |= GRETH_BD_WR;

                wmb();
                greth_write_bd(&bdp->stat, status);

                dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

                spin_lock_irqsave(&greth->devlock, flags); /* protect from XMIT */
                greth_enable_rx(greth);
                spin_unlock_irqrestore(&greth->devlock, flags);

                greth->rx_cur = NEXT_RX(greth->rx_cur);
        }

        return count;
}

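/* A frame counts as hardware-checksummed only if it is not an IP fragment
 * and none of the IP/UDP/TCP checksum error bits are set. */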
static inline int hw_checksummed(u32 status)
{
        if (status & GRETH_RXBD_IP_FRAG)
                return 0;

        if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
                return 0;

        if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
                return 0;

        if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
                return 0;

        return 1;
}

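/*
 * Gigabit RX path: instead of copying, the filled skb is handed up the
 * stack and replaced by a newly allocated one. On any failure (bad frame,
 * skb allocation, DMA mapping) the current skb is simply reused and the
 * frame is counted as dropped.
 */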
static int greth_rx_gbit(struct net_device *dev, int limit)
{
        struct greth_private *greth;
        struct greth_bd *bdp;
        struct sk_buff *skb, *newskb;
        int pkt_len;
        int bad, count = 0;
        u32 status, dma_addr;
        unsigned long flags;

        greth = netdev_priv(dev);

        for (count = 0; count < limit; ++count) {

                bdp = greth->rx_bd_base + greth->rx_cur;
                skb = greth->rx_skbuff[greth->rx_cur];
                GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
                mb();
                status = greth_read_bd(&bdp->stat);
                bad = 0;

                if (status & GRETH_BD_EN)
                        break;

                /* Check status for errors. */
                if (unlikely(status & GRETH_RXBD_STATUS)) {

                        if (status & GRETH_RXBD_ERR_FT) {
                                dev->stats.rx_length_errors++;
                                bad = 1;
                        } else if (status &
                                   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
                                dev->stats.rx_frame_errors++;
                                bad = 1;
                        } else if (status & GRETH_RXBD_ERR_CRC) {
                                dev->stats.rx_crc_errors++;
                                bad = 1;
                        }
                }

                /* Allocate a new skb to replace the current one; not needed
                 * if the current skb can be reused */
                if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
                        skb_reserve(newskb, NET_IP_ALIGN);

                        dma_addr = dma_map_single(greth->dev,
                                                  newskb->data,
                                                  MAX_FRAME_SIZE + NET_IP_ALIGN,
                                                  DMA_FROM_DEVICE);

                        if (!dma_mapping_error(greth->dev, dma_addr)) {
                                /* Process the incoming frame. */
                                pkt_len = status & GRETH_BD_LEN;

                                dma_unmap_single(greth->dev,
                                                 greth_read_bd(&bdp->addr),
                                                 MAX_FRAME_SIZE + NET_IP_ALIGN,
                                                 DMA_FROM_DEVICE);

                                if (netif_msg_pktdata(greth))
                                        greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

                                skb_put(skb, pkt_len);

                                if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                                else
                                        skb_checksum_none_assert(skb);

                                skb->protocol = eth_type_trans(skb, dev);
                                dev->stats.rx_packets++;
                                netif_receive_skb(skb);

                                greth->rx_skbuff[greth->rx_cur] = newskb;
                                greth_write_bd(&bdp->addr, dma_addr);
                        } else {
                                if (net_ratelimit())
                                        dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
                                dev_kfree_skb(newskb);
                                /* reusing the current skb, so this counts as a drop */
                                dev->stats.rx_dropped++;
                        }
                } else if (bad) {
                        /* Bad frame transfer; the skb is reused */
                        dev->stats.rx_dropped++;
                } else {
                        /* Failed to allocate a new skb: reuse the current
                         * "filled" skb as if the transfer had failed. One
                         * could argue that RX descriptor handling should be
                         * split into cleaning and refilling stages, as on
                         * the TX side of the driver.
                         */
                        if (net_ratelimit())
                                dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
                        /* reusing the current skb, so this counts as a drop */
                        dev->stats.rx_dropped++;
                }

                status = GRETH_BD_EN | GRETH_BD_IE;
                if (greth->rx_cur == GRETH_RXBD_NUM_MASK)
                        status |= GRETH_BD_WR;

                wmb();
                greth_write_bd(&bdp->stat, status);
                spin_lock_irqsave(&greth->devlock, flags);
                greth_enable_rx(greth);
                spin_unlock_irqrestore(&greth->devlock, flags);
                greth->rx_cur = NEXT_RX(greth->rx_cur);
        }

        return count;
}

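/*
 * NAPI poll: services RX (and TX reclaim while the queue is stopped).
 * Before completing, interrupts are re-enabled and the status register is
 * re-checked, closing the race against events that arrived in between.
 */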
static int greth_poll(struct napi_struct *napi, int budget)
{
        struct greth_private *greth;
        int work_done = 0;
        unsigned long flags;
        u32 mask, ctrl;

        greth = container_of(napi, struct greth_private, napi);

restart_txrx_poll:
        if (netif_queue_stopped(greth->netdev)) {
                if (greth->gbit_mac)
                        greth_clean_tx_gbit(greth->netdev);
                else
                        greth_clean_tx(greth->netdev);
        }

        if (greth->gbit_mac)
                work_done += greth_rx_gbit(greth->netdev, budget - work_done);
        else
                work_done += greth_rx(greth->netdev, budget - work_done);

        if (work_done < budget) {

                spin_lock_irqsave(&greth->devlock, flags);

                ctrl = GRETH_REGLOAD(greth->regs->control);
                if (netif_queue_stopped(greth->netdev)) {
                        GRETH_REGSAVE(greth->regs->control,
                                      ctrl | GRETH_TXI | GRETH_RXI);
                        mask = GRETH_INT_RX | GRETH_INT_RE |
                               GRETH_INT_TX | GRETH_INT_TE;
                } else {
                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
                        mask = GRETH_INT_RX | GRETH_INT_RE;
                }

                if (GRETH_REGLOAD(greth->regs->status) & mask) {
                        GRETH_REGSAVE(greth->regs->control, ctrl);
                        spin_unlock_irqrestore(&greth->devlock, flags);
                        goto restart_txrx_poll;
                } else {
                        __napi_complete(napi);
                        spin_unlock_irqrestore(&greth->devlock, flags);
                }
        }

        return work_done;
}

static int greth_set_mac_add(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;
        struct greth_private *greth;
        struct greth_regs *regs;

        greth = netdev_priv(dev);
        regs = (struct greth_regs *) greth->regs;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
        GRETH_REGSAVE(regs->esa_lsb,
                      addr->sa_data[2] << 24 | addr->sa_data[3] << 16 |
                      addr->sa_data[4] << 8 | addr->sa_data[5]);
        return 0;
}

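/* Multicast filtering: bits 5:0 of the Ethernet CRC index a 64-bit hash
 * table split across the hash_msb/hash_lsb registers. */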
static u32 greth_hash_get_index(__u8 *addr)
{
        return (ether_crc(6, addr)) & 0x3F;
}

static void greth_set_hash_filter(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        struct greth_private *greth = netdev_priv(dev);
        struct greth_regs *regs = (struct greth_regs *) greth->regs;
        u32 mc_filter[2];
        unsigned int bitnr;

        mc_filter[0] = mc_filter[1] = 0;

        netdev_for_each_mc_addr(ha, dev) {
                bitnr = greth_hash_get_index(ha->addr);
                mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
        }

        GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
        GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
}

static void greth_set_multicast_list(struct net_device *dev)
{
        int cfg;
        struct greth_private *greth = netdev_priv(dev);
        struct greth_regs *regs = (struct greth_regs *) greth->regs;

        cfg = GRETH_REGLOAD(regs->control);
        if (dev->flags & IFF_PROMISC)
                cfg |= GRETH_CTRL_PR;
        else
                cfg &= ~GRETH_CTRL_PR;

        if (greth->multicast) {
                if (dev->flags & IFF_ALLMULTI) {
                        GRETH_REGSAVE(regs->hash_msb, -1);
                        GRETH_REGSAVE(regs->hash_lsb, -1);
                        cfg |= GRETH_CTRL_MCEN;
                        GRETH_REGSAVE(regs->control, cfg);
                        return;
                }

                if (netdev_mc_empty(dev)) {
                        cfg &= ~GRETH_CTRL_MCEN;
                        GRETH_REGSAVE(regs->control, cfg);
                        return;
                }

                /* Setup multicast filter */
                greth_set_hash_filter(dev);
                cfg |= GRETH_CTRL_MCEN;
        }
        GRETH_REGSAVE(regs->control, cfg);
}

static u32 greth_get_msglevel(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        return greth->msg_enable;
}

static void greth_set_msglevel(struct net_device *dev, u32 value)
{
        struct greth_private *greth = netdev_priv(dev);
        greth->msg_enable = value;
}

static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct greth_private *greth = netdev_priv(dev);
        struct phy_device *phy = greth->phy;

        if (!phy)
                return -ENODEV;

        return phy_ethtool_gset(phy, cmd);
}

static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct greth_private *greth = netdev_priv(dev);
        struct phy_device *phy = greth->phy;

        if (!phy)
                return -ENODEV;

        return phy_ethtool_sset(phy, cmd);
}

static int greth_get_regs_len(struct net_device *dev)
{
        return sizeof(struct greth_regs);
}

static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct greth_private *greth = netdev_priv(dev);

        strncpy(info->driver, dev_driver_string(greth->dev), 32);
        strncpy(info->version, "revision: 1.0", 32);
        strncpy(info->bus_info, greth->dev->bus->name, 32);
        strncpy(info->fw_version, "N/A", 32);
        info->eedump_len = 0;
        info->regdump_len = sizeof(struct greth_regs);
}

static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
        int i;
        struct greth_private *greth = netdev_priv(dev);
        u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
        u32 *buff = p;

        for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
                buff[i] = greth_read_bd(&greth_regs[i]);
}

static const struct ethtool_ops greth_ethtool_ops = {
        .get_msglevel		= greth_get_msglevel,
        .set_msglevel		= greth_set_msglevel,
        .get_settings		= greth_get_settings,
        .set_settings		= greth_set_settings,
        .get_drvinfo		= greth_get_drvinfo,
        .get_regs_len		= greth_get_regs_len,
        .get_regs		= greth_get_regs,
        .get_link		= ethtool_op_get_link,
};

static struct net_device_ops greth_netdev_ops = {
        .ndo_open		= greth_open,
        .ndo_stop		= greth_close,
        .ndo_start_xmit		= greth_start_xmit,
        .ndo_set_mac_address	= greth_set_mac_add,
        .ndo_validate_addr	= eth_validate_addr,
};

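/* Busy-wait for the MDIO interface, with a timeout of roughly 40 ms
 * (4*HZ/100 jiffies). Returns 0 on timeout, 1 when the bus is free. */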
static inline int wait_for_mdio(struct greth_private *greth)
{
        unsigned long timeout = jiffies + 4*HZ/100;

        while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
                if (time_after(jiffies, timeout))
                        return 0;
        }
        return 1;
}

static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
{
        struct greth_private *greth = bus->priv;
        int data;

        if (!wait_for_mdio(greth))
                return -EBUSY;

        GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);

        if (!wait_for_mdio(greth))
                return -EBUSY;

        if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
                data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
                return data;
        } else {
                return -1;
        }
}

static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
        struct greth_private *greth = bus->priv;

        if (!wait_for_mdio(greth))
                return -EBUSY;

        GRETH_REGSAVE(greth->regs->mdio,
                      ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);

        if (!wait_for_mdio(greth))
                return -EBUSY;

        return 0;
}

static int greth_mdio_reset(struct mii_bus *bus)
{
        return 0;
}

static void greth_link_change(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        struct phy_device *phydev = greth->phy;
        unsigned long flags;
        int status_change = 0;
        u32 ctrl;

        spin_lock_irqsave(&greth->devlock, flags);

        if (phydev->link) {

                if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
                        ctrl = GRETH_REGLOAD(greth->regs->control) &
                               ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);

                        if (phydev->duplex)
                                ctrl |= GRETH_CTRL_FD;

                        if (phydev->speed == SPEED_100)
                                ctrl |= GRETH_CTRL_SP;
                        else if (phydev->speed == SPEED_1000)
                                ctrl |= GRETH_CTRL_GB;

                        GRETH_REGSAVE(greth->regs->control, ctrl);
                        greth->speed = phydev->speed;
                        greth->duplex = phydev->duplex;
                        status_change = 1;
                }
        }

        if (phydev->link != greth->link) {
                if (!phydev->link) {
                        greth->speed = 0;
                        greth->duplex = -1;
                }
                greth->link = phydev->link;

                status_change = 1;
        }

        spin_unlock_irqrestore(&greth->devlock, flags);

        if (status_change) {
                if (phydev->link)
                        pr_debug("%s: link up (%d/%s)\n",
                                 dev->name, phydev->speed,
                                 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
                else
                        pr_debug("%s: link down\n", dev->name);
        }
}

static int greth_mdio_probe(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        struct phy_device *phy = NULL;
        int ret;

        /* Find the first PHY */
        phy = phy_find_first(greth->mdio);

        if (!phy) {
                if (netif_msg_probe(greth))
                        dev_err(&dev->dev, "no PHY found\n");
                return -ENXIO;
        }

        ret = phy_connect_direct(dev, phy, &greth_link_change,
                                 0, greth->gbit_mac ?
                                 PHY_INTERFACE_MODE_GMII :
                                 PHY_INTERFACE_MODE_MII);
        if (ret) {
                if (netif_msg_ifup(greth))
                        dev_err(&dev->dev, "could not attach to PHY\n");
                return ret;
        }

        if (greth->gbit_mac)
                phy->supported &= PHY_GBIT_FEATURES;
        else
                phy->supported &= PHY_BASIC_FEATURES;

        phy->advertising = phy->supported;

        greth->link = 0;
        greth->speed = 0;
        greth->duplex = -1;
        greth->phy = phy;

        return 0;
}

static inline int phy_aneg_done(struct phy_device *phydev)
{
        int retval;

        retval = phy_read(phydev, MII_BMSR);

        return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}

static int greth_mdio_init(struct greth_private *greth)
{
        int ret, phy;
        unsigned long timeout;

        greth->mdio = mdiobus_alloc();
        if (!greth->mdio)
                return -ENOMEM;

        greth->mdio->name = "greth-mdio";
        snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
        greth->mdio->read = greth_mdio_read;
        greth->mdio->write = greth_mdio_write;
        greth->mdio->reset = greth_mdio_reset;
        greth->mdio->priv = greth;

        greth->mdio->irq = greth->mdio_irqs;

        for (phy = 0; phy < PHY_MAX_ADDR; phy++)
                greth->mdio->irq[phy] = PHY_POLL;

        ret = mdiobus_register(greth->mdio);
        if (ret)
                goto error;

        ret = greth_mdio_probe(greth->netdev);
        if (ret) {
                if (netif_msg_probe(greth))
                        dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
                goto unreg_mdio;
        }

        phy_start(greth->phy);

        /* If the Ethernet debug link (EDCL) is used, make autonegotiation
         * happen right away */
        if (greth->edcl && greth_edcl == 1) {
                phy_start_aneg(greth->phy);
                timeout = jiffies + 6*HZ;
                while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout))
                        ;
                genphy_read_status(greth->phy);
                greth_link_change(greth->netdev);
        }

        return 0;

unreg_mdio:
        mdiobus_unregister(greth->mdio);
error:
        mdiobus_free(greth->mdio);
        return ret;
}

/* Initialize the GRETH MAC */
static int __devinit greth_of_probe(struct platform_device *ofdev)
{
        struct net_device *dev;
        struct greth_private *greth;
        struct greth_regs *regs;

        int i;
        int err;
        int tmp;
        unsigned long timeout;

        dev = alloc_etherdev(sizeof(struct greth_private));

        if (dev == NULL)
                return -ENOMEM;

        greth = netdev_priv(dev);
        greth->netdev = dev;
        greth->dev = &ofdev->dev;

        if (greth_debug > 0)
                greth->msg_enable = greth_debug;
        else
                greth->msg_enable = GRETH_DEF_MSG_ENABLE;

        spin_lock_init(&greth->devlock);

        greth->regs = of_ioremap(&ofdev->resource[0], 0,
                                 resource_size(&ofdev->resource[0]),
                                 "grlib-greth regs");

        if (greth->regs == NULL) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "ioremap failure.\n");
                err = -EIO;
                goto error1;
        }

        regs = (struct greth_regs *) greth->regs;
        greth->irq = ofdev->archdata.irqs[0];

        dev_set_drvdata(greth->dev, dev);
        SET_NETDEV_DEV(dev, greth->dev);

        if (netif_msg_probe(greth))
                dev_dbg(greth->dev, "resetting controller.\n");

        /* Reset the controller. */
        GRETH_REGSAVE(regs->control, GRETH_RESET);

        /* Wait for MAC to reset itself */
        timeout = jiffies + HZ/100;
        while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
                if (time_after(jiffies, timeout)) {
                        err = -EIO;
                        if (netif_msg_probe(greth))
                                dev_err(greth->dev, "timeout when waiting for reset.\n");
                        goto error2;
                }
        }

        /* Get default PHY address */
        greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;

        /* Check if we have a GBIT capable MAC */
        tmp = GRETH_REGLOAD(regs->control);
        greth->gbit_mac = (tmp >> 27) & 1;

        /* Check for multicast capability */
        greth->multicast = (tmp >> 25) & 1;

        greth->edcl = (tmp >> 31) & 1;

        /* If we have EDCL we disable the EDCL speed-duplex FSM so
         * it doesn't interfere with the software */
        if (greth->edcl != 0)
                GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);

        /* Check if MAC can handle MDIO interrupts */
        greth->mdio_int_en = (tmp >> 26) & 1;

        err = greth_mdio_init(greth);
        if (err) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "failed to register MDIO bus\n");
                goto error2;
        }

        /* Allocate TX descriptor ring in coherent memory */
        greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
                                                                   1024,
                                                                   &greth->tx_bd_base_phys,
                                                                   GFP_KERNEL);

        if (!greth->tx_bd_base) {
                if (netif_msg_probe(greth))
                        dev_err(&dev->dev, "could not allocate descriptor memory.\n");
                err = -ENOMEM;
                goto error3;
        }

        memset(greth->tx_bd_base, 0, 1024);

        /* Allocate RX descriptor ring in coherent memory */
        greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
                                                                   1024,
                                                                   &greth->rx_bd_base_phys,
                                                                   GFP_KERNEL);

        if (!greth->rx_bd_base) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "could not allocate descriptor memory.\n");
                err = -ENOMEM;
                goto error4;
        }

        memset(greth->rx_bd_base, 0, 1024);

        /* Get MAC address from: module param, OF property or ID prom */
        for (i = 0; i < 6; i++) {
                if (macaddr[i] != 0)
                        break;
        }
        if (i == 6) {
                const unsigned char *addr;
                int len;

                addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
                                       &len);
                if (addr != NULL && len == 6) {
                        for (i = 0; i < 6; i++)
                                macaddr[i] = (unsigned int) addr[i];
                } else {
#ifdef CONFIG_SPARC
                        for (i = 0; i < 6; i++)
                                macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
#endif
                }
        }

        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = macaddr[i];

        /* Let the next device probed get a different default address */
        macaddr[5]++;

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "no valid ethernet address, aborting.\n");
                err = -EINVAL;
                goto error5;
        }

        GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
        GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
                      dev->dev_addr[4] << 8 | dev->dev_addr[5]);

        /* Clear all pending interrupts except PHY irq */
        GRETH_REGSAVE(regs->status, 0xFF);

        if (greth->gbit_mac) {
                dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                                   NETIF_F_RXCSUM;
                dev->features = dev->hw_features | NETIF_F_HIGHDMA;
                greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
        }

        if (greth->multicast) {
                greth_netdev_ops.ndo_set_multicast_list = greth_set_multicast_list;
                dev->flags |= IFF_MULTICAST;
        } else {
                dev->flags &= ~IFF_MULTICAST;
        }

        dev->netdev_ops = &greth_netdev_ops;
        dev->ethtool_ops = &greth_ethtool_ops;

        err = register_netdev(dev);
        if (err) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "netdevice registration failed.\n");
                goto error5;
        }

        /* setup NAPI */
        netif_napi_add(dev, &greth->napi, greth_poll, 64);

        return 0;

error5:
        dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
error4:
        dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
error3:
        mdiobus_unregister(greth->mdio);
error2:
        of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
error1:
        free_netdev(dev);
        return err;
}

static int __devexit greth_of_remove(struct platform_device *of_dev)
{
        struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
        struct greth_private *greth = netdev_priv(ndev);

        /* Free descriptor areas */
        dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);

        dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);

        dev_set_drvdata(&of_dev->dev, NULL);

        if (greth->phy)
                phy_stop(greth->phy);
        mdiobus_unregister(greth->mdio);

        unregister_netdev(ndev);
        free_netdev(ndev);

        of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));

        return 0;
}

static struct of_device_id greth_of_match[] = {
        {
                .name = "GAISLER_ETHMAC",
        },
        {
                .name = "01_01d",
        },
        {},
};

MODULE_DEVICE_TABLE(of, greth_of_match);

static struct platform_driver greth_of_driver = {
        .driver = {
                .name = "grlib-greth",
                .owner = THIS_MODULE,
                .of_match_table = greth_of_match,
        },
        .probe = greth_of_probe,
        .remove = __devexit_p(greth_of_remove),
};

static int __init greth_init(void)
{
        return platform_driver_register(&greth_of_driver);
}

static void __exit greth_cleanup(void)
{
        platform_driver_unregister(&greth_of_driver);
}

module_init(greth_init);
module_exit(greth_cleanup);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
MODULE_LICENSE("GPL");