/*
 * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
 *
 * Mostly rewritten, based on driver from Sigma Designs.  Original
 * copyright notice below.
 *
 *
 * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
 *
 * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/dma-mapping.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <asm/barrier.h>

#include "nb8800.h"

static void nb8800_tx_done(struct net_device *dev);
static int nb8800_dma_stop(struct net_device *dev);

static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
{
	return readb_relaxed(priv->base + reg);
}

static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
{
	return readl_relaxed(priv->base + reg);
}

static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
{
	writeb_relaxed(val, priv->base + reg);
}

static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
{
	writew_relaxed(val, priv->base + reg);
}

static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
{
	writel_relaxed(val, priv->base + reg);
}

static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
				u32 mask, u32 val)
{
	u32 old = nb8800_readb(priv, reg);
	u32 new = (old & ~mask) | (val & mask);

	if (new != old)
		nb8800_writeb(priv, reg, new);
}

static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
				u32 mask, u32 val)
{
	u32 old = nb8800_readl(priv, reg);
	u32 new = (old & ~mask) | (val & mask);

	if (new != old)
		nb8800_writel(priv, reg, new);
}

static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
			       bool set)
{
	nb8800_maskb(priv, reg, bits, set ? bits : 0);
}

static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
{
	nb8800_maskb(priv, reg, bits, bits);
}

static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
{
	nb8800_maskb(priv, reg, bits, 0);
}

static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
			       bool set)
{
	nb8800_maskl(priv, reg, bits, set ? bits : 0);
}

static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
{
	nb8800_maskl(priv, reg, bits, bits);
}

static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
{
	nb8800_maskl(priv, reg, bits, 0);
}

static int nb8800_mdio_wait(struct mii_bus *bus)
{
	struct nb8800_priv *priv = bus->priv;
	u32 val;

	return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
					 val, !(val & MDIO_CMD_GO), 1, 1000);
}

static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
{
	struct nb8800_priv *priv = bus->priv;
	int err;

	err = nb8800_mdio_wait(bus);
	if (err)
		return err;

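	/* Write the command, then start it with a second write setting
	 * the GO bit after a short delay.
	 */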
	nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
	udelay(10);
	nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);

	return nb8800_mdio_wait(bus);
}

static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct nb8800_priv *priv = bus->priv;
	u32 val;
	int err;

	err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
	if (err)
		return err;

	val = nb8800_readl(priv, NB8800_MDIO_STS);
	if (val & MDIO_STS_ERR)
		return 0xffff;

	return val & 0xffff;
}

static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
	u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
		  MDIO_CMD_DATA(val) | MDIO_CMD_WR;

	return nb8800_mdio_cmd(bus, cmd);
}

static void nb8800_mac_tx(struct net_device *dev, bool enable)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
		cpu_relax();

	nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
}

static void nb8800_mac_rx(struct net_device *dev, bool enable)
{
	nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
}

static void nb8800_mac_af(struct net_device *dev, bool enable)
{
	nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
}

static void nb8800_start_rx(struct net_device *dev)
{
	nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
}

static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
	struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
	int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
	dma_addr_t dma_addr;
	struct page *page;
	unsigned long offset;
	void *data;

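	/* Allocate a page fragment for the rx buffer (from the per-cpu
	 * NAPI cache when called from softirq context) and map it for
	 * DMA; the descriptor is pointed at the mapped address below.
	 */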
	data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
	if (!data)
		return -ENOMEM;

	page = virt_to_head_page(data);
	offset = data - page_address(page);

	dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
				DMA_FROM_DEVICE);

	if (dma_mapping_error(&dev->dev, dma_addr)) {
		skb_free_frag(data);
		return -ENOMEM;
	}

	rxb->page = page;
	rxb->offset = offset;
	rxd->desc.s_addr = dma_addr;

	return 0;
}

static void nb8800_receive(struct net_device *dev, unsigned int i,
			   unsigned int len)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
	struct page *page = priv->rx_bufs[i].page;
	int offset = priv->rx_bufs[i].offset;
	void *data = page_address(page) + offset;
	dma_addr_t dma = rxd->desc.s_addr;
	struct sk_buff *skb;
	unsigned int size;
	int err;

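	/* Frames up to RX_COPYBREAK are copied in full into a new skb so
	 * the DMA buffer can be reused; larger frames get only the first
	 * RX_COPYHDR bytes copied, with the rest of the page attached as
	 * a fragment once a replacement buffer has been allocated.
	 */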
	size = len <= RX_COPYBREAK ? len : RX_COPYHDR;

	skb = napi_alloc_skb(&priv->napi, size);
	if (!skb) {
		netdev_err(dev, "rx skb allocation failed\n");
		dev->stats.rx_dropped++;
		return;
	}

	if (len <= RX_COPYBREAK) {
		dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
		memcpy(skb_put(skb, len), data, len);
		dma_sync_single_for_device(&dev->dev, dma, len,
					   DMA_FROM_DEVICE);
	} else {
		err = nb8800_alloc_rx(dev, i, true);
		if (err) {
			netdev_err(dev, "rx buffer allocation failed\n");
			dev->stats.rx_dropped++;
			return;
		}

		dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
		memcpy(skb_put(skb, RX_COPYHDR), data, RX_COPYHDR);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset + RX_COPYHDR, len - RX_COPYHDR,
				RX_BUF_SIZE);
	}

	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(&priv->napi, skb);
}

static void nb8800_rx_error(struct net_device *dev, u32 report)
{
	if (report & RX_LENGTH_ERR)
		dev->stats.rx_length_errors++;

	if (report & RX_FCS_ERR)
		dev->stats.rx_crc_errors++;

	if (report & RX_FIFO_OVERRUN)
		dev->stats.rx_fifo_errors++;

	if (report & RX_ALIGNMENT_ERROR)
		dev->stats.rx_frame_errors++;

	dev->stats.rx_errors++;
}

static int nb8800_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd;
	unsigned int last = priv->rx_eoc;
	unsigned int next;
	int work = 0;

	nb8800_tx_done(dev);

again:
	do {
		struct nb8800_rx_buf *rxb;
		unsigned int len;

		next = (last + 1) % RX_DESC_COUNT;

		rxb = &priv->rx_bufs[next];
		rxd = &priv->rx_descs[next];

		if (!rxd->report)
			break;

		len = RX_BYTES_TRANSFERRED(rxd->report);

		if (IS_RX_ERROR(rxd->report))
			nb8800_rx_error(dev, rxd->report);
		else
			nb8800_receive(dev, next, len);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

		if (rxd->report & RX_MULTICAST_PKT)
			dev->stats.multicast++;

		rxd->report = 0;
		last = next;
		work++;
	} while (work < budget);

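	/* Move the end-of-chain marker to the last descriptor processed
	 * and restart rx DMA in case it stopped at the old chain end.
	 */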
	if (work) {
		priv->rx_descs[last].desc.config |= DESC_EOC;
		wmb();	/* ensure new EOC is written before clearing old */
		priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
		priv->rx_eoc = last;
		nb8800_start_rx(dev);
	}

	if (work < budget) {
		nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);

		/* If a packet arrived after we last checked but
		 * before writing RX_ITR, the interrupt will be
		 * delayed, so we retrieve it now.
		 */
		if (priv->rx_descs[next].report)
			goto again;

		napi_complete_done(napi, work);
	}

	return work;
}

static void __nb8800_tx_dma_start(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_buf *txb;
	u32 txc_cr;

	txb = &priv->tx_bufs[priv->tx_queue];
	if (!txb->ready)
		return;

	txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
	if (txc_cr & TCR_EN)
		return;

	nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
	wmb();		/* ensure desc addr is written before starting DMA */
	nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);

	priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
}

static void nb8800_tx_dma_start(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	spin_lock_irq(&priv->tx_lock);
	__nb8800_tx_dma_start(dev);
	spin_unlock_irq(&priv->tx_lock);
}

static void nb8800_tx_dma_start_irq(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	spin_lock(&priv->tx_lock);
	__nb8800_tx_dma_start(dev);
	spin_unlock(&priv->tx_lock);
}

static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_desc *txd;
	struct nb8800_tx_buf *txb;
	struct nb8800_dma_desc *desc;
	dma_addr_t dma_addr;
	unsigned int dma_len;
	unsigned int align;
	unsigned int next;

	if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

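	/* Bytes before the next 8-byte boundary are copied into the
	 * bounce buffer embedded in the tx descriptor and sent from a
	 * separate leading descriptor; the aligned remainder of the skb
	 * is DMA-mapped directly.
	 */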
	align = (8 - (uintptr_t)skb->data) & 7;

	dma_len = skb->len - align;
	dma_addr = dma_map_single(&dev->dev, skb->data + align,
				  dma_len, DMA_TO_DEVICE);

	if (dma_mapping_error(&dev->dev, dma_addr)) {
		netdev_err(dev, "tx dma mapping error\n");
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
		netif_stop_queue(dev);
		skb->xmit_more = 0;
	}

	next = priv->tx_next;
	txb = &priv->tx_bufs[next];
	txd = &priv->tx_descs[next];
	desc = &txd->desc[0];

	next = (next + 1) % TX_DESC_COUNT;

	if (align) {
		memcpy(txd->buf, skb->data, align);

		desc->s_addr =
			txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
		desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
		desc->config = DESC_BTS(2) | DESC_DS | align;

		desc++;
	}

	desc->s_addr = dma_addr;
	desc->n_addr = priv->tx_bufs[next].dma_desc;
	desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;

	if (!skb->xmit_more)
		desc->config |= DESC_EOC;

	txb->skb = skb;
	txb->dma_addr = dma_addr;
	txb->dma_len = dma_len;

	if (!priv->tx_chain) {
		txb->chain_len = 1;
		priv->tx_chain = txb;
	} else {
		priv->tx_chain->chain_len++;
	}

	netdev_sent_queue(dev, skb->len);

	priv->tx_next = next;

	if (!skb->xmit_more) {
		smp_wmb();
		priv->tx_chain->ready = true;
		priv->tx_chain = NULL;
		nb8800_tx_dma_start(dev);
	}

	return NETDEV_TX_OK;
}

static void nb8800_tx_error(struct net_device *dev, u32 report)
{
	if (report & TX_LATE_COLLISION)
		dev->stats.collisions++;

	if (report & TX_PACKET_DROPPED)
		dev->stats.tx_dropped++;

	if (report & TX_FIFO_UNDERRUN)
		dev->stats.tx_fifo_errors++;

	dev->stats.tx_errors++;
}

static void nb8800_tx_done(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int limit = priv->tx_next;
	unsigned int done = priv->tx_done;
	unsigned int packets = 0;
	unsigned int len = 0;

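	/* Reclaim completed descriptors up to the last one queued,
	 * stopping at the first descriptor the hardware has not yet
	 * written a status report for.
	 */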
	while (done != limit) {
		struct nb8800_tx_desc *txd = &priv->tx_descs[done];
		struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
		struct sk_buff *skb;

		if (!txd->report)
			break;

		skb = txb->skb;
		len += skb->len;

		dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
				 DMA_TO_DEVICE);

		if (IS_TX_ERROR(txd->report)) {
			nb8800_tx_error(dev, txd->report);
			kfree_skb(skb);
		} else {
			consume_skb(skb);
		}

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
		dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);

		txb->skb = NULL;
		txb->ready = false;
		txd->report = 0;

		done = (done + 1) % TX_DESC_COUNT;
		packets++;
	}

	if (packets) {
		smp_mb__before_atomic();
		atomic_add(packets, &priv->tx_free);
		netdev_completed_queue(dev, packets, len);
		netif_wake_queue(dev);
		priv->tx_done = done;
	}
}

static irqreturn_t nb8800_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct nb8800_priv *priv = netdev_priv(dev);
	irqreturn_t ret = IRQ_NONE;
	u32 val;

	/* tx interrupt */
	val = nb8800_readl(priv, NB8800_TXC_SR);
	if (val) {
		nb8800_writel(priv, NB8800_TXC_SR, val);

		if (val & TSR_DI)
			nb8800_tx_dma_start_irq(dev);

		if (val & TSR_TI)
			napi_schedule_irqoff(&priv->napi);

		if (unlikely(val & TSR_DE))
			netdev_err(dev, "TX DMA error\n");

		/* should never happen with automatic status retrieval */
		if (unlikely(val & TSR_TO))
			netdev_err(dev, "TX Status FIFO overflow\n");

		ret = IRQ_HANDLED;
	}

	/* rx interrupt */
	val = nb8800_readl(priv, NB8800_RXC_SR);
	if (val) {
		nb8800_writel(priv, NB8800_RXC_SR, val);

		if (likely(val & (RSR_RI | RSR_DI))) {
			nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
			napi_schedule_irqoff(&priv->napi);
		}

		if (unlikely(val & RSR_DE))
			netdev_err(dev, "RX DMA error\n");

		/* should never happen with automatic status retrieval */
		if (unlikely(val & RSR_RO))
			netdev_err(dev, "RX Status FIFO overflow\n");

		ret = IRQ_HANDLED;
	}

	return ret;
}

static void nb8800_mac_config(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	bool gigabit = priv->speed == SPEED_1000;
	u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
	u32 mac_mode = 0;
	u32 slot_time;
	u32 phy_clk;
	u32 ict;

	if (!priv->duplex)
		mac_mode |= HALF_DUPLEX;

	if (gigabit) {
		if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
			mac_mode |= RGMII_MODE;

		mac_mode |= GMAC_MODE;
		phy_clk = 125000000;

		/* Should be 512 but register is only 8 bits */
		slot_time = 255;
	} else {
		phy_clk = 25000000;
		slot_time = 128;
	}

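	/* IC_THRESHOLD scales with the ratio of the PHY clock required
	 * for the current speed to the MAC clock rate.
	 */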
	ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));

	nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
	nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
	nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
}

static void nb8800_pause_config(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 rxcr;

	if (priv->pause_aneg) {
		if (!phydev || !phydev->link)
			return;

		priv->pause_rx = phydev->pause;
		priv->pause_tx = phydev->pause ^ phydev->asym_pause;
	}

	nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);

	rxcr = nb8800_readl(priv, NB8800_RXC_CR);
	if (!!(rxcr & RCR_FL) == priv->pause_tx)
		return;

	if (netif_running(dev)) {
		napi_disable(&priv->napi);
		netif_tx_lock_bh(dev);
		nb8800_dma_stop(dev);
		nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
		nb8800_start_rx(dev);
		netif_tx_unlock_bh(dev);
		napi_enable(&priv->napi);
	} else {
		nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
	}
}

static void nb8800_link_reconfigure(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	int change = 0;

	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			priv->speed = phydev->speed;
			change = 1;
		}

		if (phydev->duplex != priv->duplex) {
			priv->duplex = phydev->duplex;
			change = 1;
		}

		if (change)
			nb8800_mac_config(dev);

		nb8800_pause_config(dev);
	}

	if (phydev->link != priv->link) {
		priv->link = phydev->link;
		change = 1;
	}

	if (change)
		phy_print_status(phydev);
}

static void nb8800_update_mac_addr(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);

	for (i = 0; i < ETH_ALEN; i++)
		nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
}

static int nb8800_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sock = addr;

	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, sock->sa_data);
	nb8800_update_mac_addr(dev);

	return 0;
}

static void nb8800_mc_init(struct net_device *dev, int val)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	nb8800_writeb(priv, NB8800_MC_INIT, val);
	readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
				  1, 1000);
}

static void nb8800_set_rx_mode(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i;

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		nb8800_mac_af(dev, false);
		return;
	}

	nb8800_mac_af(dev, true);
	nb8800_mc_init(dev, 0);

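	/* Load each multicast address into the hardware filter, waiting
	 * for the MC_INIT command to complete after every entry.
	 */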
	netdev_for_each_mc_addr(ha, dev) {
		for (i = 0; i < ETH_ALEN; i++)
			nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);

		nb8800_mc_init(dev, 0xff);
	}
}

#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))

static void nb8800_dma_free(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int i;

	if (priv->rx_bufs) {
		for (i = 0; i < RX_DESC_COUNT; i++)
			if (priv->rx_bufs[i].page)
				put_page(priv->rx_bufs[i].page);

		kfree(priv->rx_bufs);
		priv->rx_bufs = NULL;
	}

	if (priv->tx_bufs) {
		for (i = 0; i < TX_DESC_COUNT; i++)
			kfree_skb(priv->tx_bufs[i].skb);

		kfree(priv->tx_bufs);
		priv->tx_bufs = NULL;
	}

	if (priv->rx_descs) {
		dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
				  priv->rx_desc_dma);
		priv->rx_descs = NULL;
	}

	if (priv->tx_descs) {
		dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
				  priv->tx_desc_dma);
		priv->tx_descs = NULL;
	}
}

static void nb8800_dma_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_rx_desc *rxd;
	struct nb8800_tx_desc *txd;
	unsigned int i;

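	/* Chain the rx descriptors into a ring, with each report address
	 * pointing back into its own descriptor; the last descriptor
	 * wraps to the first and carries the end-of-chain flag.
	 */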
	for (i = 0; i < RX_DESC_COUNT; i++) {
		dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);

		rxd = &priv->rx_descs[i];
		rxd->desc.n_addr = rx_dma + sizeof(*rxd);
		rxd->desc.r_addr =
			rx_dma + offsetof(struct nb8800_rx_desc, report);
		rxd->desc.config = priv->rx_dma_config;
		rxd->report = 0;
	}

	rxd->desc.n_addr = priv->rx_desc_dma;
	rxd->desc.config |= DESC_EOC;

	priv->rx_eoc = RX_DESC_COUNT - 1;

	for (i = 0; i < TX_DESC_COUNT; i++) {
		struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
		dma_addr_t r_dma = txb->dma_desc +
			offsetof(struct nb8800_tx_desc, report);

		txd = &priv->tx_descs[i];
		txd->desc[0].r_addr = r_dma;
		txd->desc[1].r_addr = r_dma;
		txd->report = 0;
	}

	priv->tx_next = 0;
	priv->tx_queue = 0;
	priv->tx_done = 0;
	atomic_set(&priv->tx_free, TX_DESC_COUNT);

	nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);

	wmb();		/* ensure all setup is written before starting */
}

static int nb8800_dma_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	unsigned int n_rx = RX_DESC_COUNT;
	unsigned int n_tx = TX_DESC_COUNT;
	unsigned int i;
	int err;

	priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
					    &priv->rx_desc_dma, GFP_KERNEL);
	if (!priv->rx_descs)
		goto err_out;

	priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
	if (!priv->rx_bufs)
		goto err_out;

	for (i = 0; i < n_rx; i++) {
		err = nb8800_alloc_rx(dev, i, false);
		if (err)
			goto err_out;
	}

	priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
					    &priv->tx_desc_dma, GFP_KERNEL);
	if (!priv->tx_descs)
		goto err_out;

	priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
	if (!priv->tx_bufs)
		goto err_out;

	for (i = 0; i < n_tx; i++)
		priv->tx_bufs[i].dma_desc =
			priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);

	nb8800_dma_reset(dev);

	return 0;

err_out:
	nb8800_dma_free(dev);

	return -ENOMEM;
}

static int nb8800_dma_stop(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
	struct nb8800_tx_desc *txd = &priv->tx_descs[0];
	int retry = 5;
	u32 txcr;
	u32 rxcr;
	int err;
	unsigned int i;

	/* wait for tx to finish */
	err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
					!(txcr & TCR_EN) &&
					priv->tx_done == priv->tx_next,
					1000, 1000000);
	if (err)
		return err;

	/* The rx DMA only stops if it reaches the end of chain.
	 * To make this happen, we set the EOC flag on all rx
	 * descriptors, put the device in loopback mode, and send
	 * a few dummy frames.  The interrupt handler will ignore
	 * these since NAPI is disabled and no real frames are in
	 * the tx queue.
	 */

	for (i = 0; i < RX_DESC_COUNT; i++)
		priv->rx_descs[i].desc.config |= DESC_EOC;

	txd->desc[0].s_addr =
		txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
	txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
	memset(txd->buf, 0, sizeof(txd->buf));

	nb8800_mac_af(dev, false);
	nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);

	do {
		nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
		wmb();
		nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);

		err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
						rxcr, !(rxcr & RCR_EN),
						1000, 100000);
	} while (err && --retry);

	nb8800_mac_af(dev, true);
	nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
	nb8800_dma_reset(dev);

	return retry ? 0 : -ETIMEDOUT;
}

static void nb8800_pause_adv(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	u32 adv = 0;

	if (!phydev)
		return;

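	/* Standard pause advertisement encoding: rx only advertises
	 * Pause | Asym_Pause, rx+tx advertises Pause, tx only advertises
	 * Asym_Pause.
	 */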
	if (priv->pause_rx)
		adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	if (priv->pause_tx)
		adv ^= ADVERTISED_Asym_Pause;

	phydev->supported |= adv;
	phydev->advertising |= adv;
}

static int nb8800_open(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	int err;

	/* clear any pending interrupts */
	nb8800_writel(priv, NB8800_RXC_SR, 0xf);
	nb8800_writel(priv, NB8800_TXC_SR, 0xf);

	err = nb8800_dma_init(dev);
	if (err)
		return err;

	err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
	if (err)
		goto err_free_dma;

	nb8800_mac_rx(dev, true);
	nb8800_mac_tx(dev, true);

	phydev = of_phy_connect(dev, priv->phy_node,
				nb8800_link_reconfigure, 0,
				priv->phy_mode);
	if (!phydev) {
		err = -ENODEV;
		goto err_free_irq;
	}

	nb8800_pause_adv(dev);

	netdev_reset_queue(dev);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	nb8800_start_rx(dev);
	phy_start(phydev);

	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
err_free_dma:
	nb8800_dma_free(dev);

	return err;
}

static int nb8800_stop(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	phy_stop(phydev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	nb8800_dma_stop(dev);
	nb8800_mac_rx(dev, false);
	nb8800_mac_tx(dev, false);

	phy_disconnect(phydev);

	free_irq(dev->irq, dev);

	nb8800_dma_free(dev);

	return 0;
}

static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	return phy_mii_ioctl(dev->phydev, rq, cmd);
}

static const struct net_device_ops nb8800_netdev_ops = {
	.ndo_open		= nb8800_open,
	.ndo_stop		= nb8800_stop,
	.ndo_start_xmit		= nb8800_xmit,
	.ndo_set_mac_address	= nb8800_set_mac_address,
	.ndo_set_rx_mode	= nb8800_set_rx_mode,
	.ndo_do_ioctl		= nb8800_ioctl,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static int nb8800_nway_reset(struct net_device *dev)
{
	struct phy_device *phydev = dev->phydev;

	if (!phydev)
		return -ENODEV;

	return genphy_restart_aneg(phydev);
}

static void nb8800_get_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pp)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	pp->autoneg = priv->pause_aneg;
	pp->rx_pause = priv->pause_rx;
	pp->tx_pause = priv->pause_tx;
}

static int nb8800_set_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pp)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	priv->pause_aneg = pp->autoneg;
	priv->pause_rx = pp->rx_pause;
	priv->pause_tx = pp->tx_pause;

	nb8800_pause_adv(dev);

	if (!priv->pause_aneg)
		nb8800_pause_config(dev);
	else if (phydev)
		phy_start_aneg(phydev);

	return 0;
}

static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
	"rx_bytes_ok",
	"rx_frames_ok",
	"rx_undersize_frames",
	"rx_fragment_frames",
	"rx_64_byte_frames",
	"rx_127_byte_frames",
	"rx_255_byte_frames",
	"rx_511_byte_frames",
	"rx_1023_byte_frames",
	"rx_max_size_frames",
	"rx_oversize_frames",
	"rx_bad_fcs_frames",
	"rx_broadcast_frames",
	"rx_multicast_frames",
	"rx_control_frames",
	"rx_pause_frames",
	"rx_unsup_control_frames",
	"rx_align_error_frames",
	"rx_overrun_frames",
	"rx_jabber_frames",
	"rx_bytes",
	"rx_frames",

	"tx_bytes_ok",
	"tx_frames_ok",
	"tx_64_byte_frames",
	"tx_127_byte_frames",
	"tx_255_byte_frames",
	"tx_511_byte_frames",
	"tx_1023_byte_frames",
	"tx_max_size_frames",
	"tx_oversize_frames",
	"tx_broadcast_frames",
	"tx_multicast_frames",
	"tx_control_frames",
	"tx_pause_frames",
	"tx_underrun_frames",
	"tx_single_collision_frames",
	"tx_multi_collision_frames",
	"tx_deferred_collision_frames",
	"tx_late_collision_frames",
	"tx_excessive_collision_frames",
	"tx_bytes",
	"tx_frames",
	"tx_collisions",
};

#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)

static int nb8800_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return NB8800_NUM_STATS;

	return -EOPNOTSUPP;
}

static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
{
	if (sset == ETH_SS_STATS)
		memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
}

static u32 nb8800_read_stat(struct net_device *dev, int index)
{
	struct nb8800_priv *priv = netdev_priv(dev);

	nb8800_writeb(priv, NB8800_STAT_INDEX, index);

	return nb8800_readl(priv, NB8800_STAT_DATA);
}

static void nb8800_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *estats, u64 *st)
{
	unsigned int i;
	u32 rx, tx;

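	/* Hardware counter index i returns an rx statistic; the same
	 * index with bit 7 set returns the corresponding tx statistic.
	 */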
	for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
		rx = nb8800_read_stat(dev, i);
		tx = nb8800_read_stat(dev, i | 0x80);
		st[i] = rx;
		st[i + NB8800_NUM_STATS / 2] = tx;
	}
}

static const struct ethtool_ops nb8800_ethtool_ops = {
	.nway_reset		= nb8800_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_pauseparam		= nb8800_get_pauseparam,
	.set_pauseparam		= nb8800_set_pauseparam,
	.get_sset_count		= nb8800_get_sset_count,
	.get_strings		= nb8800_get_strings,
	.get_ethtool_stats	= nb8800_get_ethtool_stats,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static int nb8800_hw_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 val;

	val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
	nb8800_writeb(priv, NB8800_TX_CTL1, val);

	/* Collision retry count */
	nb8800_writeb(priv, NB8800_TX_CTL2, 5);

	val = RX_PAD_STRIP | RX_AF_EN;
	nb8800_writeb(priv, NB8800_RX_CTL, val);

	/* Chosen by fair dice roll */
	nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);

	/* TX cycles per deferral period */
	nb8800_writeb(priv, NB8800_TX_SDP, 12);

	/* The following three threshold values have been
	 * experimentally determined for good results.
	 */

	/* RX/TX FIFO threshold for partial empty (64-bit entries) */
	nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);

	/* RX/TX FIFO threshold for partial full (64-bit entries) */
	nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);

	/* Buffer size for transmit (64-bit entries) */
	nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);

	/* Configure tx DMA */

	val = nb8800_readl(priv, NB8800_TXC_CR);
	val &= TCR_LE;		/* keep endian setting */
	val |= TCR_DM;		/* DMA descriptor mode */
	val |= TCR_RS;		/* automatically store tx status */
	val |= TCR_DIE;		/* interrupt on DMA chain completion */
	val |= TCR_TFI(7);	/* interrupt after 7 frames transmitted */
	val |= TCR_BTS(2);	/* 32-byte bus transaction size */
	nb8800_writel(priv, NB8800_TXC_CR, val);

	/* TX complete interrupt after 10 ms or 7 frames (see above) */
	val = clk_get_rate(priv->clk) / 100;
	nb8800_writel(priv, NB8800_TX_ITR, val);

	/* Configure rx DMA */

	val = nb8800_readl(priv, NB8800_RXC_CR);
	val &= RCR_LE;		/* keep endian setting */
	val |= RCR_DM;		/* DMA descriptor mode */
	val |= RCR_RS;		/* automatically store rx status */
	val |= RCR_DIE;		/* interrupt at end of DMA chain */
	val |= RCR_RFI(7);	/* interrupt after 7 frames received */
	val |= RCR_BTS(2);	/* 32-byte bus transaction size */
	nb8800_writel(priv, NB8800_RXC_CR, val);

	/* The rx interrupt can fire before the DMA has completed
	 * unless a small delay is added.  50 us is hopefully enough.
	 */
	priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;

	/* In NAPI poll mode we want to disable interrupts, but the
	 * hardware does not permit this.  Delay 10 ms instead.
	 */
	priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;

	nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);

	priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;

	/* Flow control settings */

	/* Pause time of 0.1 ms */
	val = 100000 / 512;
	nb8800_writeb(priv, NB8800_PQ1, val >> 8);
	nb8800_writeb(priv, NB8800_PQ2, val & 0xff);

	/* Auto-negotiate by default */
	priv->pause_aneg = true;
	priv->pause_rx = true;
	priv->pause_tx = true;

	nb8800_mc_init(dev, 0);

	return 0;
}

static int nb8800_tangox_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	u32 pad_mode = PAD_MODE_MII;

	switch (priv->phy_mode) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		pad_mode = PAD_MODE_MII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
		pad_mode = PAD_MODE_RGMII;
		break;

	case PHY_INTERFACE_MODE_RGMII_TXID:
		pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
		break;

	default:
		dev_err(dev->dev.parent, "unsupported phy mode %s\n",
			phy_modes(priv->phy_mode));
		return -EINVAL;
	}

	nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);

	return 0;
}

static int nb8800_tangox_reset(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int clk_div;

	nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
	usleep_range(1000, 10000);
	nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);

	wmb();		/* ensure reset is cleared before proceeding */

	clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
	nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);

	return 0;
}

static const struct nb8800_ops nb8800_tangox_ops = {
	.init	= nb8800_tangox_init,
	.reset	= nb8800_tangox_reset,
};

static int nb8800_tango4_init(struct net_device *dev)
{
	struct nb8800_priv *priv = netdev_priv(dev);
	int err;

	err = nb8800_tangox_init(dev);
	if (err)
		return err;

	/* On tango4 interrupt on DMA completion per frame works and gives
	 * better performance despite generating more rx interrupts.
	 */

	/* Disable unnecessary interrupt on rx completion */
	nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));

	/* Request interrupt on descriptor DMA completion */
	priv->rx_dma_config |= DESC_ID;

	return 0;
}

static const struct nb8800_ops nb8800_tango4_ops = {
	.init	= nb8800_tango4_init,
	.reset	= nb8800_tangox_reset,
};

static const struct of_device_id nb8800_dt_ids[] = {
	{
		.compatible = "aurora,nb8800",
	},
	{
		.compatible = "sigma,smp8642-ethernet",
		.data = &nb8800_tangox_ops,
	},
	{
		.compatible = "sigma,smp8734-ethernet",
		.data = &nb8800_tango4_ops,
	},
	{ }
};

static int nb8800_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct nb8800_ops *ops = NULL;
	struct nb8800_priv *priv;
	struct resource *res;
	struct net_device *dev;
	struct mii_bus *bus;
	const unsigned char *mac;
	void __iomem *base;
	int irq;
	int ret;

	match = of_match_device(nb8800_dt_ids, &pdev->dev);
	if (match)
		ops = match->data;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "No IRQ\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);

	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	priv = netdev_priv(dev);
	priv->base = base;

	priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
	if (priv->phy_mode < 0)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(priv->clk);
		goto err_free_dev;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		goto err_free_dev;

	spin_lock_init(&priv->tx_lock);

	if (ops && ops->reset) {
		ret = ops->reset(dev);
		if (ret)
			goto err_free_dev;
	}

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}

	bus->name = "nb8800-mii";
	bus->read = nb8800_mdio_read;
	bus->write = nb8800_mdio_write;
	bus->parent = &pdev->dev;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
		 (unsigned long)res->start);
	bus->priv = priv;

	ret = of_mdiobus_register(bus, pdev->dev.of_node);
	if (ret) {
		dev_err(&pdev->dev, "failed to register MII bus\n");
		goto err_disable_clk;
	}

	if (of_phy_is_fixed_link(pdev->dev.of_node)) {
		ret = of_phy_register_fixed_link(pdev->dev.of_node);
		if (ret < 0) {
			dev_err(&pdev->dev, "bad fixed-link spec\n");
			goto err_free_bus;
		}
		priv->phy_node = of_node_get(pdev->dev.of_node);
	}

	if (!priv->phy_node)
		priv->phy_node = of_parse_phandle(pdev->dev.of_node,
						  "phy-handle", 0);

	if (!priv->phy_node) {
		dev_err(&pdev->dev, "no PHY specified\n");
		ret = -ENODEV;
		goto err_free_bus;
	}

	priv->mii_bus = bus;

	ret = nb8800_hw_init(dev);
	if (ret)
		goto err_free_bus;

	if (ops && ops->init) {
		ret = ops->init(dev);
		if (ret)
			goto err_free_bus;
	}

	dev->netdev_ops = &nb8800_netdev_ops;
	dev->ethtool_ops = &nb8800_ethtool_ops;
	dev->flags |= IFF_MULTICAST;
	dev->irq = irq;

	mac = of_get_mac_address(pdev->dev.of_node);
	if (mac)
		ether_addr_copy(dev->dev_addr, mac);

	if (!is_valid_ether_addr(dev->dev_addr))
		eth_hw_addr_random(dev);

	nb8800_update_mac_addr(dev);

	netif_carrier_off(dev);

	ret = register_netdev(dev);
	if (ret) {
		netdev_err(dev, "failed to register netdev\n");
		goto err_free_dma;
	}

	netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);

	netdev_info(dev, "MAC address %pM\n", dev->dev_addr);

	return 0;

err_free_dma:
	nb8800_dma_free(dev);
err_free_bus:
	mdiobus_unregister(bus);
err_disable_clk:
	clk_disable_unprepare(priv->clk);
err_free_dev:
	free_netdev(dev);

	return ret;
}

static int nb8800_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nb8800_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	mdiobus_unregister(priv->mii_bus);

	clk_disable_unprepare(priv->clk);

	nb8800_dma_free(ndev);
	free_netdev(ndev);

	return 0;
}

static struct platform_driver nb8800_driver = {
	.driver = {
		.name		= "nb8800",
		.of_match_table	= nb8800_dt_ids,
	},
	.probe	= nb8800_probe,
	.remove	= nb8800_remove,
};

module_platform_driver(nb8800_driver);

MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
MODULE_LICENSE("GPL");