/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64 /* bytes */

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)	\
				 | MACB_BIT(ISR_RLE)	\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

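/* A back-of-the-envelope check (assuming a maximum standard frame of 1518
 * octets plus 8 octets of preamble/SFD and a 12 octet inter-frame gap):
 * 1538 octets at 10 Mbit/s take 0.8 us each, i.e. one frame time is
 * 1538 * 0.8 = 1230.4 us, hence the 1230 us timeout above.
 */
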
/* DMA buffer descriptors can have different sizes, depending on the
 * hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

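/* Illustrative sizes, assuming each of the structs above is two 32-bit
 * words (8 bytes): a plain descriptor is 8 bytes, a 64-bit DMA descriptor
 * 16 bytes, and a 64-bit DMA + PTP descriptor 24 bytes, matching the
 * 2/4/6-word layouts in the comment block above.
 */
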
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

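/* The scaling above reflects the ring layout: the ring is an array of
 * basic two-word descriptors, and an extended descriptor occupies two
 * (64-bit DMA or PTP) or three (both) consecutive basic slots, so logical
 * ring index N lives at array index 2 * N or 3 * N respectively.
 */
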
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
	return NULL;
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

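/* Masking with (size - 1) implements the ring wrap-around and is only
 * correct because the ring sizes are powers of two, as required by the
 * DEFAULT_*_RING_SIZE comments above. For example, with tx_ring_size of
 * 512, index 513 wraps to 513 & 511 = 1.
 */
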
static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of the NCR register.
 * When the CPU is in big endian mode we need to program swapped mode for
 * management descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

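/* How the probe above works: __raw_writel() performs no byte swapping, so
 * the pattern only reads back unchanged when CPU and device byte order
 * agree. Assuming LLB is bit 1 of NCR, a native configuration reads back
 * 0x00000002, while a byte-swapped one reads back 0x02000000 and the
 * function returns false.
 */
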
static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}

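/* Usage sketch (illustrative, with a hypothetical PHY at address 0): the
 * two helpers above shift a standard IEEE 802.3 Clause 22 management frame
 * through the MAN register and busy-wait on the NSR IDLE bit, e.g.
 *
 *	int id1 = macb_mdio_read(bp->mii_bus, 0, MII_PHYSID1);
 *
 * In normal operation the MDIO core invokes them through
 * bp->mii_bus->read/write rather than calling them directly.
 */
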
/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk: Pointer to the clock to change
 * @speed: New link speed (SPEED_10/100/1000)
 * @dev: Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}

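/* Worked example for the 50 ppm check above: for gigabit the target is
 * 125 MHz, so rate / 100000 = 1250 and ferr ends up expressed in units of
 * 10 ppm. A rounded rate of 125006250 Hz (exactly 50 ppm off) gives
 * ferr = DIV_ROUND_UP(6250, 1250) = 5 and passes; anything further away
 * rounds up to at least 6 and triggers the warning.
 */
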
static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	struct device_node *np;
	int phy_irq, ret, i;

	pdata = dev_get_platdata(&bp->pdev->dev);
	np = bp->pdev->dev.of_node;
	ret = 0;

	if (np) {
		if (of_phy_is_fixed_link(np)) {
			if (of_phy_register_fixed_link(np) < 0) {
				dev_err(&bp->pdev->dev,
					"broken fixed-link specification\n");
				return -ENODEV;
			}
			bp->phy_node = of_node_get(np);
		} else {
			bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
			/* fall back to standard PHY registration if no
			 * phy-handle was found and no PHY was found during
			 * DT PHY registration
			 */
			if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
				for (i = 0; i < PHY_MAX_ADDR; i++) {
					struct phy_device *phydev;

					phydev = mdiobus_scan(bp->mii_bus, i);
					if (IS_ERR(phydev) &&
					    PTR_ERR(phydev) != -ENODEV) {
						ret = PTR_ERR(phydev);
						break;
					}
				}

				if (ret)
					return -ENODEV;
			}
		}
	}

	if (bp->phy_node) {
		phydev = of_phy_connect(dev, bp->phy_node,
					&macb_handle_link_change, 0,
					bp->phy_interface);
		if (!phydev)
			return -ENODEV;
	} else {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		if (pdata) {
			if (gpio_is_valid(pdata->phy_irq_pin)) {
				ret = devm_gpio_request(&bp->pdev->dev,
							pdata->phy_irq_pin, "phy int");
				if (!ret) {
					phy_irq = gpio_to_irq(pdata->phy_irq_pin);
					phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
				}
			} else {
				phydev->irq = PHY_POLL;
			}
		}

		/* attach the mac to the phy */
		ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
					 bp->phy_interface);
		if (ret) {
			netdev_err(dev, "Could not attach to PHY\n");
			return ret;
		}
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
		phydev->supported &= ~SUPPORTED_1000baseT_Half;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}

static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (pdata)
		bp->mii_bus->phy_mask = pdata->phy_mask;

	err = of_mdiobus_register(bp->mii_bus, np);
	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
err_out_free_mdiobus:
	of_node_put(bp->phy_node);
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}

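/* The WARN_ON above asserts that the block of u32 counters in
 * bp->hw_stats.macb, walked in lockstep with the hardware registers from
 * MACB_PFR to MACB_TPF (4 bytes apart), has exactly one member per
 * register, so a mismatch between the struct layout and the register map
 * is caught at runtime instead of silently corrupting statistics.
 */
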
static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		usleep_range(10, 250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}

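/* Note on the RX_WADDR masking in macb_get_addr(): the low bits of an RX
 * descriptor's address word double as the RX_USED and RX_WRAP flags, so
 * extracting and re-inserting the RX_WADDR field strips the flag bits and
 * yields the word-aligned buffer address before the upper 32 bits are
 * OR-ed in.
 */
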
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue *queue = container_of(work, struct macb_queue,
						tx_error_task);
	struct macb *bp = queue->bp;
	struct macb_tx_skb *tx_skb;
	struct macb_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int tail;
	unsigned long flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}

static void gem_rx_refill(struct macb_queue *queue)
{
	unsigned int entry;
	struct sk_buff *skb;
	dma_addr_t paddr;
	struct macb *bp = queue->bp;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		queue->rx_prepared_head++;
		desc = macb_rx_desc(queue, entry);

		if (!queue->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			queue->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			macb_set_addr(bp, desc, paddr);
			desc->ctrl = 0;

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->addr &= ~MACB_BIT(RX_USED);
			desc->ctrl = 0;
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
		    queue, queue->rx_prepared_head, queue->rx_tail);
}

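/* Producer/consumer note: rx_prepared_head is the producer index and
 * rx_tail the consumer index of the RX ring. CIRC_SPACE() from
 * <linux/circ_buf.h> reports how many entries may still be produced while
 * always keeping one slot free; for power-of-two sizes it evaluates to
 * (tail - head - 1) & (size - 1).
 */
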
/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this are updated, so we don't have to record
	 * anything.
	 */
}

static int gem_rx(struct macb_queue *queue, int budget)
{
	struct macb *bp = queue->bp;
	unsigned int len;
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	int count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
		desc = macb_rx_desc(queue, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);
		ctrl = desc->ctrl;

		if (!rxused)
			break;

		queue->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		skb = queue->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		queue->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		queue->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;
		queue->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(queue);

	return count;
}

static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb *bp = queue->bp;

	desc = macb_rx_desc(queue, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(queue, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(queue, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(queue, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}

static inline void macb_init_rx_ring(struct macb_queue *queue)
{
	struct macb *bp = queue->bp;
	dma_addr_t addr;
	struct macb_dma_desc *desc = NULL;
	int i;

	addr = queue->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		desc = macb_rx_desc(queue, i);
		macb_set_addr(bp, desc, addr);
		desc->ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	desc->addr |= MACB_BIT(RX_WRAP);
	queue->rx_tail = 0;
}

static int macb_rx(struct macb_queue *queue, int budget)
{
	struct macb *bp = queue->bp;
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = queue->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
		u32 ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(queue, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(queue, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(queue);
		queue_writel(queue, RBQP, queue->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	if (first_frag != -1)
		queue->rx_tail = first_frag;
	else
		queue->rx_tail = tail;

	return received;
}

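/* NAPI poll handler: hand the budget to the RX routine, then re-enable
 * RX interrupts once the ring is drained, re-scheduling instead if
 * packets arrived while interrupts were off.
 */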
static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
	struct macb *bp = queue->bp;
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(queue, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			queue_writel(queue, IER, MACB_RX_INT_FLAGS);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}

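/* Tasklet run after an HRESP (DMA bus error) interrupt: quiesce the
 * controller, rebuild all TX/RX rings and ring pointers, then restart
 * the interface, since the descriptor state can no longer be trusted.
 */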
static void macb_hresp_error_task(unsigned long data)
{
	struct macb *bp = (struct macb *)data;
	struct net_device *dev = bp->dev;
	struct macb_queue *queue = bp->queues;
	unsigned int q;
	u32 ctrl;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
					 MACB_TX_INT_FLAGS |
					 MACB_BIT(HRESP));
	}
	ctrl = macb_readl(bp, NCR);
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	bp->macbgem_ops.mog_init_rings(bp);

	/* Initialize TX and RX buffers */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
	macb_writel(bp, NCR, ctrl);

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
}

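/* Per-queue interrupt handler: RX completion is deferred to NAPI, TX
 * errors are handed to the tx_error_task, TX completions are handled
 * inline, and HRESP bus errors are escalated to the reset tasklet.
 */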
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&queue->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&queue->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		/* There is a hardware issue under heavy load where DMA can
		 * stop, this causes endless "used buffer descriptor read"
		 * interrupts but it can be cleared by re-enabling RX. See
		 * the at91 manual, section 41.3.1 or the Zynq manual
		 * section 16.7.4 for details.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			tasklet_schedule(&bp->hresp_err_tasklet);
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}
		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif

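/* Map an skb (linear part plus page fragments) onto TX descriptors.
 * Descriptors are filled in reverse order so that clearing the TX_USED
 * bit of the first one, which hands the frame to the hardware, happens
 * last; the first descriptor also carries the LSO fields when used.
 */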
static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = macb_tx_desc(queue, entry);
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
				  skb_transport_offset(skb) +
				  ETH_FCS_LEN;
		else /* TSO */ {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = macb_tx_desc(queue, entry);

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(bp, desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}

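/* Drop the LSO feature bits for skbs the hardware cannot segment:
 * every payload buffer except the last must be a multiple of 8 bytes
 * in size.
 */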
static netdev_features_t macb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	unsigned int nr_frags, f;
	unsigned int hdrlen;

	/* Validate LSO compatibility */

	/* there is only one buffer */
	if (!skb_is_nonlinear(skb))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);
	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
		hdrlen += tcp_hdrlen(skb);

	/* For LSO:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}

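/* Zero the checksum field of packets using checksum offload so that
 * the hardware checksum engine starts from a well-defined value.
 */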
static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <=2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}

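/* Main transmit entry point: work out how many descriptors the frame
 * needs (including an extra header descriptor for LSO), check that the
 * ring has room, map the skb and kick the controller with TSTART.
 */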
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso, is_udp = 0;

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);

		/* length of headers */
		if (is_udp)
			/* only queue eth + ip headers separately for UDP */
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}

static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb_queue *queue;
	dma_addr_t addr;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		if (!queue->rx_skbuff)
			continue;

		for (i = 0; i < bp->rx_ring_size; i++) {
			skb = queue->rx_skbuff[i];

			if (!skb)
				continue;

			desc = macb_rx_desc(queue, i);
			addr = macb_get_addr(bp, desc);

			dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			skb = NULL;
		}

		kfree(queue->rx_skbuff);
		queue->rx_skbuff = NULL;
	}
}

static void macb_free_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];

	if (queue->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  bp->rx_ring_size * bp->rx_buffer_size,
				  queue->rx_buffers, queue->rx_buffers_dma);
		queue->rx_buffers = NULL;
	}
}

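/* Free everything macb_alloc_consistent() set up: the per-queue TX/RX
 * descriptor rings, the tx_skb bookkeeping arrays and the RX buffers.
 */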
static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	bp->macbgem_ops.mog_free_rx_buffers(bp);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
		if (queue->rx_ring) {
			dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
					  queue->rx_ring, queue->rx_ring_dma);
			queue->rx_ring = NULL;
		}
	}
}

static int gem_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = bp->rx_ring_size * sizeof(struct sk_buff *);
		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
		if (!queue->rx_skbuff)
			return -ENOMEM;
		else
			netdev_dbg(bp->dev,
				   "Allocated %d RX struct sk_buff entries at %p\n",
				   bp->rx_ring_size, queue->rx_skbuff);
	}
	return 0;
}

static int macb_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];
	int size;

	size = bp->rx_ring_size * bp->rx_buffer_size;
	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					       &queue->rx_buffers_dma, GFP_KERNEL);
	if (!queue->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
	return 0;
}

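/* Allocate the DMA-coherent descriptor rings and tx_skb arrays for
 * every queue, then delegate RX buffer allocation to the MACB- or
 * GEM-specific helper; everything is torn down again on failure.
 */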
static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES(bp);
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;

		size = RX_RING_BYTES(bp);
		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->rx_ring_dma, GFP_KERNEL);
		if (!queue->rx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
	}
	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}

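/* GEM flavour of ring initialization: mark every TX descriptor as used
 * so the hardware sees an empty ring, set the wrap bits, reset the
 * ring pointers and prime the RX rings via gem_rx_refill().
 */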
static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	struct macb_dma_desc *desc = NULL;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < bp->tx_ring_size; i++) {
			desc = macb_tx_desc(queue, i);
			macb_set_addr(bp, desc, 0);
			desc->ctrl = MACB_BIT(TX_USED);
		}
		desc->ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;

		queue->rx_tail = 0;
		queue->rx_prepared_head = 0;

		gem_rx_refill(queue);
	}
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	struct macb_dma_desc *desc = NULL;

	macb_init_rx_ring(&bp->queues[0]);

	for (i = 0; i < bp->tx_ring_size; i++) {
		desc = macb_tx_desc(&bp->queues[0], i);
		macb_set_addr(bp, desc, 0);
		desc->ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
	desc->ctrl |= MACB_BIT(TX_WRAP);
}

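/* Put the controller into a quiescent, known state: disable RX and TX,
 * clear the statistics and status registers and mask all interrupts.
 */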
static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	/* Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
		queue_readl(queue, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
	}
}

static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

/* Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	struct macb_queue *queue;
	u32 buffer_size;
	unsigned int q;
	u32 dmacfg;

	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			if (q)
				queue_writel(queue, RBQS, buffer_size);
			else
				dmacfg |= GEM_BF(RXBS, buffer_size);
		}
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		if (bp->native_io)
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			dmacfg |= GEM_BIT(ADDR64);
#endif
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
#endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}

static void macb_init_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
	config |= MACB_BIT(PAE); /* PAuse Enable */
	config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
	if (bp->caps & MACB_CAPS_JUMBO)
		config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
	else
		config |= MACB_BIT(BIG); /* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF); /* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC); /* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		gem_writel(bp, JML, bp->jumbo_max_len);
	bp->speed = SPEED_10;
	bp->duplex = DUPLEX_HALF;
	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
	if (bp->caps & MACB_CAPS_JUMBO)
		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;

	macb_configure_dma(bp);

	/* Initialize TX and RX buffers */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
}

/* The hash address register is 64 bits long and takes up two
 * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function. The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received. If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast. A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register. A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register. To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */

static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}

/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = 0;
	mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}

/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);

		/* Disable RX checksum offload */
		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

		/* Enable RX checksum offload only if requested */
		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Enable all multicast mode */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Enable specific multicasts */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable all multicast mode */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}

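/* ndo_open: size the RX buffers for the current MTU, allocate the DMA
 * rings, program the hardware, then start NAPI, the PHY and the TX
 * queues.
 */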
static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	struct macb_queue *queue;
	unsigned int q;
	int err;

	netdev_dbg(bp->dev, "open\n");

	/* carrier starts down */
	netif_carrier_off(dev);

	/* if the phy is not yet registered, retry later */
	if (!dev->phydev)
		return -EAGAIN;

	/* RX buffers initialization */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		return err;
	}

	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_enable(&queue->napi);

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_tx_start_all_queues(dev);

	if (bp->ptp_info)
		bp->ptp_info->ptp_init(dev);

	return 0;
}

static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	netif_tx_stop_all_queues(dev);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_disable(&queue->napi);

	if (dev->phydev)
		phy_stop(dev->phydev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(dev);

	return 0;
}

static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;

	return 0;
}

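/* Fold the GEM hardware statistics registers into the driver's
 * software counters and the ethtool stats array; the 64-bit octet
 * counters are assembled from their low and high register halves, and
 * the per-queue software stats are appended after the register-based
 * ones.
 */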
static void gem_update_stats(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int i, q, idx;
	unsigned long *stat;

	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = bp->macb_reg_readl(bp, offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			/* Add GEM_OCTTXH, GEM_OCTRXH */
			val = bp->macb_reg_readl(bp, offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}

	idx = GEM_STATS_LEN;
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
			bp->ethtool_stats[idx++] = *stat;
}

static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->dev->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}

static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct macb *bp;

	bp = netdev_priv(dev);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64)
			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
}

2434static int gem_get_sset_count(struct net_device *dev, int sset)
2435{
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002436 struct macb *bp = netdev_priv(dev);
2437
Xander Huff3ff13f12015-01-13 16:15:51 -06002438 switch (sset) {
2439 case ETH_SS_STATS:
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002440 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
Xander Huff3ff13f12015-01-13 16:15:51 -06002441 default:
2442 return -EOPNOTSUPP;
2443 }
2444}
2445
2446static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2447{
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002448 char stat_string[ETH_GSTRING_LEN];
2449 struct macb *bp = netdev_priv(dev);
2450 struct macb_queue *queue;
Andy Shevchenko8bcbf822015-07-24 21:24:02 +03002451 unsigned int i;
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002452 unsigned int q;
Xander Huff3ff13f12015-01-13 16:15:51 -06002453
2454 switch (sset) {
2455 case ETH_SS_STATS:
2456 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2457 memcpy(p, gem_statistics[i].stat_string,
2458 ETH_GSTRING_LEN);
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002459
2460 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2461 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
2462 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
2463 q, queue_statistics[i].stat_string);
2464 memcpy(p, stat_string, ETH_GSTRING_LEN);
2465 }
2466 }
Xander Huff3ff13f12015-01-13 16:15:51 -06002467 break;
2468 }
2469}
2470
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01002471static struct net_device_stats *macb_get_stats(struct net_device *dev)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002472{
2473 struct macb *bp = netdev_priv(dev);
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02002474 struct net_device_stats *nstat = &bp->dev->stats;
Jamie Ilesa494ed82011-03-09 16:26:35 +00002475 struct macb_stats *hwstat = &bp->hw_stats.macb;
2476
2477 if (macb_is_gem(bp))
2478 return gem_get_stats(bp);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002479
frederic RODO6c36a702007-07-12 19:07:24 +02002480 /* read stats from hardware */
2481 macb_update_stats(bp);
2482
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002483 /* Convert HW stats into netdevice stats */
2484 nstat->rx_errors = (hwstat->rx_fcs_errors +
2485 hwstat->rx_align_errors +
2486 hwstat->rx_resource_errors +
2487 hwstat->rx_overruns +
2488 hwstat->rx_oversize_pkts +
2489 hwstat->rx_jabbers +
2490 hwstat->rx_undersize_pkts +
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002491 hwstat->rx_length_mismatch);
2492 nstat->tx_errors = (hwstat->tx_late_cols +
2493 hwstat->tx_excessive_cols +
2494 hwstat->tx_underruns +
Wolfgang Steinwender716723c2015-04-10 11:42:56 +02002495 hwstat->tx_carrier_errors +
2496 hwstat->sqe_test_errors);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002497 nstat->collisions = (hwstat->tx_single_cols +
2498 hwstat->tx_multiple_cols +
2499 hwstat->tx_excessive_cols);
2500 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2501 hwstat->rx_jabbers +
2502 hwstat->rx_undersize_pkts +
2503 hwstat->rx_length_mismatch);
Alexander Steinb19f7f72011-04-13 05:03:24 +00002504 nstat->rx_over_errors = hwstat->rx_resource_errors +
2505 hwstat->rx_overruns;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002506 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2507 nstat->rx_frame_errors = hwstat->rx_align_errors;
2508 nstat->rx_fifo_errors = hwstat->rx_overruns;
2509 /* XXX: What does "missed" mean? */
2510 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2511 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2512 nstat->tx_fifo_errors = hwstat->tx_underruns;
2513 /* Don't know about heartbeat or window errors... */
2514
2515 return nstat;
2516}
2517
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002518static int macb_get_regs_len(struct net_device *netdev)
2519{
2520 return MACB_GREGS_NBR * sizeof(u32);
2521}
2522
2523static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2524 void *p)
2525{
2526 struct macb *bp = netdev_priv(dev);
2527 unsigned int tail, head;
2528 u32 *regs_buff = p;
2529
2530 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2531 | MACB_GREGS_VERSION;
2532
Zach Brownb410d132016-10-19 09:56:57 -05002533 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2534 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002535
2536 regs_buff[0] = macb_readl(bp, NCR);
2537 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
2538 regs_buff[2] = macb_readl(bp, NSR);
2539 regs_buff[3] = macb_readl(bp, TSR);
2540 regs_buff[4] = macb_readl(bp, RBQP);
2541 regs_buff[5] = macb_readl(bp, TBQP);
2542 regs_buff[6] = macb_readl(bp, RSR);
2543 regs_buff[7] = macb_readl(bp, IMR);
2544
2545 regs_buff[8] = tail;
2546 regs_buff[9] = head;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002547 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2548 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002549
Neil Armstrongce721a72016-01-05 14:39:16 +01002550 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2551 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002552 if (macb_is_gem(bp))
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002553 regs_buff[13] = gem_readl(bp, DMACFG);
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002554}
2555
Sergio Prado3e2a5e12016-02-09 12:07:16 -02002556static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2557{
2558 struct macb *bp = netdev_priv(netdev);
2559
2560 wol->supported = 0;
2561 wol->wolopts = 0;
2562
2563 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2564 wol->supported = WAKE_MAGIC;
2565
2566 if (bp->wol & MACB_WOL_ENABLED)
2567 wol->wolopts |= WAKE_MAGIC;
2568 }
2569}
2570
2571static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2572{
2573 struct macb *bp = netdev_priv(netdev);
2574
2575 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2576 (wol->wolopts & ~WAKE_MAGIC))
2577 return -EOPNOTSUPP;
2578
2579 if (wol->wolopts & WAKE_MAGIC)
2580 bp->wol |= MACB_WOL_ENABLED;
2581 else
2582 bp->wol &= ~MACB_WOL_ENABLED;
2583
2584 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2585
2586 return 0;
2587}
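/*
 * Usage sketch (illustrative): magic-packet wake is toggled from user
 * space with ethtool, which lands in the handler above:
 *
 *	# ethtool -s eth0 wol g		(enable magic packet)
 *	# ethtool -s eth0 wol d		(disable)
 */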
2588
Zach Brown8441bb32016-10-19 09:56:58 -05002589static void macb_get_ringparam(struct net_device *netdev,
2590 struct ethtool_ringparam *ring)
2591{
2592 struct macb *bp = netdev_priv(netdev);
2593
2594 ring->rx_max_pending = MAX_RX_RING_SIZE;
2595 ring->tx_max_pending = MAX_TX_RING_SIZE;
2596
2597 ring->rx_pending = bp->rx_ring_size;
2598 ring->tx_pending = bp->tx_ring_size;
2599}
2600
2601static int macb_set_ringparam(struct net_device *netdev,
2602 struct ethtool_ringparam *ring)
2603{
2604 struct macb *bp = netdev_priv(netdev);
2605 u32 new_rx_size, new_tx_size;
2606 unsigned int reset = 0;
2607
	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
2609 return -EINVAL;
2610
2611 new_rx_size = clamp_t(u32, ring->rx_pending,
2612 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
2613 new_rx_size = roundup_pow_of_two(new_rx_size);
2614
2615 new_tx_size = clamp_t(u32, ring->tx_pending,
2616 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
2617 new_tx_size = roundup_pow_of_two(new_tx_size);
2618
2619 if ((new_tx_size == bp->tx_ring_size) &&
2620 (new_rx_size == bp->rx_ring_size)) {
2621 /* nothing to do */
2622 return 0;
2623 }
2624
2625 if (netif_running(bp->dev)) {
2626 reset = 1;
2627 macb_close(bp->dev);
2628 }
2629
2630 bp->rx_ring_size = new_rx_size;
2631 bp->tx_ring_size = new_tx_size;
2632
2633 if (reset)
2634 macb_open(bp->dev);
2635
2636 return 0;
2637}
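/*
 * Worked example (illustrative): "ethtool -G eth0 rx 300" arrives as
 * ring->rx_pending = 300; it is clamped to
 * [MIN_RX_RING_SIZE, MAX_RX_RING_SIZE] and rounded up to the next power
 * of two, so the interface is re-opened with a 512-entry RX ring.
 */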
2638
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01002639#ifdef CONFIG_MACB_USE_HWSTAMP
2640static unsigned int gem_get_tsu_rate(struct macb *bp)
2641{
2642 struct clk *tsu_clk;
2643 unsigned int tsu_rate;
2644
	tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
	if (!IS_ERR(tsu_clk)) {
		tsu_rate = clk_get_rate(tsu_clk);
	} else if (!IS_ERR(bp->pclk)) {
		/* try pclk instead */
		tsu_clk = bp->pclk;
		tsu_rate = clk_get_rate(tsu_clk);
	} else {
		return -ENOTSUPP;
	}
2654 return tsu_rate;
2655}
2656
2657static s32 gem_get_ptp_max_adj(void)
2658{
2659 return 64000000;
2660}
2661
2662static int gem_get_ts_info(struct net_device *dev,
2663 struct ethtool_ts_info *info)
2664{
2665 struct macb *bp = netdev_priv(dev);
2666
2667 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
2668 ethtool_op_get_ts_info(dev, info);
2669 return 0;
2670 }
2671
2672 info->so_timestamping =
2673 SOF_TIMESTAMPING_TX_SOFTWARE |
2674 SOF_TIMESTAMPING_RX_SOFTWARE |
2675 SOF_TIMESTAMPING_SOFTWARE |
2676 SOF_TIMESTAMPING_TX_HARDWARE |
2677 SOF_TIMESTAMPING_RX_HARDWARE |
2678 SOF_TIMESTAMPING_RAW_HARDWARE;
2679 info->tx_types =
2680 (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
2681 (1 << HWTSTAMP_TX_OFF) |
2682 (1 << HWTSTAMP_TX_ON);
2683 info->rx_filters =
2684 (1 << HWTSTAMP_FILTER_NONE) |
2685 (1 << HWTSTAMP_FILTER_ALL);
2686
2687 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
2688
2689 return 0;
2690}
2691
2692static struct macb_ptp_info gem_ptp_info = {
2693 .ptp_init = gem_ptp_init,
2694 .ptp_remove = gem_ptp_remove,
2695 .get_ptp_max_adj = gem_get_ptp_max_adj,
2696 .get_tsu_rate = gem_get_tsu_rate,
2697 .get_ts_info = gem_get_ts_info,
2698 .get_hwtst = gem_get_hwtst,
2699 .set_hwtst = gem_set_hwtst,
2700};
2701#endif
2702
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02002703static int macb_get_ts_info(struct net_device *netdev,
2704 struct ethtool_ts_info *info)
2705{
2706 struct macb *bp = netdev_priv(netdev);
2707
2708 if (bp->ptp_info)
2709 return bp->ptp_info->get_ts_info(netdev, info);
2710
2711 return ethtool_op_get_ts_info(netdev, info);
2712}
2713
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002714static void gem_enable_flow_filters(struct macb *bp, bool enable)
2715{
2716 struct ethtool_rx_fs_item *item;
2717 u32 t2_scr;
2718 int num_t2_scr;
2719
2720 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
2721
2722 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2723 struct ethtool_rx_flow_spec *fs = &item->fs;
2724 struct ethtool_tcpip4_spec *tp4sp_m;
2725
2726 if (fs->location >= num_t2_scr)
2727 continue;
2728
2729 t2_scr = gem_readl_n(bp, SCRT2, fs->location);
2730
2731 /* enable/disable screener regs for the flow entry */
2732 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
2733
2734 /* only enable fields with no masking */
2735 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
2736
2737 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
2738 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
2739 else
2740 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
2741
2742 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
2743 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
2744 else
2745 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
2746
2747 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
2748 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
2749 else
2750 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
2751
2752 gem_writel_n(bp, SCRT2, fs->location, t2_scr);
2753 }
2754}
2755
2756static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
2757{
2758 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
2759 uint16_t index = fs->location;
2760 u32 w0, w1, t2_scr;
2761 bool cmp_a = false;
2762 bool cmp_b = false;
2763 bool cmp_c = false;
2764
2765 tp4sp_v = &(fs->h_u.tcp_ip4_spec);
2766 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
2767
2768 /* ignore field if any masking set */
2769 if (tp4sp_m->ip4src == 0xFFFFFFFF) {
2770 /* 1st compare reg - IP source address */
2771 w0 = 0;
2772 w1 = 0;
2773 w0 = tp4sp_v->ip4src;
2774 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
2775 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
2776 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
2777 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
2778 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
2779 cmp_a = true;
2780 }
2781
2782 /* ignore field if any masking set */
2783 if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
2784 /* 2nd compare reg - IP destination address */
2785 w0 = 0;
2786 w1 = 0;
2787 w0 = tp4sp_v->ip4dst;
2788 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
2789 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
2790 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
2791 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
2792 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
2793 cmp_b = true;
2794 }
2795
2796 /* ignore both port fields if masking set in both */
2797 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
2798 /* 3rd compare reg - source port, destination port */
2799 w0 = 0;
2800 w1 = 0;
2801 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
2802 if (tp4sp_m->psrc == tp4sp_m->pdst) {
2803 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
2804 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
2805 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
2806 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
2807 } else {
2808 /* only one port definition */
2809 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
2810 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
2811 if (tp4sp_m->psrc == 0xFFFF) { /* src port */
2812 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
2813 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
2814 } else { /* dst port */
2815 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
2816 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
2817 }
2818 }
2819 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
2820 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
2821 cmp_c = true;
2822 }
2823
2824 t2_scr = 0;
2825 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
2826 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
2827 if (cmp_a)
2828 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
2829 if (cmp_b)
2830 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
2831 if (cmp_c)
2832 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
2833 gem_writel_n(bp, SCRT2, index, t2_scr);
2834}
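/*
 * Usage sketch (illustrative): a 4-tuple rule such as
 *
 *	# ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 \
 *		dst-port 5001 action 1 loc 0
 *
 * arrives here as an ethtool_rx_flow_spec with the ip4src and pdst masks
 * fully set; it programs screener 0 plus the IP-source and port compare
 * register pairs (cmp_a and cmp_c above) and steers matching frames to
 * queue 1.
 */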
2835
2836static int gem_add_flow_filter(struct net_device *netdev,
2837 struct ethtool_rxnfc *cmd)
2838{
2839 struct macb *bp = netdev_priv(netdev);
2840 struct ethtool_rx_flow_spec *fs = &cmd->fs;
2841 struct ethtool_rx_fs_item *item, *newfs;
Julia Cartwright7038cdb2017-12-05 18:02:49 -06002842 unsigned long flags;
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002843 int ret = -EINVAL;
2844 bool added = false;
2845
Julia Cartwrightcc1674e2017-12-05 18:02:50 -06002846 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
	if (!newfs)
2848 return -ENOMEM;
2849 memcpy(&newfs->fs, fs, sizeof(newfs->fs));
2850
2851 netdev_dbg(netdev,
2852 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
2853 fs->flow_type, (int)fs->ring_cookie, fs->location,
2854 htonl(fs->h_u.tcp_ip4_spec.ip4src),
2855 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
2856 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
2857
Julia Cartwright7038cdb2017-12-05 18:02:49 -06002858 spin_lock_irqsave(&bp->rx_fs_lock, flags);
2859
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002860 /* find correct place to add in list */
Julia Cartwrighta3da8ad2017-12-05 18:02:48 -06002861 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2862 if (item->fs.location > newfs->fs.location) {
2863 list_add_tail(&newfs->list, &item->list);
2864 added = true;
2865 break;
2866 } else if (item->fs.location == fs->location) {
2867 netdev_err(netdev, "Rule not added: location %d not free!\n",
2868 fs->location);
2869 ret = -EBUSY;
2870 goto err;
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002871 }
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002872 }
Julia Cartwrighta3da8ad2017-12-05 18:02:48 -06002873 if (!added)
2874 list_add_tail(&newfs->list, &bp->rx_fs_list.list);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002875
2876 gem_prog_cmp_regs(bp, fs);
2877 bp->rx_fs_list.count++;
2878 /* enable filtering if NTUPLE on */
2879 if (netdev->features & NETIF_F_NTUPLE)
2880 gem_enable_flow_filters(bp, 1);
2881
Julia Cartwright7038cdb2017-12-05 18:02:49 -06002882 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002883 return 0;
2884
2885err:
Julia Cartwright7038cdb2017-12-05 18:02:49 -06002886 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002887 kfree(newfs);
2888 return ret;
2889}
2890
2891static int gem_del_flow_filter(struct net_device *netdev,
2892 struct ethtool_rxnfc *cmd)
2893{
2894 struct macb *bp = netdev_priv(netdev);
2895 struct ethtool_rx_fs_item *item;
2896 struct ethtool_rx_flow_spec *fs;
Julia Cartwright7038cdb2017-12-05 18:02:49 -06002897 unsigned long flags;
2898
2899 spin_lock_irqsave(&bp->rx_fs_lock, flags);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002900
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002901 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2902 if (item->fs.location == cmd->fs.location) {
2903 /* disable screener regs for the flow entry */
2904 fs = &(item->fs);
2905 netdev_dbg(netdev,
2906 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
2907 fs->flow_type, (int)fs->ring_cookie, fs->location,
2908 htonl(fs->h_u.tcp_ip4_spec.ip4src),
2909 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
2910 htons(fs->h_u.tcp_ip4_spec.psrc),
2911 htons(fs->h_u.tcp_ip4_spec.pdst));
2912
2913 gem_writel_n(bp, SCRT2, fs->location, 0);
2914
2915 list_del(&item->list);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002916 bp->rx_fs_list.count--;
Julia Cartwright7038cdb2017-12-05 18:02:49 -06002917 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2918 kfree(item);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002919 return 0;
2920 }
2921 }
2922
Julia Cartwright7038cdb2017-12-05 18:02:49 -06002923 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002924 return -EINVAL;
2925}
2926
2927static int gem_get_flow_entry(struct net_device *netdev,
2928 struct ethtool_rxnfc *cmd)
2929{
2930 struct macb *bp = netdev_priv(netdev);
2931 struct ethtool_rx_fs_item *item;
2932
2933 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2934 if (item->fs.location == cmd->fs.location) {
2935 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
2936 return 0;
2937 }
2938 }
2939 return -EINVAL;
2940}
2941
2942static int gem_get_all_flow_entries(struct net_device *netdev,
2943 struct ethtool_rxnfc *cmd, u32 *rule_locs)
2944{
2945 struct macb *bp = netdev_priv(netdev);
2946 struct ethtool_rx_fs_item *item;
2947 uint32_t cnt = 0;
2948
2949 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2950 if (cnt == cmd->rule_cnt)
2951 return -EMSGSIZE;
2952 rule_locs[cnt] = item->fs.location;
2953 cnt++;
2954 }
2955 cmd->data = bp->max_tuples;
2956 cmd->rule_cnt = cnt;
2957
2958 return 0;
2959}
2960
2961static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
2962 u32 *rule_locs)
2963{
2964 struct macb *bp = netdev_priv(netdev);
2965 int ret = 0;
2966
2967 switch (cmd->cmd) {
2968 case ETHTOOL_GRXRINGS:
2969 cmd->data = bp->num_queues;
2970 break;
2971 case ETHTOOL_GRXCLSRLCNT:
2972 cmd->rule_cnt = bp->rx_fs_list.count;
2973 break;
2974 case ETHTOOL_GRXCLSRULE:
2975 ret = gem_get_flow_entry(netdev, cmd);
2976 break;
2977 case ETHTOOL_GRXCLSRLALL:
2978 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
2979 break;
2980 default:
2981 netdev_err(netdev,
2982 "Command parameter %d is not supported\n", cmd->cmd);
2983 ret = -EOPNOTSUPP;
2984 }
2985
2986 return ret;
2987}
2988
2989static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
2990{
2991 struct macb *bp = netdev_priv(netdev);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002992 int ret;
2993
Rafal Oziebloae8223de2017-11-30 18:20:44 +00002994 switch (cmd->cmd) {
2995 case ETHTOOL_SRXCLSRLINS:
2996 if ((cmd->fs.location >= bp->max_tuples)
2997 || (cmd->fs.ring_cookie >= bp->num_queues)) {
2998 ret = -EINVAL;
2999 break;
3000 }
3001 ret = gem_add_flow_filter(netdev, cmd);
3002 break;
3003 case ETHTOOL_SRXCLSRLDEL:
3004 ret = gem_del_flow_filter(netdev, cmd);
3005 break;
3006 default:
3007 netdev_err(netdev,
3008 "Command parameter %d is not supported\n", cmd->cmd);
3009 ret = -EOPNOTSUPP;
3010 }
3011
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003012 return ret;
3013}
3014
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003015static const struct ethtool_ops macb_ethtool_ops = {
Nicolas Ferred1d1b532012-10-31 06:04:56 +00003016 .get_regs_len = macb_get_regs_len,
3017 .get_regs = macb_get_regs,
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003018 .get_link = ethtool_op_get_link,
Richard Cochran17f393e2012-04-03 22:59:31 +00003019 .get_ts_info = ethtool_op_get_ts_info,
Sergio Prado3e2a5e12016-02-09 12:07:16 -02003020 .get_wol = macb_get_wol,
3021 .set_wol = macb_set_wol,
Philippe Reynes176275a2016-06-22 00:32:36 +02003022 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3023 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Zach Brown8441bb32016-10-19 09:56:58 -05003024 .get_ringparam = macb_get_ringparam,
3025 .set_ringparam = macb_set_ringparam,
Xander Huff8cd5a562015-01-15 15:55:20 -06003026};
Xander Huff8cd5a562015-01-15 15:55:20 -06003027
Lad, Prabhakar8093b1c2015-02-05 16:21:07 +00003028static const struct ethtool_ops gem_ethtool_ops = {
Xander Huff8cd5a562015-01-15 15:55:20 -06003029 .get_regs_len = macb_get_regs_len,
3030 .get_regs = macb_get_regs,
3031 .get_link = ethtool_op_get_link,
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02003032 .get_ts_info = macb_get_ts_info,
Xander Huff3ff13f12015-01-13 16:15:51 -06003033 .get_ethtool_stats = gem_get_ethtool_stats,
3034 .get_strings = gem_get_ethtool_strings,
3035 .get_sset_count = gem_get_sset_count,
Philippe Reynes176275a2016-06-22 00:32:36 +02003036 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3037 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Zach Brown8441bb32016-10-19 09:56:58 -05003038 .get_ringparam = macb_get_ringparam,
3039 .set_ringparam = macb_set_ringparam,
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003040 .get_rxnfc = gem_get_rxnfc,
3041 .set_rxnfc = gem_set_rxnfc,
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003042};
3043
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003044static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003045{
Philippe Reynes0a912812016-06-22 00:32:35 +02003046 struct phy_device *phydev = dev->phydev;
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02003047 struct macb *bp = netdev_priv(dev);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003048
3049 if (!netif_running(dev))
3050 return -EINVAL;
3051
frederic RODO6c36a702007-07-12 19:07:24 +02003052 if (!phydev)
3053 return -ENODEV;
3054
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02003055 if (!bp->ptp_info)
3056 return phy_mii_ioctl(phydev, rq, cmd);
3057
3058 switch (cmd) {
3059 case SIOCSHWTSTAMP:
3060 return bp->ptp_info->set_hwtst(dev, rq, cmd);
3061 case SIOCGHWTSTAMP:
3062 return bp->ptp_info->get_hwtst(dev, rq);
3063 default:
3064 return phy_mii_ioctl(phydev, rq, cmd);
3065 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003066}
3067
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003068static int macb_set_features(struct net_device *netdev,
3069 netdev_features_t features)
3070{
3071 struct macb *bp = netdev_priv(netdev);
3072 netdev_features_t changed = features ^ netdev->features;
3073
3074 /* TX checksum offload */
3075 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
3076 u32 dmacfg;
3077
3078 dmacfg = gem_readl(bp, DMACFG);
3079 if (features & NETIF_F_HW_CSUM)
3080 dmacfg |= GEM_BIT(TXCOEN);
3081 else
3082 dmacfg &= ~GEM_BIT(TXCOEN);
3083 gem_writel(bp, DMACFG, dmacfg);
3084 }
3085
Cyrille Pitchen924ec532014-07-24 13:51:01 +02003086 /* RX checksum offload */
3087 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
3088 u32 netcfg;
3089
3090 netcfg = gem_readl(bp, NCFGR);
3091 if (features & NETIF_F_RXCSUM &&
3092 !(netdev->flags & IFF_PROMISC))
3093 netcfg |= GEM_BIT(RXCOEN);
3094 else
3095 netcfg &= ~GEM_BIT(RXCOEN);
3096 gem_writel(bp, NCFGR, netcfg);
3097 }
3098
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003099 /* RX Flow Filters */
3100 if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
3101 bool turn_on = features & NETIF_F_NTUPLE;
3102
3103 gem_enable_flow_filters(bp, turn_on);
3104 }
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003105 return 0;
3106}
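/*
 * Usage sketch (illustrative): the offloads above are flipped at runtime
 * with ethtool, e.g.:
 *
 *	# ethtool -K eth0 tx off	(clears NETIF_F_HW_CSUM -> TXCOEN)
 *	# ethtool -K eth0 ntuple on	(enables the flow filters)
 *
 * Each branch is a read-modify-write of a single config register so that
 * unrelated bits are preserved.
 */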
3107
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003108static const struct net_device_ops macb_netdev_ops = {
3109 .ndo_open = macb_open,
3110 .ndo_stop = macb_close,
3111 .ndo_start_xmit = macb_start_xmit,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00003112 .ndo_set_rx_mode = macb_set_rx_mode,
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003113 .ndo_get_stats = macb_get_stats,
3114 .ndo_do_ioctl = macb_ioctl,
3115 .ndo_validate_addr = eth_validate_addr,
Harini Katakama5898ea2015-05-06 22:27:18 +05303116 .ndo_change_mtu = macb_change_mtu,
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003117 .ndo_set_mac_address = eth_mac_addr,
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07003118#ifdef CONFIG_NET_POLL_CONTROLLER
3119 .ndo_poll_controller = macb_poll_controller,
3120#endif
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003121 .ndo_set_features = macb_set_features,
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00003122 .ndo_features_check = macb_features_check,
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003123};
3124
Moritz Fischer64ec42f2016-03-29 19:11:12 -07003125/* Configure peripheral capabilities according to device tree
Nicolas Ferree1755872014-07-24 13:50:58 +02003126 * and integration options used
3127 */
Moritz Fischer64ec42f2016-03-29 19:11:12 -07003128static void macb_configure_caps(struct macb *bp,
3129 const struct macb_config *dt_conf)
Nicolas Ferree1755872014-07-24 13:50:58 +02003130{
3131 u32 dcfg;
Nicolas Ferree1755872014-07-24 13:50:58 +02003132
Nicolas Ferref6970502015-03-31 15:02:01 +02003133 if (dt_conf)
3134 bp->caps = dt_conf->caps;
3135
Andy Shevchenkof2ce8a92015-07-24 21:23:59 +03003136 if (hw_is_gem(bp->regs, bp->native_io)) {
Nicolas Ferree1755872014-07-24 13:50:58 +02003137 bp->caps |= MACB_CAPS_MACB_IS_GEM;
3138
Nicolas Ferree1755872014-07-24 13:50:58 +02003139 dcfg = gem_readl(bp, DCFG1);
3140 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
3141 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
3142 dcfg = gem_readl(bp, DCFG2);
3143 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
3144 bp->caps |= MACB_CAPS_FIFO_MODE;
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01003145#ifdef CONFIG_MACB_USE_HWSTAMP
3146 if (gem_has_ptp(bp)) {
		if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) {
			pr_err("GEM doesn't support hardware ptp.\n");
		} else {
			bp->hw_dma_cap |= HW_DMA_CAP_PTP;
			bp->ptp_info = &gem_ptp_info;
		}
Rafal Ozieblo7b429612017-06-29 07:12:51 +01003153 }
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01003154#endif
Nicolas Ferree1755872014-07-24 13:50:58 +02003155 }
3156
Andy Shevchenkoa35919e2015-07-24 21:24:01 +03003157 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
Nicolas Ferree1755872014-07-24 13:50:58 +02003158}
3159
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003160static void macb_probe_queues(void __iomem *mem,
Andy Shevchenkof2ce8a92015-07-24 21:23:59 +03003161 bool native_io,
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003162 unsigned int *queue_mask,
3163 unsigned int *num_queues)
3164{
3165 unsigned int hw_q;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003166
3167 *queue_mask = 0x1;
3168 *num_queues = 1;
3169
	/* is it macb or gem?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and the
	 * MACB_CAPS_MACB_IS_GEM flag is not set yet
	 */
Andy Shevchenkof2ce8a92015-07-24 21:23:59 +03003176 if (!hw_is_gem(mem, native_io))
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003177 return;
3178
3179 /* bit 0 is never set but queue 0 always exists */
Arun Chandrana50dad32015-02-18 16:59:35 +05303180 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
3181
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003182 *queue_mask |= 0x1;
3183
3184 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
3185 if (*queue_mask & (1 << hw_q))
3186 (*num_queues)++;
3187}
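/*
 * Worked example (illustrative): a GEM whose DCFG6 low byte reads 0x0e
 * advertises queues 1-3; since bit 0 is never set by hardware but queue 0
 * always exists, the code above produces *queue_mask = 0x0f and
 * *num_queues = 4.
 */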
3188
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003189static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303190 struct clk **hclk, struct clk **tx_clk,
3191 struct clk **rx_clk)
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003192{
Bartosz Folta83a77e92016-12-14 06:39:15 +00003193 struct macb_platform_data *pdata;
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003194 int err;
3195
Bartosz Folta83a77e92016-12-14 06:39:15 +00003196 pdata = dev_get_platdata(&pdev->dev);
3197 if (pdata) {
3198 *pclk = pdata->pclk;
3199 *hclk = pdata->hclk;
3200 } else {
3201 *pclk = devm_clk_get(&pdev->dev, "pclk");
3202 *hclk = devm_clk_get(&pdev->dev, "hclk");
3203 }
3204
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003205 if (IS_ERR(*pclk)) {
3206 err = PTR_ERR(*pclk);
		dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
3208 return err;
3209 }
3210
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003211 if (IS_ERR(*hclk)) {
3212 err = PTR_ERR(*hclk);
		dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
3214 return err;
3215 }
3216
3217 *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
3218 if (IS_ERR(*tx_clk))
3219 *tx_clk = NULL;
3220
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303221 *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
3222 if (IS_ERR(*rx_clk))
3223 *rx_clk = NULL;
3224
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003225 err = clk_prepare_enable(*pclk);
3226 if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3228 return err;
3229 }
3230
3231 err = clk_prepare_enable(*hclk);
3232 if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
3234 goto err_disable_pclk;
3235 }
3236
3237 err = clk_prepare_enable(*tx_clk);
3238 if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
3240 goto err_disable_hclk;
3241 }
3242
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303243 err = clk_prepare_enable(*rx_clk);
3244 if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
3246 goto err_disable_txclk;
3247 }
3248
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003249 return 0;
3250
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303251err_disable_txclk:
3252 clk_disable_unprepare(*tx_clk);
3253
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003254err_disable_hclk:
3255 clk_disable_unprepare(*hclk);
3256
3257err_disable_pclk:
3258 clk_disable_unprepare(*pclk);
3259
3260 return err;
3261}
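/*
 * Note on the optional clocks above (illustrative): tx_clk and rx_clk may
 * legitimately be absent, so their IS_ERR() results are converted to
 * NULL; the common clk API treats a NULL clock as a dummy, which makes
 * the clk_prepare_enable()/clk_disable_unprepare() calls on them harmless
 * no-ops.
 */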
3262
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003263static int macb_init(struct platform_device *pdev)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003264{
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003265 struct net_device *dev = platform_get_drvdata(pdev);
Nicolas Ferrebfa09142015-03-31 15:01:59 +02003266 unsigned int hw_q, q;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003267 struct macb *bp = netdev_priv(dev);
3268 struct macb_queue *queue;
3269 int err;
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003270 u32 val, reg;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003271
Zach Brownb410d132016-10-19 09:56:57 -05003272 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
3273 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
3274
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003275 /* set the queue register mapping once for all: queue0 has a special
3276 * register mapping but we don't want to test the queue index then
3277 * compute the corresponding register offset at run time.
3278 */
Cyrille Pitchencf250de2014-12-15 15:13:32 +01003279 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
Nicolas Ferrebfa09142015-03-31 15:01:59 +02003280 if (!(bp->queue_mask & (1 << hw_q)))
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003281 continue;
Jamie Iles461845d2011-03-08 20:19:23 +00003282
Cyrille Pitchencf250de2014-12-15 15:13:32 +01003283 queue = &bp->queues[q];
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003284 queue->bp = bp;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003285 netif_napi_add(dev, &queue->napi, macb_poll, 64);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003286 if (hw_q) {
3287 queue->ISR = GEM_ISR(hw_q - 1);
3288 queue->IER = GEM_IER(hw_q - 1);
3289 queue->IDR = GEM_IDR(hw_q - 1);
3290 queue->IMR = GEM_IMR(hw_q - 1);
3291 queue->TBQP = GEM_TBQP(hw_q - 1);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003292 queue->RBQP = GEM_RBQP(hw_q - 1);
3293 queue->RBQS = GEM_RBQS(hw_q - 1);
Harini Katakamfff80192016-08-09 13:15:53 +05303294#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003295 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003296 queue->TBQPH = GEM_TBQPH(hw_q - 1);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003297 queue->RBQPH = GEM_RBQPH(hw_q - 1);
3298 }
Harini Katakamfff80192016-08-09 13:15:53 +05303299#endif
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003300 } else {
3301 /* queue0 uses legacy registers */
3302 queue->ISR = MACB_ISR;
3303 queue->IER = MACB_IER;
3304 queue->IDR = MACB_IDR;
3305 queue->IMR = MACB_IMR;
3306 queue->TBQP = MACB_TBQP;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003307 queue->RBQP = MACB_RBQP;
Harini Katakamfff80192016-08-09 13:15:53 +05303308#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003309 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003310 queue->TBQPH = MACB_TBQPH;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003311 queue->RBQPH = MACB_RBQPH;
3312 }
Harini Katakamfff80192016-08-09 13:15:53 +05303313#endif
Soren Brinkmanne1824df2013-12-10 16:07:23 -08003314 }
Soren Brinkmanne1824df2013-12-10 16:07:23 -08003315
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003316 /* get irq: here we use the linux queue index, not the hardware
3317 * queue index. the queue irq definitions in the device tree
3318 * must remove the optional gaps that could exist in the
3319 * hardware queue mask.
3320 */
Cyrille Pitchencf250de2014-12-15 15:13:32 +01003321 queue->irq = platform_get_irq(pdev, q);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003322 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
Punnaiah Choudary Kalluri20488232015-03-06 18:29:12 +01003323 IRQF_SHARED, dev->name, queue);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003324 if (err) {
3325 dev_err(&pdev->dev,
3326 "Unable to request IRQ %d (error %d)\n",
3327 queue->irq, err);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003328 return err;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003329 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003330
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003331 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
Cyrille Pitchencf250de2014-12-15 15:13:32 +01003332 q++;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003333 }
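	/*
	 * Worked example (illustrative): with bp->queue_mask == 0x0b,
	 * hardware queues 0, 1 and 3 exist and map to linux queues
	 * q = 0, 1 and 2, so the IRQ at platform index 2 services
	 * hardware queue 3.
	 */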
3334
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003335 dev->netdev_ops = &macb_netdev_ops;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003336
	/* setup appropriate routines according to adapter type */
3338 if (macb_is_gem(bp)) {
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02003339 bp->max_tx_length = GEM_MAX_TX_LEN;
Nicolas Ferre4df95132013-06-04 21:57:12 +00003340 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
3341 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
3342 bp->macbgem_ops.mog_init_rings = gem_init_rings;
3343 bp->macbgem_ops.mog_rx = gem_rx;
Xander Huff8cd5a562015-01-15 15:55:20 -06003344 dev->ethtool_ops = &gem_ethtool_ops;
Nicolas Ferre4df95132013-06-04 21:57:12 +00003345 } else {
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02003346 bp->max_tx_length = MACB_MAX_TX_LEN;
Nicolas Ferre4df95132013-06-04 21:57:12 +00003347 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
3348 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
3349 bp->macbgem_ops.mog_init_rings = macb_init_rings;
3350 bp->macbgem_ops.mog_rx = macb_rx;
Xander Huff8cd5a562015-01-15 15:55:20 -06003351 dev->ethtool_ops = &macb_ethtool_ops;
Nicolas Ferre4df95132013-06-04 21:57:12 +00003352 }
3353
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02003354 /* Set features */
3355 dev->hw_features = NETIF_F_SG;
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00003356
3357 /* Check LSO capability */
3358 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
3359 dev->hw_features |= MACB_NETIF_LSO;
3360
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003361 /* Checksum offload is only available on gem with packet buffer */
3362 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
Cyrille Pitchen924ec532014-07-24 13:51:01 +02003363 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02003364 if (bp->caps & MACB_CAPS_SG_DISABLED)
3365 dev->hw_features &= ~NETIF_F_SG;
3366 dev->features = dev->hw_features;
3367
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003368 /* Check RX Flow Filters support.
3369 * Max Rx flows set by availability of screeners & compare regs:
	 * each 4-tuple definition requires 1 T2 screener reg + 3 compare regs
3371 */
3372 reg = gem_readl(bp, DCFG8);
3373 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
3374 GEM_BFEXT(T2SCR, reg));
3375 if (bp->max_tuples > 0) {
3376 /* also needs one ethtype match to check IPv4 */
3377 if (GEM_BFEXT(SCR2ETH, reg) > 0) {
3378 /* program this reg now */
3379 reg = 0;
3380 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
3381 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
3382 /* Filtering is supported in hw but don't enable it in kernel now */
3383 dev->hw_features |= NETIF_F_NTUPLE;
3384 /* init Rx flow definitions */
3385 INIT_LIST_HEAD(&bp->rx_fs_list.list);
3386 bp->rx_fs_list.count = 0;
3387 spin_lock_init(&bp->rx_fs_lock);
3388 } else
3389 bp->max_tuples = 0;
3390 }
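	/*
	 * Worked example (illustrative): a DCFG8 advertising 16 type-2
	 * screeners (T2SCR) and 32 compare registers (SCR2CMP) yields
	 * max_tuples = min(32 / 3, 16) = 10, as each 4-tuple rule needs
	 * one screener plus up to three compare register pairs.
	 */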
3391
Neil Armstrongce721a72016-01-05 14:39:16 +01003392 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
3393 val = 0;
3394 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
3395 val = GEM_BIT(RGMII);
3396 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01003397 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
Neil Armstrongce721a72016-01-05 14:39:16 +01003398 val = MACB_BIT(RMII);
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01003399 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
Neil Armstrongce721a72016-01-05 14:39:16 +01003400 val = MACB_BIT(MII);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003401
Neil Armstrongce721a72016-01-05 14:39:16 +01003402 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
3403 val |= MACB_BIT(CLKEN);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003404
Neil Armstrongce721a72016-01-05 14:39:16 +01003405 macb_or_gem_writel(bp, USRIO, val);
3406 }
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003407
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003408 /* Set MII management clock divider */
3409 val = macb_mdc_clk_div(bp);
3410 val |= macb_dbw(bp);
Punnaiah Choudary Kalluri022be252015-11-18 09:03:50 +05303411 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
3412 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003413 macb_writel(bp, NCFGR, val);
3414
3415 return 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003416}
3417
3418#if defined(CONFIG_OF)
3419/* 1518 rounded up */
3420#define AT91ETHER_MAX_RBUFF_SZ 0x600
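/* (0x600 = 1536 bytes: the 1518-byte maximum frame rounded up to the
 * next 64-byte multiple)
 */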
3421/* max number of receive buffers */
3422#define AT91ETHER_MAX_RX_DESCR 9
3423
3424/* Initialize and start the Receiver and Transmit subsystems */
3425static int at91ether_start(struct net_device *dev)
3426{
3427 struct macb *lp = netdev_priv(dev);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003428 struct macb_queue *q = &lp->queues[0];
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003429 struct macb_dma_desc *desc;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003430 dma_addr_t addr;
3431 u32 ctl;
3432 int i;
3433
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003434 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003435 (AT91ETHER_MAX_RX_DESCR *
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003436 macb_dma_desc_get_size(lp)),
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003437 &q->rx_ring_dma, GFP_KERNEL);
3438 if (!q->rx_ring)
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003439 return -ENOMEM;
3440
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003441 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003442 AT91ETHER_MAX_RX_DESCR *
3443 AT91ETHER_MAX_RBUFF_SZ,
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003444 &q->rx_buffers_dma, GFP_KERNEL);
3445 if (!q->rx_buffers) {
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003446 dma_free_coherent(&lp->pdev->dev,
3447 AT91ETHER_MAX_RX_DESCR *
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003448 macb_dma_desc_get_size(lp),
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003449 q->rx_ring, q->rx_ring_dma);
3450 q->rx_ring = NULL;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003451 return -ENOMEM;
3452 }
3453
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003454 addr = q->rx_buffers_dma;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003455 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003456 desc = macb_rx_desc(q, i);
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003457 macb_set_addr(lp, desc, addr);
3458 desc->ctrl = 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003459 addr += AT91ETHER_MAX_RBUFF_SZ;
3460 }
3461
3462 /* Set the Wrap bit on the last descriptor */
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003463 desc->addr |= MACB_BIT(RX_WRAP);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003464
3465 /* Reset buffer index */
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003466 q->rx_tail = 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003467
3468 /* Program address of descriptor list in Rx Buffer Queue register */
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003469 macb_writel(lp, RBQP, q->rx_ring_dma);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003470
3471 /* Enable Receive and Transmit */
3472 ctl = macb_readl(lp, NCR);
3473 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
3474
3475 return 0;
3476}
3477
3478/* Open the ethernet interface */
3479static int at91ether_open(struct net_device *dev)
3480{
3481 struct macb *lp = netdev_priv(dev);
3482 u32 ctl;
3483 int ret;
3484
3485 /* Clear internal statistics */
3486 ctl = macb_readl(lp, NCR);
3487 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
3488
3489 macb_set_hwaddr(lp);
3490
3491 ret = at91ether_start(dev);
3492 if (ret)
3493 return ret;
3494
3495 /* Enable MAC interrupts */
3496 macb_writel(lp, IER, MACB_BIT(RCOMP) |
3497 MACB_BIT(RXUBR) |
3498 MACB_BIT(ISR_TUND) |
3499 MACB_BIT(ISR_RLE) |
3500 MACB_BIT(TCOMP) |
3501 MACB_BIT(ISR_ROVR) |
3502 MACB_BIT(HRESP));
3503
3504 /* schedule a link state check */
Philippe Reynes0a912812016-06-22 00:32:35 +02003505 phy_start(dev->phydev);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003506
3507 netif_start_queue(dev);
3508
3509 return 0;
3510}
3511
3512/* Close the interface */
3513static int at91ether_close(struct net_device *dev)
3514{
3515 struct macb *lp = netdev_priv(dev);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003516 struct macb_queue *q = &lp->queues[0];
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003517 u32 ctl;
3518
3519 /* Disable Receiver and Transmitter */
3520 ctl = macb_readl(lp, NCR);
3521 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3522
3523 /* Disable MAC interrupts */
3524 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3525 MACB_BIT(RXUBR) |
3526 MACB_BIT(ISR_TUND) |
3527 MACB_BIT(ISR_RLE) |
3528 MACB_BIT(TCOMP) |
3529 MACB_BIT(ISR_ROVR) |
3530 MACB_BIT(HRESP));
3531
3532 netif_stop_queue(dev);
3533
3534 dma_free_coherent(&lp->pdev->dev,
3535 AT91ETHER_MAX_RX_DESCR *
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003536 macb_dma_desc_get_size(lp),
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003537 q->rx_ring, q->rx_ring_dma);
3538 q->rx_ring = NULL;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003539
3540 dma_free_coherent(&lp->pdev->dev,
3541 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003542 q->rx_buffers, q->rx_buffers_dma);
3543 q->rx_buffers = NULL;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003544
3545 return 0;
3546}
3547
3548/* Transmit packet */
3549static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
3550{
3551 struct macb *lp = netdev_priv(dev);
3552
3553 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
3554 netif_stop_queue(dev);
3555
3556 /* Store packet information (to free when Tx completed) */
3557 lp->skb = skb;
3558 lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
						  skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
3562 dev_kfree_skb_any(skb);
3563 dev->stats.tx_dropped++;
3564 netdev_err(dev, "%s: DMA mapping error\n", __func__);
3565 return NETDEV_TX_OK;
3566 }
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003567
3568 /* Set address of the data in the Transmit Address register */
3569 macb_writel(lp, TAR, lp->skb_physaddr);
3570 /* Set length of the packet in the Transmit Control register */
3571 macb_writel(lp, TCR, skb->len);
3572
3573 } else {
3574 netdev_err(dev, "%s called, but device is busy!\n", __func__);
3575 return NETDEV_TX_BUSY;
3576 }
3577
3578 return NETDEV_TX_OK;
3579}
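/*
 * Design note (illustrative): this EMAC has no TX descriptor ring, only
 * the TAR/TCR register pair, so at most one frame is in flight; the
 * queue is stopped for every packet and woken again from the TCOMP
 * handling in at91ether_interrupt().
 */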
3580
/* Extract received frames from the buffer descriptors and send them to
 * the upper layers.
3582 * (Called from interrupt context)
3583 */
3584static void at91ether_rx(struct net_device *dev)
3585{
3586 struct macb *lp = netdev_priv(dev);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003587 struct macb_queue *q = &lp->queues[0];
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003588 struct macb_dma_desc *desc;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003589 unsigned char *p_recv;
3590 struct sk_buff *skb;
3591 unsigned int pktlen;
3592
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003593 desc = macb_rx_desc(q, q->rx_tail);
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003594 while (desc->addr & MACB_BIT(RX_USED)) {
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003595 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003596 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003597 skb = netdev_alloc_skb(dev, pktlen + 2);
3598 if (skb) {
3599 skb_reserve(skb, 2);
Johannes Berg59ae1d12017-06-16 14:29:20 +02003600 skb_put_data(skb, p_recv, pktlen);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003601
3602 skb->protocol = eth_type_trans(skb, dev);
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02003603 dev->stats.rx_packets++;
3604 dev->stats.rx_bytes += pktlen;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003605 netif_rx(skb);
3606 } else {
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02003607 dev->stats.rx_dropped++;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003608 }
3609
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003610 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02003611 dev->stats.multicast++;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003612
3613 /* reset ownership bit */
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003614 desc->addr &= ~MACB_BIT(RX_USED);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003615
3616 /* wrap after last buffer */
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003617 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
3618 q->rx_tail = 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003619 else
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003620 q->rx_tail++;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003621
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003622 desc = macb_rx_desc(q, q->rx_tail);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003623 }
3624}
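/*
 * Descriptor ownership (illustrative): the controller sets RX_USED in
 * desc->addr once it has filled a buffer, and clearing the bit above
 * returns the descriptor to hardware; with only AT91ETHER_MAX_RX_DESCR
 * slots the loop must drain the ring promptly to avoid overruns.
 */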
3625
3626/* MAC interrupt handler */
3627static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
3628{
3629 struct net_device *dev = dev_id;
3630 struct macb *lp = netdev_priv(dev);
3631 u32 intstatus, ctl;
3632
3633 /* MAC Interrupt Status register indicates what interrupts are pending.
3634 * It is automatically cleared once read.
3635 */
3636 intstatus = macb_readl(lp, ISR);
3637
3638 /* Receive complete */
3639 if (intstatus & MACB_BIT(RCOMP))
3640 at91ether_rx(dev);
3641
3642 /* Transmit complete */
3643 if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOMP bit is set even if the transmission failed */
3645 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02003646 dev->stats.tx_errors++;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003647
3648 if (lp->skb) {
3649 dev_kfree_skb_irq(lp->skb);
3650 lp->skb = NULL;
			dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02003653 dev->stats.tx_packets++;
3654 dev->stats.tx_bytes += lp->skb_length;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003655 }
3656 netif_wake_queue(dev);
3657 }
3658
3659 /* Work-around for EMAC Errata section 41.3.1 */
3660 if (intstatus & MACB_BIT(RXUBR)) {
3661 ctl = macb_readl(lp, NCR);
3662 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
Zumeng Chenffac0e92016-11-28 21:55:00 +08003663 wmb();
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003664 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
3665 }
3666
3667 if (intstatus & MACB_BIT(ISR_ROVR))
3668 netdev_err(dev, "ROVR error\n");
3669
3670 return IRQ_HANDLED;
3671}
3672
3673#ifdef CONFIG_NET_POLL_CONTROLLER
3674static void at91ether_poll_controller(struct net_device *dev)
3675{
3676 unsigned long flags;
3677
3678 local_irq_save(flags);
3679 at91ether_interrupt(dev->irq, dev);
3680 local_irq_restore(flags);
3681}
3682#endif
3683
3684static const struct net_device_ops at91ether_netdev_ops = {
3685 .ndo_open = at91ether_open,
3686 .ndo_stop = at91ether_close,
3687 .ndo_start_xmit = at91ether_start_xmit,
3688 .ndo_get_stats = macb_get_stats,
3689 .ndo_set_rx_mode = macb_set_rx_mode,
3690 .ndo_set_mac_address = eth_mac_addr,
3691 .ndo_do_ioctl = macb_ioctl,
3692 .ndo_validate_addr = eth_validate_addr,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003693#ifdef CONFIG_NET_POLL_CONTROLLER
3694 .ndo_poll_controller = at91ether_poll_controller,
3695#endif
3696};
3697
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003698static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303699 struct clk **hclk, struct clk **tx_clk,
3700 struct clk **rx_clk)
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003701{
3702 int err;
3703
3704 *hclk = NULL;
3705 *tx_clk = NULL;
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303706 *rx_clk = NULL;
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003707
3708 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
3709 if (IS_ERR(*pclk))
3710 return PTR_ERR(*pclk);
3711
3712 err = clk_prepare_enable(*pclk);
3713 if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3715 return err;
3716 }
3717
3718 return 0;
3719}
3720
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003721static int at91ether_init(struct platform_device *pdev)
3722{
3723 struct net_device *dev = platform_get_drvdata(pdev);
3724 struct macb *bp = netdev_priv(dev);
3725 int err;
3726 u32 reg;
3727
Alexandre Bellonifec9d3b2018-06-26 10:44:01 +02003728 bp->queues[0].bp = bp;
3729
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003730 dev->netdev_ops = &at91ether_netdev_ops;
3731 dev->ethtool_ops = &macb_ethtool_ops;
3732
3733 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
3734 0, dev->name, dev);
3735 if (err)
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003736 return err;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003737
3738 macb_writel(bp, NCR, 0);
3739
3740 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
3741 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
3742 reg |= MACB_BIT(RM9200_RMII);
3743
3744 macb_writel(bp, NCFGR, reg);
3745
3746 return 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003747}
3748
David S. Miller3cef5c52015-03-09 23:38:02 -04003749static const struct macb_config at91sam9260_config = {
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01003750 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003751 .clk_init = macb_clk_init,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003752 .init = macb_init,
3753};
3754
David S. Miller3cef5c52015-03-09 23:38:02 -04003755static const struct macb_config pc302gem_config = {
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003756 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
3757 .dma_burst_length = 16,
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003758 .clk_init = macb_clk_init,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003759 .init = macb_init,
3760};
3761
Cyrille Pitchen5c8fe712015-06-18 16:27:23 +02003762static const struct macb_config sama5d2_config = {
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01003763 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
Cyrille Pitchen5c8fe712015-06-18 16:27:23 +02003764 .dma_burst_length = 16,
3765 .clk_init = macb_clk_init,
3766 .init = macb_init,
3767};
3768
David S. Miller3cef5c52015-03-09 23:38:02 -04003769static const struct macb_config sama5d3_config = {
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01003770 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
vishnuvardhan233a1582017-07-05 17:36:16 +02003771 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003772 .dma_burst_length = 16,
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003773 .clk_init = macb_clk_init,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003774 .init = macb_init,
vishnuvardhan233a1582017-07-05 17:36:16 +02003775 .jumbo_max_len = 10240,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003776};
3777
David S. Miller3cef5c52015-03-09 23:38:02 -04003778static const struct macb_config sama5d4_config = {
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01003779 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003780 .dma_burst_length = 4,
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003781 .clk_init = macb_clk_init,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003782 .init = macb_init,
3783};
3784
David S. Miller3cef5c52015-03-09 23:38:02 -04003785static const struct macb_config emac_config = {
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003786 .clk_init = at91ether_clk_init,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003787 .init = at91ether_init,
3788};
3789
Neil Armstronge611b5b2016-01-05 14:39:17 +01003790static const struct macb_config np4_config = {
3791 .caps = MACB_CAPS_USRIO_DISABLED,
3792 .clk_init = macb_clk_init,
3793 .init = macb_init,
3794};
David S. Miller36583eb2015-05-23 01:22:35 -04003795
Harini Katakam7b61f9c2015-05-06 22:27:16 +05303796static const struct macb_config zynqmp_config = {
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01003797 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
3798 MACB_CAPS_JUMBO |
3799 MACB_CAPS_GEM_HAS_PTP,
Harini Katakam7b61f9c2015-05-06 22:27:16 +05303800 .dma_burst_length = 16,
3801 .clk_init = macb_clk_init,
3802 .init = macb_init,
Harini Katakam98b5a0f42015-05-06 22:27:17 +05303803 .jumbo_max_len = 10240,
Harini Katakam7b61f9c2015-05-06 22:27:16 +05303804};
3805
Nathan Sullivan222ca8e2015-05-22 09:22:10 -05003806static const struct macb_config zynq_config = {
Punnaiah Choudary Kalluri7baaa902015-07-06 10:02:53 +05303807 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
Nathan Sullivan222ca8e2015-05-22 09:22:10 -05003808 .dma_burst_length = 16,
3809 .clk_init = macb_clk_init,
3810 .init = macb_init,
3811};
3812
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config },
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */

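/*
 * Fallback configuration used when probing without a device tree
 * match (or when the matched entry carries no .data): assume a fully
 * featured GEM with jumbo frame and PTP support.
 */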
static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

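/*
 * Probe flow, in outline: map the registers, bring up clocks through
 * the per-SoC clk_init hook, allocate the net_device, resolve the MAC
 * address and PHY mode, run the per-SoC init hook, set up the MDIO
 * bus, and finally register the net_device.
 */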
static int macb_probe(struct platform_device *pdev)
{
	const struct macb_config *macb_config = &default_gem_config;
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **, struct clk **)
					      = macb_config->clk_init;
	int (*init)(struct platform_device *) = macb_config->init;
	struct device_node *np = pdev->dev.of_node;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	unsigned int queue_mask, num_queues;
	struct macb_platform_data *pdata;
	bool native_io;
	struct phy_device *phydev;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
	if (err)
		return err;

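	/*
	 * Work out whether the controller's register layout matches
	 * CPU endianness, so the matching accessors can be installed
	 * on the private data below.
	 */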
	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

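	/*
	 * Advertise Wake-on-LAN (magic packet) only when the device
	 * tree opts in via the "magic-packet" property.
	 */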
	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	spin_lock_init(&bp->lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

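	/*
	 * GEM revisions that report DAW64 in DCFG6 can address more
	 * than 32 bits of DMA space: use a 44-bit mask and switch to
	 * the extended (64-bit) descriptor layout.
	 */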
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

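	/*
	 * MAC address precedence: device tree property first, then an
	 * nvmem cell, then whatever the bootloader left in the
	 * hardware registers.
	 */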
	mac = of_get_mac_address(np);
	if (mac) {
		ether_addr_copy(bp->dev->dev_addr, mac);
	} else {
		err = of_get_nvmem_mac_address(np, bp->dev->dev_addr);
		if (err) {
			if (err == -EPROBE_DEFER)
				goto err_out_free_netdev;
			macb_get_hwaddr(bp);
		}
	}

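	/*
	 * Without a "phy-mode" property, fall back to platform data,
	 * defaulting to MII (or RMII when the platform asks for it).
	 */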
	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	phydev = dev->phydev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

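	/*
	 * HRESP bus-error recovery is deferred to a tasklet so the
	 * interrupt handler itself can stay short.
	 */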
	tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
		     (unsigned long)bp);

	phy_attached_info(phydev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	return 0;

err_out_unregister_mdio:
	phy_disconnect(dev->phydev);
	mdiobus_unregister(bp->mii_bus);
	of_node_put(bp->phy_node);
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	mdiobus_free(bp->mii_bus);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);

	return err;
}

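/*
 * Tear down in roughly the reverse order of probe: detach the PHY,
 * remove the MDIO bus, unregister the net_device, then release the
 * clocks.
 */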
static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;
	struct device_node *np = pdev->dev.of_node;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		if (np && of_phy_is_fixed_link(np))
			of_phy_deregister_fixed_link(np);
		dev->phydev = NULL;
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
		of_node_put(bp->phy_node);
		free_netdev(dev);
	}

	return 0;
}

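/*
 * System sleep: with Wake-on-LAN armed, the MAC is kept clocked and
 * its WOL interrupt is made wakeup-capable; otherwise all clocks are
 * simply gated. Resume mirrors whichever path suspend took.
 */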
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
	} else {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}

	return 0;
}

static int __maybe_unused macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}

	netif_device_attach(netdev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);

static struct platform_driver macb_driver = {
	.probe = macb_probe,
	.remove = macb_remove,
	.driver = {
		.name = "macb",
		.of_match_table = of_match_ptr(macb_dt_ids),
		.pm = &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");