blob: fcbf1255ae5a919ad5fb052d9bcbb7045534cc9e [file] [log] [blame]
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001/*
2 * Broadcom GENET (Gigabit Ethernet) controller driver
3 *
4 * Copyright (c) 2014 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
Florian Fainelli1c1008c2014-02-13 16:08:47 -08009 */
10
11#define pr_fmt(fmt) "bcmgenet: " fmt
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/sched.h>
16#include <linux/types.h>
17#include <linux/fcntl.h>
18#include <linux/interrupt.h>
19#include <linux/string.h>
20#include <linux/if_ether.h>
21#include <linux/init.h>
22#include <linux/errno.h>
23#include <linux/delay.h>
24#include <linux/platform_device.h>
25#include <linux/dma-mapping.h>
26#include <linux/pm.h>
27#include <linux/clk.h>
Florian Fainelli1c1008c2014-02-13 16:08:47 -080028#include <linux/of.h>
29#include <linux/of_address.h>
30#include <linux/of_irq.h>
31#include <linux/of_net.h>
32#include <linux/of_platform.h>
33#include <net/arp.h>
34
35#include <linux/mii.h>
36#include <linux/ethtool.h>
37#include <linux/netdevice.h>
38#include <linux/inetdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/skbuff.h>
41#include <linux/in.h>
42#include <linux/ip.h>
43#include <linux/ipv6.h>
44#include <linux/phy.h>
45
46#include <asm/unaligned.h>
47
48#include "bcmgenet.h"
49
50/* Maximum number of hardware queues, downsized if needed */
51#define GENET_MAX_MQ_CNT 4
52
53/* Default highest priority queue for multi queue support */
54#define GENET_Q0_PRIORITY 0
55
56#define GENET_DEFAULT_BD_CNT \
57 (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)
58
59#define RX_BUF_LENGTH 2048
60#define SKB_ALIGNMENT 32
61
62/* Tx/Rx DMA register offset, skip 256 descriptors */
63#define WORDS_PER_BD(p) (p->hw_params->words_per_bd)
64#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32))
65
66#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
67 TOTAL_DESC * DMA_DESC_SIZE)
68
69#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
70 TOTAL_DESC * DMA_DESC_SIZE)
71
72static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -070073 void __iomem *d, u32 value)
Florian Fainelli1c1008c2014-02-13 16:08:47 -080074{
75 __raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
76}
77
78static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -070079 void __iomem *d)
Florian Fainelli1c1008c2014-02-13 16:08:47 -080080{
81 return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
82}
83
84static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
85 void __iomem *d,
86 dma_addr_t addr)
87{
88 __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
89
90 /* Register writes to GISB bus can take couple hundred nanoseconds
91 * and are done for each packet, save these expensive writes unless
Brian Norris7fc527f2014-07-29 14:34:14 -070092 * the platform is explicitly configured for 64-bits/LPAE.
Florian Fainelli1c1008c2014-02-13 16:08:47 -080093 */
94#ifdef CONFIG_PHYS_ADDR_T_64BIT
95 if (priv->hw_params->flags & GENET_HAS_40BITS)
96 __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
97#endif
98}
99
100/* Combined address + length/status setter */
101static inline void dmadesc_set(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700102 void __iomem *d, dma_addr_t addr, u32 val)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800103{
104 dmadesc_set_length_status(priv, d, val);
105 dmadesc_set_addr(priv, d, addr);
106}
107
108static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
109 void __iomem *d)
110{
111 dma_addr_t addr;
112
113 addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);
114
115 /* Register writes to GISB bus can take couple hundred nanoseconds
116 * and are done for each packet, save these expensive writes unless
Brian Norris7fc527f2014-07-29 14:34:14 -0700117 * the platform is explicitly configured for 64-bits/LPAE.
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800118 */
119#ifdef CONFIG_PHYS_ADDR_T_64BIT
120 if (priv->hw_params->flags & GENET_HAS_40BITS)
121 addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
122#endif
123 return addr;
124}
125
126#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x"
127
128#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
129 NETIF_MSG_LINK)
130
131static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
132{
133 if (GENET_IS_V1(priv))
134 return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
135 else
136 return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
137}
138
139static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
140{
141 if (GENET_IS_V1(priv))
142 bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
143 else
144 bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
145}
146
147/* These macros are defined to deal with register map change
148 * between GENET1.1 and GENET2. Only those currently being used
149 * by driver are defined.
150 */
151static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
152{
153 if (GENET_IS_V1(priv))
154 return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
155 else
156 return __raw_readl(priv->base +
157 priv->hw_params->tbuf_offset + TBUF_CTRL);
158}
159
160static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
161{
162 if (GENET_IS_V1(priv))
163 bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
164 else
165 __raw_writel(val, priv->base +
166 priv->hw_params->tbuf_offset + TBUF_CTRL);
167}
168
169static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
170{
171 if (GENET_IS_V1(priv))
172 return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
173 else
174 return __raw_readl(priv->base +
175 priv->hw_params->tbuf_offset + TBUF_BP_MC);
176}
177
178static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
179{
180 if (GENET_IS_V1(priv))
181 bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
182 else
183 __raw_writel(val, priv->base +
184 priv->hw_params->tbuf_offset + TBUF_BP_MC);
185}
186
187/* RX/TX DMA register accessors */
188enum dma_reg {
189 DMA_RING_CFG = 0,
190 DMA_CTRL,
191 DMA_STATUS,
192 DMA_SCB_BURST_SIZE,
193 DMA_ARB_CTRL,
Petri Gynther37742162014-10-07 09:30:01 -0700194 DMA_PRIORITY_0,
195 DMA_PRIORITY_1,
196 DMA_PRIORITY_2,
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800197};
198
199static const u8 bcmgenet_dma_regs_v3plus[] = {
200 [DMA_RING_CFG] = 0x00,
201 [DMA_CTRL] = 0x04,
202 [DMA_STATUS] = 0x08,
203 [DMA_SCB_BURST_SIZE] = 0x0C,
204 [DMA_ARB_CTRL] = 0x2C,
Petri Gynther37742162014-10-07 09:30:01 -0700205 [DMA_PRIORITY_0] = 0x30,
206 [DMA_PRIORITY_1] = 0x34,
207 [DMA_PRIORITY_2] = 0x38,
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800208};
209
210static const u8 bcmgenet_dma_regs_v2[] = {
211 [DMA_RING_CFG] = 0x00,
212 [DMA_CTRL] = 0x04,
213 [DMA_STATUS] = 0x08,
214 [DMA_SCB_BURST_SIZE] = 0x0C,
215 [DMA_ARB_CTRL] = 0x30,
Petri Gynther37742162014-10-07 09:30:01 -0700216 [DMA_PRIORITY_0] = 0x34,
217 [DMA_PRIORITY_1] = 0x38,
218 [DMA_PRIORITY_2] = 0x3C,
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800219};
220
221static const u8 bcmgenet_dma_regs_v1[] = {
222 [DMA_CTRL] = 0x00,
223 [DMA_STATUS] = 0x04,
224 [DMA_SCB_BURST_SIZE] = 0x0C,
225 [DMA_ARB_CTRL] = 0x30,
Petri Gynther37742162014-10-07 09:30:01 -0700226 [DMA_PRIORITY_0] = 0x34,
227 [DMA_PRIORITY_1] = 0x38,
228 [DMA_PRIORITY_2] = 0x3C,
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800229};
230
231/* Set at runtime once bcmgenet version is known */
232static const u8 *bcmgenet_dma_regs;
233
234static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
235{
236 return netdev_priv(dev_get_drvdata(dev));
237}
238
239static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700240 enum dma_reg r)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800241{
242 return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
243 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
244}
245
246static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
247 u32 val, enum dma_reg r)
248{
249 __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
250 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
251}
252
253static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700254 enum dma_reg r)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800255{
256 return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
257 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
258}
259
260static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
261 u32 val, enum dma_reg r)
262{
263 __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
264 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
265}
266
267/* RDMA/TDMA ring registers and accessors
268 * we merge the common fields and just prefix with T/D the registers
269 * having different meaning depending on the direction
270 */
271enum dma_ring_reg {
272 TDMA_READ_PTR = 0,
273 RDMA_WRITE_PTR = TDMA_READ_PTR,
274 TDMA_READ_PTR_HI,
275 RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
276 TDMA_CONS_INDEX,
277 RDMA_PROD_INDEX = TDMA_CONS_INDEX,
278 TDMA_PROD_INDEX,
279 RDMA_CONS_INDEX = TDMA_PROD_INDEX,
280 DMA_RING_BUF_SIZE,
281 DMA_START_ADDR,
282 DMA_START_ADDR_HI,
283 DMA_END_ADDR,
284 DMA_END_ADDR_HI,
285 DMA_MBUF_DONE_THRESH,
286 TDMA_FLOW_PERIOD,
287 RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
288 TDMA_WRITE_PTR,
289 RDMA_READ_PTR = TDMA_WRITE_PTR,
290 TDMA_WRITE_PTR_HI,
291 RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
292};
293
294/* GENET v4 supports 40-bits pointer addressing
295 * for obvious reasons the LO and HI word parts
296 * are contiguous, but this offsets the other
297 * registers.
298 */
299static const u8 genet_dma_ring_regs_v4[] = {
300 [TDMA_READ_PTR] = 0x00,
301 [TDMA_READ_PTR_HI] = 0x04,
302 [TDMA_CONS_INDEX] = 0x08,
303 [TDMA_PROD_INDEX] = 0x0C,
304 [DMA_RING_BUF_SIZE] = 0x10,
305 [DMA_START_ADDR] = 0x14,
306 [DMA_START_ADDR_HI] = 0x18,
307 [DMA_END_ADDR] = 0x1C,
308 [DMA_END_ADDR_HI] = 0x20,
309 [DMA_MBUF_DONE_THRESH] = 0x24,
310 [TDMA_FLOW_PERIOD] = 0x28,
311 [TDMA_WRITE_PTR] = 0x2C,
312 [TDMA_WRITE_PTR_HI] = 0x30,
313};
314
315static const u8 genet_dma_ring_regs_v123[] = {
316 [TDMA_READ_PTR] = 0x00,
317 [TDMA_CONS_INDEX] = 0x04,
318 [TDMA_PROD_INDEX] = 0x08,
319 [DMA_RING_BUF_SIZE] = 0x0C,
320 [DMA_START_ADDR] = 0x10,
321 [DMA_END_ADDR] = 0x14,
322 [DMA_MBUF_DONE_THRESH] = 0x18,
323 [TDMA_FLOW_PERIOD] = 0x1C,
324 [TDMA_WRITE_PTR] = 0x20,
325};
326
327/* Set at runtime once GENET version is known */
328static const u8 *genet_dma_ring_regs;
329
330static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700331 unsigned int ring,
332 enum dma_ring_reg r)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800333{
334 return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
335 (DMA_RING_SIZE * ring) +
336 genet_dma_ring_regs[r]);
337}
338
339static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700340 unsigned int ring, u32 val,
341 enum dma_ring_reg r)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800342{
343 __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
344 (DMA_RING_SIZE * ring) +
345 genet_dma_ring_regs[r]);
346}
347
348static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700349 unsigned int ring,
350 enum dma_ring_reg r)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800351{
352 return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
353 (DMA_RING_SIZE * ring) +
354 genet_dma_ring_regs[r]);
355}
356
357static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700358 unsigned int ring, u32 val,
359 enum dma_ring_reg r)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800360{
361 __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
362 (DMA_RING_SIZE * ring) +
363 genet_dma_ring_regs[r]);
364}
365
366static int bcmgenet_get_settings(struct net_device *dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700367 struct ethtool_cmd *cmd)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800368{
369 struct bcmgenet_priv *priv = netdev_priv(dev);
370
371 if (!netif_running(dev))
372 return -EINVAL;
373
374 if (!priv->phydev)
375 return -ENODEV;
376
377 return phy_ethtool_gset(priv->phydev, cmd);
378}
379
380static int bcmgenet_set_settings(struct net_device *dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700381 struct ethtool_cmd *cmd)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800382{
383 struct bcmgenet_priv *priv = netdev_priv(dev);
384
385 if (!netif_running(dev))
386 return -EINVAL;
387
388 if (!priv->phydev)
389 return -ENODEV;
390
391 return phy_ethtool_sset(priv->phydev, cmd);
392}
393
394static int bcmgenet_set_rx_csum(struct net_device *dev,
395 netdev_features_t wanted)
396{
397 struct bcmgenet_priv *priv = netdev_priv(dev);
398 u32 rbuf_chk_ctrl;
399 bool rx_csum_en;
400
401 rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
402
403 rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
404
405 /* enable rx checksumming */
406 if (rx_csum_en)
407 rbuf_chk_ctrl |= RBUF_RXCHK_EN;
408 else
409 rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
410 priv->desc_rxchk_en = rx_csum_en;
Florian Fainelliebe5e3c2014-03-26 21:18:39 -0700411
412 /* If UniMAC forwards CRC, we need to skip over it to get
413 * a valid CHK bit to be set in the per-packet status word
414 */
415 if (rx_csum_en && priv->crc_fwd_en)
416 rbuf_chk_ctrl |= RBUF_SKIP_FCS;
417 else
418 rbuf_chk_ctrl &= ~RBUF_SKIP_FCS;
419
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800420 bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
421
422 return 0;
423}
424
425static int bcmgenet_set_tx_csum(struct net_device *dev,
426 netdev_features_t wanted)
427{
428 struct bcmgenet_priv *priv = netdev_priv(dev);
429 bool desc_64b_en;
430 u32 tbuf_ctrl, rbuf_ctrl;
431
432 tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
433 rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
434
435 desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
436
437 /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
438 if (desc_64b_en) {
439 tbuf_ctrl |= RBUF_64B_EN;
440 rbuf_ctrl |= RBUF_64B_EN;
441 } else {
442 tbuf_ctrl &= ~RBUF_64B_EN;
443 rbuf_ctrl &= ~RBUF_64B_EN;
444 }
445 priv->desc_64b_en = desc_64b_en;
446
447 bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
448 bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
449
450 return 0;
451}
452
453static int bcmgenet_set_features(struct net_device *dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700454 netdev_features_t features)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800455{
456 netdev_features_t changed = features ^ dev->features;
457 netdev_features_t wanted = dev->wanted_features;
458 int ret = 0;
459
460 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
461 ret = bcmgenet_set_tx_csum(dev, wanted);
462 if (changed & (NETIF_F_RXCSUM))
463 ret = bcmgenet_set_rx_csum(dev, wanted);
464
465 return ret;
466}
467
468static u32 bcmgenet_get_msglevel(struct net_device *dev)
469{
470 struct bcmgenet_priv *priv = netdev_priv(dev);
471
472 return priv->msg_enable;
473}
474
475static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
476{
477 struct bcmgenet_priv *priv = netdev_priv(dev);
478
479 priv->msg_enable = level;
480}
481
482/* standard ethtool support functions. */
483enum bcmgenet_stat_type {
484 BCMGENET_STAT_NETDEV = -1,
485 BCMGENET_STAT_MIB_RX,
486 BCMGENET_STAT_MIB_TX,
487 BCMGENET_STAT_RUNT,
488 BCMGENET_STAT_MISC,
489};
490
491struct bcmgenet_stats {
492 char stat_string[ETH_GSTRING_LEN];
493 int stat_sizeof;
494 int stat_offset;
495 enum bcmgenet_stat_type type;
496 /* reg offset from UMAC base for misc counters */
497 u16 reg_offset;
498};
499
500#define STAT_NETDEV(m) { \
501 .stat_string = __stringify(m), \
502 .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
503 .stat_offset = offsetof(struct net_device_stats, m), \
504 .type = BCMGENET_STAT_NETDEV, \
505}
506
507#define STAT_GENET_MIB(str, m, _type) { \
508 .stat_string = str, \
509 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
510 .stat_offset = offsetof(struct bcmgenet_priv, m), \
511 .type = _type, \
512}
513
514#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
515#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
516#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
517
518#define STAT_GENET_MISC(str, m, offset) { \
519 .stat_string = str, \
520 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
521 .stat_offset = offsetof(struct bcmgenet_priv, m), \
522 .type = BCMGENET_STAT_MISC, \
523 .reg_offset = offset, \
524}
525
526
527/* There is a 0xC gap between the end of RX and beginning of TX stats and then
528 * between the end of TX stats and the beginning of the RX RUNT
529 */
530#define BCMGENET_STAT_OFFSET 0xc
531
532/* Hardware counters must be kept in sync because the order/offset
533 * is important here (order in structure declaration = order in hardware)
534 */
535static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
536 /* general stats */
537 STAT_NETDEV(rx_packets),
538 STAT_NETDEV(tx_packets),
539 STAT_NETDEV(rx_bytes),
540 STAT_NETDEV(tx_bytes),
541 STAT_NETDEV(rx_errors),
542 STAT_NETDEV(tx_errors),
543 STAT_NETDEV(rx_dropped),
544 STAT_NETDEV(tx_dropped),
545 STAT_NETDEV(multicast),
546 /* UniMAC RSV counters */
547 STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
548 STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
549 STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
550 STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
551 STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
552 STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
553 STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
554 STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
555 STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
556 STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
557 STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
558 STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
559 STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
560 STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
561 STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
562 STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
563 STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
564 STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
565 STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
566 STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
567 STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
568 STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
569 STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
570 STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
571 STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
572 STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
573 STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
574 STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
575 STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
576 /* UniMAC TSV counters */
577 STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
578 STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
579 STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
580 STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
581 STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
582 STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
583 STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
584 STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
585 STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
586 STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
587 STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
588 STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
589 STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
590 STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
591 STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
592 STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
593 STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
594 STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
595 STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
596 STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
597 STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
598 STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
599 STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
600 STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
601 STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
602 STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
603 STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
604 STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
605 STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
606 /* UniMAC RUNT counters */
607 STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
608 STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
609 STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
610 STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
611 /* Misc UniMAC counters */
612 STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
613 UMAC_RBUF_OVFL_CNT),
614 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
615 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
Florian Fainelli44c8bc32014-11-19 10:29:56 -0800616 STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
617 STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
618 STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800619};
620
621#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
622
623static void bcmgenet_get_drvinfo(struct net_device *dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700624 struct ethtool_drvinfo *info)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800625{
626 strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
627 strlcpy(info->version, "v2.0", sizeof(info->version));
628 info->n_stats = BCMGENET_STATS_LEN;
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800629}
630
631static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
632{
633 switch (string_set) {
634 case ETH_SS_STATS:
635 return BCMGENET_STATS_LEN;
636 default:
637 return -EOPNOTSUPP;
638 }
639}
640
Florian Fainellic91b7f62014-07-23 10:42:12 -0700641static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
642 u8 *data)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800643{
644 int i;
645
646 switch (stringset) {
647 case ETH_SS_STATS:
648 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
649 memcpy(data + i * ETH_GSTRING_LEN,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700650 bcmgenet_gstrings_stats[i].stat_string,
651 ETH_GSTRING_LEN);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800652 }
653 break;
654 }
655}
656
657static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
658{
659 int i, j = 0;
660
661 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
662 const struct bcmgenet_stats *s;
663 u8 offset = 0;
664 u32 val = 0;
665 char *p;
666
667 s = &bcmgenet_gstrings_stats[i];
668 switch (s->type) {
669 case BCMGENET_STAT_NETDEV:
670 continue;
671 case BCMGENET_STAT_MIB_RX:
672 case BCMGENET_STAT_MIB_TX:
673 case BCMGENET_STAT_RUNT:
674 if (s->type != BCMGENET_STAT_MIB_RX)
675 offset = BCMGENET_STAT_OFFSET;
Florian Fainellic91b7f62014-07-23 10:42:12 -0700676 val = bcmgenet_umac_readl(priv,
677 UMAC_MIB_START + j + offset);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800678 break;
679 case BCMGENET_STAT_MISC:
680 val = bcmgenet_umac_readl(priv, s->reg_offset);
681 /* clear if overflowed */
682 if (val == ~0)
683 bcmgenet_umac_writel(priv, 0, s->reg_offset);
684 break;
685 }
686
687 j += s->stat_sizeof;
688 p = (char *)priv + s->stat_offset;
689 *(u32 *)p = val;
690 }
691}
692
693static void bcmgenet_get_ethtool_stats(struct net_device *dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700694 struct ethtool_stats *stats,
695 u64 *data)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800696{
697 struct bcmgenet_priv *priv = netdev_priv(dev);
698 int i;
699
700 if (netif_running(dev))
701 bcmgenet_update_mib_counters(priv);
702
703 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
704 const struct bcmgenet_stats *s;
705 char *p;
706
707 s = &bcmgenet_gstrings_stats[i];
708 if (s->type == BCMGENET_STAT_NETDEV)
709 p = (char *)&dev->stats;
710 else
711 p = (char *)priv;
712 p += s->stat_offset;
713 data[i] = *(u32 *)p;
714 }
715}
716
717/* standard ethtool support functions. */
718static struct ethtool_ops bcmgenet_ethtool_ops = {
719 .get_strings = bcmgenet_get_strings,
720 .get_sset_count = bcmgenet_get_sset_count,
721 .get_ethtool_stats = bcmgenet_get_ethtool_stats,
722 .get_settings = bcmgenet_get_settings,
723 .set_settings = bcmgenet_set_settings,
724 .get_drvinfo = bcmgenet_get_drvinfo,
725 .get_link = ethtool_op_get_link,
726 .get_msglevel = bcmgenet_get_msglevel,
727 .set_msglevel = bcmgenet_set_msglevel,
Florian Fainelli06ba8372014-07-21 15:29:29 -0700728 .get_wol = bcmgenet_get_wol,
729 .set_wol = bcmgenet_set_wol,
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800730};
731
732/* Power down the unimac, based on mode. */
733static void bcmgenet_power_down(struct bcmgenet_priv *priv,
734 enum bcmgenet_power_mode mode)
735{
736 u32 reg;
737
738 switch (mode) {
739 case GENET_POWER_CABLE_SENSE:
Florian Fainelli80d8e962014-02-24 16:56:11 -0800740 phy_detach(priv->phydev);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800741 break;
742
Florian Fainellic3ae64a2014-07-21 15:29:25 -0700743 case GENET_POWER_WOL_MAGIC:
744 bcmgenet_wol_power_down_cfg(priv, mode);
745 break;
746
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800747 case GENET_POWER_PASSIVE:
748 /* Power down LED */
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800749 if (priv->hw_params->flags & GENET_HAS_EXT) {
750 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
751 reg |= (EXT_PWR_DOWN_PHY |
752 EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
753 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
754 }
755 break;
756 default:
757 break;
758 }
759}
760
761static void bcmgenet_power_up(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700762 enum bcmgenet_power_mode mode)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800763{
764 u32 reg;
765
766 if (!(priv->hw_params->flags & GENET_HAS_EXT))
767 return;
768
769 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
770
771 switch (mode) {
772 case GENET_POWER_PASSIVE:
773 reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
774 EXT_PWR_DOWN_BIAS);
775 /* fallthrough */
776 case GENET_POWER_CABLE_SENSE:
777 /* enable APD */
778 reg |= EXT_PWR_DN_EN_LD;
779 break;
Florian Fainellic3ae64a2014-07-21 15:29:25 -0700780 case GENET_POWER_WOL_MAGIC:
781 bcmgenet_wol_power_up_cfg(priv, mode);
782 return;
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800783 default:
784 break;
785 }
786
787 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
Florian Fainellicc013fb2014-08-11 14:50:43 -0700788
789 if (mode == GENET_POWER_PASSIVE)
790 bcmgenet_mii_reset(priv->dev);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800791}
792
793/* ioctl handle special commands that are not present in ethtool. */
794static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
795{
796 struct bcmgenet_priv *priv = netdev_priv(dev);
797 int val = 0;
798
799 if (!netif_running(dev))
800 return -EINVAL;
801
802 switch (cmd) {
803 case SIOCGMIIPHY:
804 case SIOCGMIIREG:
805 case SIOCSMIIREG:
806 if (!priv->phydev)
807 val = -ENODEV;
808 else
809 val = phy_mii_ioctl(priv->phydev, rq, cmd);
810 break;
811
812 default:
813 val = -EINVAL;
814 break;
815 }
816
817 return val;
818}
819
820static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
821 struct bcmgenet_tx_ring *ring)
822{
823 struct enet_cb *tx_cb_ptr;
824
825 tx_cb_ptr = ring->cbs;
826 tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
827 tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
828 /* Advancing local write pointer */
829 if (ring->write_ptr == ring->end_ptr)
830 ring->write_ptr = ring->cb_ptr;
831 else
832 ring->write_ptr++;
833
834 return tx_cb_ptr;
835}
836
837/* Simple helper to free a control block's resources */
838static void bcmgenet_free_cb(struct enet_cb *cb)
839{
840 dev_kfree_skb_any(cb->skb);
841 cb->skb = NULL;
842 dma_unmap_addr_set(cb, dma_addr, 0);
843}
844
845static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
846 struct bcmgenet_tx_ring *ring)
847{
848 bcmgenet_intrl2_0_writel(priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700849 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
850 INTRL2_CPU_MASK_SET);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800851}
852
853static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
854 struct bcmgenet_tx_ring *ring)
855{
856 bcmgenet_intrl2_0_writel(priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700857 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
858 INTRL2_CPU_MASK_CLEAR);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800859}
860
861static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700862 struct bcmgenet_tx_ring *ring)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800863{
Florian Fainellic91b7f62014-07-23 10:42:12 -0700864 bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
865 INTRL2_CPU_MASK_CLEAR);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800866 priv->int1_mask &= ~(1 << ring->index);
867}
868
869static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
870 struct bcmgenet_tx_ring *ring)
871{
Florian Fainellic91b7f62014-07-23 10:42:12 -0700872 bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
873 INTRL2_CPU_MASK_SET);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800874 priv->int1_mask |= (1 << ring->index);
875}
876
877/* Unlocked version of the reclaim routine */
878static void __bcmgenet_tx_reclaim(struct net_device *dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700879 struct bcmgenet_tx_ring *ring)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800880{
881 struct bcmgenet_priv *priv = netdev_priv(dev);
882 int last_tx_cn, last_c_index, num_tx_bds;
883 struct enet_cb *tx_cb_ptr;
Florian Fainellib2cde2c2014-03-20 10:53:23 -0700884 struct netdev_queue *txq;
Florian Fainelli478a0102014-09-22 11:54:42 -0700885 unsigned int bds_compl;
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800886 unsigned int c_index;
887
Brian Norris7fc527f2014-07-29 14:34:14 -0700888 /* Compute how many buffers are transmitted since last xmit call */
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800889 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
Florian Fainellib2cde2c2014-03-20 10:53:23 -0700890 txq = netdev_get_tx_queue(dev, ring->queue);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800891
892 last_c_index = ring->c_index;
893 num_tx_bds = ring->size;
894
895 c_index &= (num_tx_bds - 1);
896
897 if (c_index >= last_c_index)
898 last_tx_cn = c_index - last_c_index;
899 else
900 last_tx_cn = num_tx_bds - last_c_index + c_index;
901
902 netif_dbg(priv, tx_done, dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700903 "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
904 __func__, ring->index,
905 c_index, last_tx_cn, last_c_index);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800906
907 /* Reclaim transmitted buffers */
908 while (last_tx_cn-- > 0) {
909 tx_cb_ptr = ring->cbs + last_c_index;
Florian Fainelli478a0102014-09-22 11:54:42 -0700910 bds_compl = 0;
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800911 if (tx_cb_ptr->skb) {
Florian Fainelli478a0102014-09-22 11:54:42 -0700912 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800913 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
914 dma_unmap_single(&dev->dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700915 dma_unmap_addr(tx_cb_ptr, dma_addr),
916 tx_cb_ptr->skb->len,
917 DMA_TO_DEVICE);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800918 bcmgenet_free_cb(tx_cb_ptr);
919 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
920 dev->stats.tx_bytes +=
921 dma_unmap_len(tx_cb_ptr, dma_len);
922 dma_unmap_page(&dev->dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700923 dma_unmap_addr(tx_cb_ptr, dma_addr),
924 dma_unmap_len(tx_cb_ptr, dma_len),
925 DMA_TO_DEVICE);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800926 dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
927 }
928 dev->stats.tx_packets++;
Florian Fainelli478a0102014-09-22 11:54:42 -0700929 ring->free_bds += bds_compl;
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800930
931 last_c_index++;
932 last_c_index &= (num_tx_bds - 1);
933 }
934
935 if (ring->free_bds > (MAX_SKB_FRAGS + 1))
936 ring->int_disable(priv, ring);
937
Florian Fainellib2cde2c2014-03-20 10:53:23 -0700938 if (netif_tx_queue_stopped(txq))
939 netif_tx_wake_queue(txq);
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800940
941 ring->c_index = c_index;
942}
943
944static void bcmgenet_tx_reclaim(struct net_device *dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -0700945 struct bcmgenet_tx_ring *ring)
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800946{
947 unsigned long flags;
948
949 spin_lock_irqsave(&ring->lock, flags);
950 __bcmgenet_tx_reclaim(dev, ring);
951 spin_unlock_irqrestore(&ring->lock, flags);
952}
953
954static void bcmgenet_tx_reclaim_all(struct net_device *dev)
955{
956 struct bcmgenet_priv *priv = netdev_priv(dev);
957 int i;
958
959 if (netif_is_multiqueue(dev)) {
960 for (i = 0; i < priv->hw_params->tx_queues; i++)
961 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
962 }
963
964 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
965}
966
967/* Transmits a single SKB (either head of a fragment or a single SKB)
968 * caller must hold priv->lock
969 */
970static int bcmgenet_xmit_single(struct net_device *dev,
971 struct sk_buff *skb,
972 u16 dma_desc_flags,
973 struct bcmgenet_tx_ring *ring)
974{
975 struct bcmgenet_priv *priv = netdev_priv(dev);
976 struct device *kdev = &priv->pdev->dev;
977 struct enet_cb *tx_cb_ptr;
978 unsigned int skb_len;
979 dma_addr_t mapping;
980 u32 length_status;
981 int ret;
982
983 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
984
985 if (unlikely(!tx_cb_ptr))
986 BUG();
987
988 tx_cb_ptr->skb = skb;
989
990 skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
991
992 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
993 ret = dma_mapping_error(kdev, mapping);
994 if (ret) {
Florian Fainelli44c8bc32014-11-19 10:29:56 -0800995 priv->mib.tx_dma_failed++;
Florian Fainelli1c1008c2014-02-13 16:08:47 -0800996 netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
997 dev_kfree_skb(skb);
998 return ret;
999 }
1000
1001 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1002 dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
1003 length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1004 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
1005 DMA_TX_APPEND_CRC;
1006
1007 if (skb->ip_summed == CHECKSUM_PARTIAL)
1008 length_status |= DMA_TX_DO_CSUM;
1009
1010 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
1011
1012 /* Decrement total BD count and advance our write pointer */
1013 ring->free_bds -= 1;
1014 ring->prod_index += 1;
1015 ring->prod_index &= DMA_P_INDEX_MASK;
1016
1017 return 0;
1018}
1019
Brian Norris7fc527f2014-07-29 14:34:14 -07001020/* Transmit a SKB fragment */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001021static int bcmgenet_xmit_frag(struct net_device *dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001022 skb_frag_t *frag,
1023 u16 dma_desc_flags,
1024 struct bcmgenet_tx_ring *ring)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001025{
1026 struct bcmgenet_priv *priv = netdev_priv(dev);
1027 struct device *kdev = &priv->pdev->dev;
1028 struct enet_cb *tx_cb_ptr;
1029 dma_addr_t mapping;
1030 int ret;
1031
1032 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1033
1034 if (unlikely(!tx_cb_ptr))
1035 BUG();
1036 tx_cb_ptr->skb = NULL;
1037
1038 mapping = skb_frag_dma_map(kdev, frag, 0,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001039 skb_frag_size(frag), DMA_TO_DEVICE);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001040 ret = dma_mapping_error(kdev, mapping);
1041 if (ret) {
Florian Fainelli44c8bc32014-11-19 10:29:56 -08001042 priv->mib.tx_dma_failed++;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001043 netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
Florian Fainellic91b7f62014-07-23 10:42:12 -07001044 __func__);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001045 return ret;
1046 }
1047
1048 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1049 dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
1050
1051 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001052 (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1053 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001054
1055
1056 ring->free_bds -= 1;
1057 ring->prod_index += 1;
1058 ring->prod_index &= DMA_P_INDEX_MASK;
1059
1060 return 0;
1061}
1062
1063/* Reallocate the SKB to put enough headroom in front of it and insert
1064 * the transmit checksum offsets in the descriptors
1065 */
Petri Gyntherbc233332014-10-01 11:30:01 -07001066static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev,
1067 struct sk_buff *skb)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001068{
1069 struct status_64 *status = NULL;
1070 struct sk_buff *new_skb;
1071 u16 offset;
1072 u8 ip_proto;
1073 u16 ip_ver;
1074 u32 tx_csum_info;
1075
1076 if (unlikely(skb_headroom(skb) < sizeof(*status))) {
1077 /* If 64 byte status block enabled, must make sure skb has
1078 * enough headroom for us to insert 64B status block.
1079 */
1080 new_skb = skb_realloc_headroom(skb, sizeof(*status));
1081 dev_kfree_skb(skb);
1082 if (!new_skb) {
1083 dev->stats.tx_errors++;
1084 dev->stats.tx_dropped++;
Petri Gyntherbc233332014-10-01 11:30:01 -07001085 return NULL;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001086 }
1087 skb = new_skb;
1088 }
1089
1090 skb_push(skb, sizeof(*status));
1091 status = (struct status_64 *)skb->data;
1092
1093 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1094 ip_ver = htons(skb->protocol);
1095 switch (ip_ver) {
1096 case ETH_P_IP:
1097 ip_proto = ip_hdr(skb)->protocol;
1098 break;
1099 case ETH_P_IPV6:
1100 ip_proto = ipv6_hdr(skb)->nexthdr;
1101 break;
1102 default:
Petri Gyntherbc233332014-10-01 11:30:01 -07001103 return skb;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001104 }
1105
1106 offset = skb_checksum_start_offset(skb) - sizeof(*status);
1107 tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
1108 (offset + skb->csum_offset);
1109
1110 /* Set the length valid bit for TCP and UDP and just set
1111 * the special UDP flag for IPv4, else just set to 0.
1112 */
1113 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
1114 tx_csum_info |= STATUS_TX_CSUM_LV;
1115 if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
1116 tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
Florian Fainelli8900ea572014-07-23 10:42:14 -07001117 } else {
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001118 tx_csum_info = 0;
Florian Fainelli8900ea572014-07-23 10:42:14 -07001119 }
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001120
1121 status->tx_csum_info = tx_csum_info;
1122 }
1123
Petri Gyntherbc233332014-10-01 11:30:01 -07001124 return skb;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001125}
1126
1127static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1128{
1129 struct bcmgenet_priv *priv = netdev_priv(dev);
1130 struct bcmgenet_tx_ring *ring = NULL;
Florian Fainellib2cde2c2014-03-20 10:53:23 -07001131 struct netdev_queue *txq;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001132 unsigned long flags = 0;
1133 int nr_frags, index;
1134 u16 dma_desc_flags;
1135 int ret;
1136 int i;
1137
1138 index = skb_get_queue_mapping(skb);
1139 /* Mapping strategy:
1140 * queue_mapping = 0, unclassified, packet xmited through ring16
1141 * queue_mapping = 1, goes to ring 0. (highest priority queue
1142 * queue_mapping = 2, goes to ring 1.
1143 * queue_mapping = 3, goes to ring 2.
1144 * queue_mapping = 4, goes to ring 3.
1145 */
1146 if (index == 0)
1147 index = DESC_INDEX;
1148 else
1149 index -= 1;
1150
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001151 nr_frags = skb_shinfo(skb)->nr_frags;
1152 ring = &priv->tx_rings[index];
Florian Fainellib2cde2c2014-03-20 10:53:23 -07001153 txq = netdev_get_tx_queue(dev, ring->queue);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001154
1155 spin_lock_irqsave(&ring->lock, flags);
1156 if (ring->free_bds <= nr_frags + 1) {
Florian Fainellib2cde2c2014-03-20 10:53:23 -07001157 netif_tx_stop_queue(txq);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001158 netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
Florian Fainellic91b7f62014-07-23 10:42:12 -07001159 __func__, index, ring->queue);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001160 ret = NETDEV_TX_BUSY;
1161 goto out;
1162 }
1163
Florian Fainelli474ea9c2014-07-22 11:01:52 -07001164 if (skb_padto(skb, ETH_ZLEN)) {
1165 ret = NETDEV_TX_OK;
1166 goto out;
1167 }
1168
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001169 /* set the SKB transmit checksum */
1170 if (priv->desc_64b_en) {
Petri Gyntherbc233332014-10-01 11:30:01 -07001171 skb = bcmgenet_put_tx_csum(dev, skb);
1172 if (!skb) {
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001173 ret = NETDEV_TX_OK;
1174 goto out;
1175 }
1176 }
1177
1178 dma_desc_flags = DMA_SOP;
1179 if (nr_frags == 0)
1180 dma_desc_flags |= DMA_EOP;
1181
1182 /* Transmit single SKB or head of fragment list */
1183 ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
1184 if (ret) {
1185 ret = NETDEV_TX_OK;
1186 goto out;
1187 }
1188
1189 /* xmit fragment */
1190 for (i = 0; i < nr_frags; i++) {
1191 ret = bcmgenet_xmit_frag(dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001192 &skb_shinfo(skb)->frags[i],
1193 (i == nr_frags - 1) ? DMA_EOP : 0,
1194 ring);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001195 if (ret) {
1196 ret = NETDEV_TX_OK;
1197 goto out;
1198 }
1199 }
1200
Florian Fainellid03825f2014-03-20 10:53:21 -07001201 skb_tx_timestamp(skb);
1202
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001203 /* we kept a software copy of how much we should advance the TDMA
1204 * producer index, now write it down to the hardware
1205 */
1206 bcmgenet_tdma_ring_writel(priv, ring->index,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001207 ring->prod_index, TDMA_PROD_INDEX);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001208
1209 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
Florian Fainellib2cde2c2014-03-20 10:53:23 -07001210 netif_tx_stop_queue(txq);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001211 ring->int_enable(priv, ring);
1212 }
1213
1214out:
1215 spin_unlock_irqrestore(&ring->lock, flags);
1216
1217 return ret;
1218}
1219
1220
Florian Fainellic91b7f62014-07-23 10:42:12 -07001221static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001222{
1223 struct device *kdev = &priv->pdev->dev;
1224 struct sk_buff *skb;
1225 dma_addr_t mapping;
1226 int ret;
1227
Florian Fainellic91b7f62014-07-23 10:42:12 -07001228 skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001229 if (!skb)
1230 return -ENOMEM;
1231
1232 /* a caller did not release this control block */
1233 WARN_ON(cb->skb != NULL);
1234 cb->skb = skb;
1235 mapping = dma_map_single(kdev, skb->data,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001236 priv->rx_buf_len, DMA_FROM_DEVICE);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001237 ret = dma_mapping_error(kdev, mapping);
1238 if (ret) {
Florian Fainelli44c8bc32014-11-19 10:29:56 -08001239 priv->mib.rx_dma_failed++;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001240 bcmgenet_free_cb(cb);
1241 netif_err(priv, rx_err, priv->dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001242 "%s DMA map failed\n", __func__);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001243 return ret;
1244 }
1245
1246 dma_unmap_addr_set(cb, dma_addr, mapping);
1247 /* assign packet, prepare descriptor, and advance pointer */
1248
1249 dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
1250
1251 /* turn on the newly assigned BD for DMA to use */
1252 priv->rx_bd_assign_index++;
1253 priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
1254
1255 priv->rx_bd_assign_ptr = priv->rx_bds +
1256 (priv->rx_bd_assign_index * DMA_DESC_SIZE);
1257
1258 return 0;
1259}
1260
1261/* bcmgenet_desc_rx - descriptor based rx process.
1262 * this could be called from bottom half, or from NAPI polling method.
1263 */
1264static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
1265 unsigned int budget)
1266{
1267 struct net_device *dev = priv->dev;
1268 struct enet_cb *cb;
1269 struct sk_buff *skb;
1270 u32 dma_length_status;
1271 unsigned long dma_flag;
1272 int len, err;
1273 unsigned int rxpktprocessed = 0, rxpkttoprocess;
1274 unsigned int p_index;
1275 unsigned int chksum_ok = 0;
1276
Florian Fainellic91b7f62014-07-23 10:42:12 -07001277 p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001278 p_index &= DMA_P_INDEX_MASK;
1279
1280 if (p_index < priv->rx_c_index)
1281 rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
1282 priv->rx_c_index + p_index;
1283 else
1284 rxpkttoprocess = p_index - priv->rx_c_index;
1285
1286 netif_dbg(priv, rx_status, dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001287 "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001288
1289 while ((rxpktprocessed < rxpkttoprocess) &&
Florian Fainellic91b7f62014-07-23 10:42:12 -07001290 (rxpktprocessed < budget)) {
Florian Fainellib629be52014-09-08 11:37:52 -07001291 cb = &priv->rx_cbs[priv->rx_read_ptr];
1292 skb = cb->skb;
1293
Florian Fainellib629be52014-09-08 11:37:52 -07001294 /* We do not have a backing SKB, so we do not have a
1295 * corresponding DMA mapping for this incoming packet since
1296 * bcmgenet_rx_refill always either has both skb and mapping or
1297 * none.
1298 */
1299 if (unlikely(!skb)) {
1300 dev->stats.rx_dropped++;
1301 dev->stats.rx_errors++;
1302 goto refill;
1303 }
1304
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001305 /* Unmap the packet contents such that we can use the
1306 * RSV from the 64 bytes descriptor when enabled and save
1307 * a 32-bits register read
1308 */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001309 dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
Florian Fainellic91b7f62014-07-23 10:42:12 -07001310 priv->rx_buf_len, DMA_FROM_DEVICE);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001311
1312 if (!priv->desc_64b_en) {
Florian Fainellic91b7f62014-07-23 10:42:12 -07001313 dma_length_status =
1314 dmadesc_get_length_status(priv,
1315 priv->rx_bds +
1316 (priv->rx_read_ptr *
1317 DMA_DESC_SIZE));
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001318 } else {
1319 struct status_64 *status;
Florian Fainelli164d4f22014-07-23 10:42:13 -07001320
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001321 status = (struct status_64 *)skb->data;
1322 dma_length_status = status->length_status;
1323 }
1324
1325 /* DMA flags and length are still valid no matter how
1326 * we got the Receive Status Vector (64B RSB or register)
1327 */
1328 dma_flag = dma_length_status & 0xffff;
1329 len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
1330
1331 netif_dbg(priv, rx_status, dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001332 "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
1333 __func__, p_index, priv->rx_c_index,
1334 priv->rx_read_ptr, dma_length_status);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001335
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001336 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
1337 netif_err(priv, rx_status, dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001338 "dropping fragmented packet!\n");
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001339 dev->stats.rx_dropped++;
1340 dev->stats.rx_errors++;
1341 dev_kfree_skb_any(cb->skb);
1342 cb->skb = NULL;
1343 goto refill;
1344 }
1345 /* report errors */
1346 if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
1347 DMA_RX_OV |
1348 DMA_RX_NO |
1349 DMA_RX_LG |
1350 DMA_RX_RXER))) {
1351 netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
Florian Fainellic91b7f62014-07-23 10:42:12 -07001352 (unsigned int)dma_flag);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001353 if (dma_flag & DMA_RX_CRC_ERROR)
1354 dev->stats.rx_crc_errors++;
1355 if (dma_flag & DMA_RX_OV)
1356 dev->stats.rx_over_errors++;
1357 if (dma_flag & DMA_RX_NO)
1358 dev->stats.rx_frame_errors++;
1359 if (dma_flag & DMA_RX_LG)
1360 dev->stats.rx_length_errors++;
1361 dev->stats.rx_dropped++;
1362 dev->stats.rx_errors++;
1363
1364 /* discard the packet and advance consumer index.*/
1365 dev_kfree_skb_any(cb->skb);
1366 cb->skb = NULL;
1367 goto refill;
1368 } /* error packet */
1369
1370 chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
Florian Fainellic91b7f62014-07-23 10:42:12 -07001371 priv->desc_rxchk_en;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001372
1373 skb_put(skb, len);
1374 if (priv->desc_64b_en) {
1375 skb_pull(skb, 64);
1376 len -= 64;
1377 }
1378
1379 if (likely(chksum_ok))
1380 skb->ip_summed = CHECKSUM_UNNECESSARY;
1381
1382 /* remove hardware 2bytes added for IP alignment */
1383 skb_pull(skb, 2);
1384 len -= 2;
1385
1386 if (priv->crc_fwd_en) {
1387 skb_trim(skb, len - ETH_FCS_LEN);
1388 len -= ETH_FCS_LEN;
1389 }
1390
1391 /*Finish setting up the received SKB and send it to the kernel*/
1392 skb->protocol = eth_type_trans(skb, priv->dev);
1393 dev->stats.rx_packets++;
1394 dev->stats.rx_bytes += len;
1395 if (dma_flag & DMA_RX_MULT)
1396 dev->stats.multicast++;
1397
1398 /* Notify kernel */
1399 napi_gro_receive(&priv->napi, skb);
1400 cb->skb = NULL;
1401 netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
1402
1403 /* refill RX path on the current control block */
1404refill:
1405 err = bcmgenet_rx_refill(priv, cb);
Florian Fainelli44c8bc32014-11-19 10:29:56 -08001406 if (err) {
1407 priv->mib.alloc_rx_buff_failed++;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001408 netif_err(priv, rx_err, dev, "Rx refill failed\n");
Florian Fainelli44c8bc32014-11-19 10:29:56 -08001409 }
Florian Fainellicf377d82014-10-10 10:51:52 -07001410
1411 rxpktprocessed++;
1412 priv->rx_read_ptr++;
1413 priv->rx_read_ptr &= (priv->num_rx_bds - 1);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001414 }
1415
1416 return rxpktprocessed;
1417}
1418
1419/* Assign skb to RX DMA descriptor. */
1420static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
1421{
1422 struct enet_cb *cb;
1423 int ret = 0;
1424 int i;
1425
1426 netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);
1427
1428 /* loop here for each buffer needing assign */
1429 for (i = 0; i < priv->num_rx_bds; i++) {
1430 cb = &priv->rx_cbs[priv->rx_bd_assign_index];
1431 if (cb->skb)
1432 continue;
1433
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001434 ret = bcmgenet_rx_refill(priv, cb);
1435 if (ret)
1436 break;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001437 }
1438
1439 return ret;
1440}
1441
1442static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1443{
1444 struct enet_cb *cb;
1445 int i;
1446
1447 for (i = 0; i < priv->num_rx_bds; i++) {
1448 cb = &priv->rx_cbs[i];
1449
1450 if (dma_unmap_addr(cb, dma_addr)) {
1451 dma_unmap_single(&priv->dev->dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001452 dma_unmap_addr(cb, dma_addr),
1453 priv->rx_buf_len, DMA_FROM_DEVICE);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001454 dma_unmap_addr_set(cb, dma_addr, 0);
1455 }
1456
1457 if (cb->skb)
1458 bcmgenet_free_cb(cb);
1459 }
1460}
1461
Florian Fainellic91b7f62014-07-23 10:42:12 -07001462static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
Florian Fainellie29585b2014-07-21 15:29:20 -07001463{
1464 u32 reg;
1465
1466 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1467 if (enable)
1468 reg |= mask;
1469 else
1470 reg &= ~mask;
1471 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
1472
1473 /* UniMAC stops on a packet boundary, wait for a full-size packet
1474 * to be processed
1475 */
1476 if (enable == 0)
1477 usleep_range(1000, 2000);
1478}
1479
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001480static int reset_umac(struct bcmgenet_priv *priv)
1481{
1482 struct device *kdev = &priv->pdev->dev;
1483 unsigned int timeout = 0;
1484 u32 reg;
1485
1486 /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
1487 bcmgenet_rbuf_ctrl_set(priv, 0);
1488 udelay(10);
1489
1490 /* disable MAC while updating its registers */
1491 bcmgenet_umac_writel(priv, 0, UMAC_CMD);
1492
1493 /* issue soft reset, wait for it to complete */
1494 bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
1495 while (timeout++ < 1000) {
1496 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1497 if (!(reg & CMD_SW_RESET))
1498 return 0;
1499
1500 udelay(1);
1501 }
1502
1503 if (timeout == 1000) {
1504 dev_err(kdev,
Brian Norris7fc527f2014-07-29 14:34:14 -07001505 "timeout waiting for MAC to come out of reset\n");
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001506 return -ETIMEDOUT;
1507 }
1508
1509 return 0;
1510}
1511
Florian Fainelli909ff5e2014-07-21 15:29:21 -07001512static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
1513{
1514 /* Mask all interrupts.*/
1515 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1516 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1517 bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1518 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1519 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1520 bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1521}
1522
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001523static int init_umac(struct bcmgenet_priv *priv)
1524{
1525 struct device *kdev = &priv->pdev->dev;
1526 int ret;
1527 u32 reg, cpu_mask_clear;
1528
1529 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
1530
1531 ret = reset_umac(priv);
1532 if (ret)
1533 return ret;
1534
1535 bcmgenet_umac_writel(priv, 0, UMAC_CMD);
1536 /* clear tx/rx counter */
1537 bcmgenet_umac_writel(priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001538 MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
1539 UMAC_MIB_CTRL);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001540 bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
1541
1542 bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1543
1544 /* init rx registers, enable ip header optimization */
1545 reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
1546 reg |= RBUF_ALIGN_2B;
1547 bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
1548
1549 if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
1550 bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
1551
Florian Fainelli909ff5e2014-07-21 15:29:21 -07001552 bcmgenet_intr_disable(priv);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001553
1554 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
1555
1556 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
1557
Brian Norris7fc527f2014-07-29 14:34:14 -07001558 /* Monitor cable plug/unplugged event for internal PHY */
Florian Fainelli8900ea572014-07-23 10:42:14 -07001559 if (phy_is_internal(priv->phydev)) {
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001560 cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
Florian Fainelli8900ea572014-07-23 10:42:14 -07001561 } else if (priv->ext_phy) {
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001562 cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
Florian Fainelli8900ea572014-07-23 10:42:14 -07001563 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001564 reg = bcmgenet_bp_mc_get(priv);
1565 reg |= BIT(priv->hw_params->bp_in_en_shift);
1566
1567 /* bp_mask: back pressure mask */
1568 if (netif_is_multiqueue(priv->dev))
1569 reg |= priv->hw_params->bp_in_mask;
1570 else
1571 reg &= ~priv->hw_params->bp_in_mask;
1572 bcmgenet_bp_mc_set(priv, reg);
1573 }
1574
1575 /* Enable MDIO interrupts on GENET v3+ */
1576 if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
1577 cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
1578
Florian Fainellic91b7f62014-07-23 10:42:12 -07001579 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001580
1581 /* Enable rx/tx engine.*/
1582 dev_dbg(kdev, "done init umac\n");
1583
1584 return 0;
1585}
1586
1587/* Initialize all house-keeping variables for a TX ring, along
1588 * with corresponding hardware registers
1589 */
1590static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1591 unsigned int index, unsigned int size,
1592 unsigned int write_ptr, unsigned int end_ptr)
1593{
1594 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
1595 u32 words_per_bd = WORDS_PER_BD(priv);
1596 u32 flow_period_val = 0;
1597 unsigned int first_bd;
1598
1599 spin_lock_init(&ring->lock);
1600 ring->index = index;
1601 if (index == DESC_INDEX) {
1602 ring->queue = 0;
1603 ring->int_enable = bcmgenet_tx_ring16_int_enable;
1604 ring->int_disable = bcmgenet_tx_ring16_int_disable;
1605 } else {
1606 ring->queue = index + 1;
1607 ring->int_enable = bcmgenet_tx_ring_int_enable;
1608 ring->int_disable = bcmgenet_tx_ring_int_disable;
1609 }
1610 ring->cbs = priv->tx_cbs + write_ptr;
1611 ring->size = size;
1612 ring->c_index = 0;
1613 ring->free_bds = size;
1614 ring->write_ptr = write_ptr;
1615 ring->cb_ptr = write_ptr;
1616 ring->end_ptr = end_ptr - 1;
1617 ring->prod_index = 0;
1618
1619 /* Set flow period for ring != 16 */
1620 if (index != DESC_INDEX)
1621 flow_period_val = ENET_MAX_MTU_SIZE << 16;
1622
1623 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
1624 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
1625 bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
1626 /* Disable rate control for now */
1627 bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001628 TDMA_FLOW_PERIOD);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001629	/* Set ring size and buffer length; unclassified traffic goes to ring 16 */
1630 bcmgenet_tdma_ring_writel(priv, index,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001631 ((size << DMA_RING_SIZE_SHIFT) |
1632 RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001633
1634 first_bd = write_ptr;
1635
1636 /* Set start and end address, read and write pointers */
1637 bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001638 DMA_START_ADDR);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001639 bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001640 TDMA_READ_PTR);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001641 bcmgenet_tdma_ring_writel(priv, index, first_bd,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001642 TDMA_WRITE_PTR);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001643 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001644 DMA_END_ADDR);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001645}
1646
1647/* Initialize a RDMA ring */
1648static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001649 unsigned int index, unsigned int size)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001650{
1651 u32 words_per_bd = WORDS_PER_BD(priv);
1652 int ret;
1653
1654 priv->num_rx_bds = TOTAL_DESC;
1655 priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
1656 priv->rx_bd_assign_ptr = priv->rx_bds;
1657 priv->rx_bd_assign_index = 0;
1658 priv->rx_c_index = 0;
1659 priv->rx_read_ptr = 0;
Florian Fainellic489be02014-07-23 10:42:15 -07001660 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
1661 GFP_KERNEL);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001662 if (!priv->rx_cbs)
1663 return -ENOMEM;
1664
1665 ret = bcmgenet_alloc_rx_buffers(priv);
1666 if (ret) {
1667 kfree(priv->rx_cbs);
1668 return ret;
1669 }
1670
1671 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
1672 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
1673 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
1674 bcmgenet_rdma_ring_writel(priv, index,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001675 ((size << DMA_RING_SIZE_SHIFT) |
1676 RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001677 bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
1678 bcmgenet_rdma_ring_writel(priv, index,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001679 words_per_bd * size - 1, DMA_END_ADDR);
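	/* Program the RX flow-control (XON/XOFF) thresholds for this ring */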
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001680 bcmgenet_rdma_ring_writel(priv, index,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001681 (DMA_FC_THRESH_LO <<
1682 DMA_XOFF_THRESHOLD_SHIFT) |
1683 DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001684 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
1685
1686 return ret;
1687}
1688
 1689/* init multi xmit queues, only available on GENETv2+
 1690 * the queues are partitioned as follows:
 1691 *
 1692 * queues 0 - 3 are priority based, each one has 32 descriptors,
 1693 * with queue 0 being the highest priority queue.
 1694 *
 1695 * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
 1696 * descriptors: 256 - (number of tx queues * bds per queue) = 128
 1697 * descriptors.
 1698 *
 1699 * The transmit control block pool is then partitioned as follows:
 1700 * - tx_rings[0].cbs points to tx_cbs[0..31]
 1701 * - tx_rings[1].cbs points to tx_cbs[32..63]
 1702 * - tx_rings[2].cbs points to tx_cbs[64..95]
 1703 * - tx_rings[3].cbs points to tx_cbs[96..127]
 1704 * - tx_cbs[128..255] are reserved for the default queue (ring 16)
 1705 */
1706static void bcmgenet_init_multiq(struct net_device *dev)
1707{
1708 struct bcmgenet_priv *priv = netdev_priv(dev);
1709 unsigned int i, dma_enable;
Petri Gynther37742162014-10-07 09:30:01 -07001710 u32 reg, dma_ctrl, ring_cfg = 0;
1711 u32 dma_priority[3] = {0, 0, 0};
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001712
1713 if (!netif_is_multiqueue(dev)) {
1714 netdev_warn(dev, "called with non multi queue aware HW\n");
1715 return;
1716 }
1717
1718 dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
1719 dma_enable = dma_ctrl & DMA_EN;
1720 dma_ctrl &= ~DMA_EN;
1721 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
1722
1723 /* Enable strict priority arbiter mode */
1724 bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
1725
1726 for (i = 0; i < priv->hw_params->tx_queues; i++) {
 1727		/* ring i uses the bds_cnt tx_cbs starting at i * bds_cnt; the
 1728		 * last 128 entries are reserved for the default tx queue (ring 16)
 1729		 */
1730 bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001731 i * priv->hw_params->bds_cnt,
1732 (i + 1) * priv->hw_params->bds_cnt);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001733
Brian Norris7fc527f2014-07-29 14:34:14 -07001734		/* Configure ring as descriptor ring and set up its priority */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001735 ring_cfg |= 1 << i;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001736 dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
Petri Gynther37742162014-10-07 09:30:01 -07001737
1738 dma_priority[DMA_PRIO_REG_INDEX(i)] |=
1739 ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001740 }
1741
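	/* Each DMA_PRIORITY_n register packs the priority fields of several
	 * queues; DMA_PRIO_REG_INDEX() selects the register and
	 * DMA_PRIO_REG_SHIFT() the field for a given queue (see bcmgenet.h).
	 */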
Petri Gynther37742162014-10-07 09:30:01 -07001742 /* Set ring 16 priority and program the hardware registers */
1743 dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
1744 ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
1745 DMA_PRIO_REG_SHIFT(DESC_INDEX));
1746 bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
1747 bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
1748 bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
1749
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001750 /* Enable rings */
1751 reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
1752 reg |= ring_cfg;
1753 bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);
1754
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001755	/* Enable the ring buffers and re-enable DMA if it was enabled on entry */
1756 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
1757 reg |= dma_ctrl;
1758 if (dma_enable)
1759 reg |= DMA_EN;
1760 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
1761}
1762
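/* Gracefully stop both DMA engines: clear DMA_EN, poll DMA_STATUS until the
 * DISABLED bit is reported or DMA_TIMEOUT_VAL iterations elapse, and allow
 * in-flight packets to drain in between.
 */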
Florian Fainelli4a0c081e2014-09-22 11:54:43 -07001763static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
1764{
1765 int ret = 0;
1766 int timeout = 0;
1767 u32 reg;
1768
 1769	/* Disable TDMA so no new frames are added to the TX DMA */
1770 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
1771 reg &= ~DMA_EN;
1772 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
1773
1774 /* Check TDMA status register to confirm TDMA is disabled */
1775 while (timeout++ < DMA_TIMEOUT_VAL) {
1776 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
1777 if (reg & DMA_DISABLED)
1778 break;
1779
1780 udelay(1);
1781 }
1782
1783 if (timeout == DMA_TIMEOUT_VAL) {
1784 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
1785 ret = -ETIMEDOUT;
1786 }
1787
 1788	/* Wait 10ms for packets to drain from both tx and rx dma */
1789 usleep_range(10000, 20000);
1790
1791 /* Disable RDMA */
1792 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
1793 reg &= ~DMA_EN;
1794 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
1795
1796 timeout = 0;
1797 /* Check RDMA status register to confirm RDMA is disabled */
1798 while (timeout++ < DMA_TIMEOUT_VAL) {
1799 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
1800 if (reg & DMA_DISABLED)
1801 break;
1802
1803 udelay(1);
1804 }
1805
1806 if (timeout == DMA_TIMEOUT_VAL) {
1807 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
1808 ret = -ETIMEDOUT;
1809 }
1810
1811 return ret;
1812}
1813
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001814static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1815{
1816 int i;
1817
1818 /* disable DMA */
Florian Fainelli4a0c081e2014-09-22 11:54:43 -07001819 bcmgenet_dma_teardown(priv);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001820
1821 for (i = 0; i < priv->num_tx_bds; i++) {
1822 if (priv->tx_cbs[i].skb != NULL) {
1823 dev_kfree_skb(priv->tx_cbs[i].skb);
1824 priv->tx_cbs[i].skb = NULL;
1825 }
1826 }
1827
1828 bcmgenet_free_rx_buffers(priv);
1829 kfree(priv->rx_cbs);
1830 kfree(priv->tx_cbs);
1831}
1832
 1833/* bcmgenet_init_dma: initialize RX/TX DMA rings and control block pools */
1834static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1835{
1836 int ret;
1837
1838 netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
1839
1840 /* by default, enable ring 16 (descriptor based) */
1841 ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
1842 if (ret) {
1843 netdev_err(priv->dev, "failed to initialize RX ring\n");
1844 return ret;
1845 }
1846
 1847	/* Init RDMA */
1848 bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
1849
 1850	/* Init TDMA */
1851 bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
1852
Brian Norris7fc527f2014-07-29 14:34:14 -07001853 /* Initialize common TX ring structures */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001854 priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
1855 priv->num_tx_bds = TOTAL_DESC;
Florian Fainellic489be02014-07-23 10:42:15 -07001856 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
Florian Fainellic91b7f62014-07-23 10:42:12 -07001857 GFP_KERNEL);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001858 if (!priv->tx_cbs) {
1859 bcmgenet_fini_dma(priv);
1860 return -ENOMEM;
1861 }
1862
1863 /* initialize multi xmit queue */
1864 bcmgenet_init_multiq(priv->dev);
1865
1866 /* initialize special ring 16 */
1867 bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001868 priv->hw_params->tx_queues *
1869 priv->hw_params->bds_cnt,
1870 TOTAL_DESC);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001871
1872 return 0;
1873}
1874
 1875/* NAPI polling method */
1876static int bcmgenet_poll(struct napi_struct *napi, int budget)
1877{
1878 struct bcmgenet_priv *priv = container_of(napi,
1879 struct bcmgenet_priv, napi);
1880 unsigned int work_done;
1881
1882 /* tx reclaim */
1883 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
1884
1885 work_done = bcmgenet_desc_rx(priv, budget);
1886
 1887	/* Advance our consumer index */
1888 priv->rx_c_index += work_done;
1889 priv->rx_c_index &= DMA_C_INDEX_MASK;
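	/* The hardware consumer index wraps at DMA_C_INDEX_MASK, so keep the
	 * software copy masked before writing it back to RDMA_CONS_INDEX.
	 */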
1890 bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001891 priv->rx_c_index, RDMA_CONS_INDEX);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001892 if (work_done < budget) {
1893 napi_complete(napi);
Florian Fainellic91b7f62014-07-23 10:42:12 -07001894 bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
1895 INTRL2_CPU_MASK_CLEAR);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001896 }
1897
1898 return work_done;
1899}
1900
1901/* Interrupt bottom half */
1902static void bcmgenet_irq_task(struct work_struct *work)
1903{
1904 struct bcmgenet_priv *priv = container_of(
1905 work, struct bcmgenet_priv, bcmgenet_irq_work);
1906
1907 netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
1908
Florian Fainelli8fdb0e02014-07-21 15:29:26 -07001909 if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
1910 priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
1911 netif_dbg(priv, wol, priv->dev,
1912 "magic packet detected, waking up\n");
1913 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
1914 }
1915
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001916 /* Link UP/DOWN event */
1917 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
Florian Fainellic91b7f62014-07-23 10:42:12 -07001918 (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
Florian Fainelli80d8e962014-02-24 16:56:11 -08001919 phy_mac_interrupt(priv->phydev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001920 priv->irq0_stat & UMAC_IRQ_LINK_UP);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001921 priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
1922 }
1923}
1924
1925/* bcmgenet_isr1: interrupt handler for ring buffer. */
1926static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
1927{
1928 struct bcmgenet_priv *priv = dev_id;
1929 unsigned int index;
1930
1931 /* Save irq status for bottom-half processing. */
1932 priv->irq1_stat =
1933 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
1934 ~priv->int1_mask;
Brian Norris7fc527f2014-07-29 14:34:14 -07001935 /* clear interrupts */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001936 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
1937
1938 netif_dbg(priv, intr, priv->dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001939 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001940	/* Check the MBDONE interrupts: bits 0-15 map to TX rings 0-15;
 1941	 * a set bit means packets completed on that ring, so reclaim its descriptors
 1942	 */
1943 if (priv->irq1_stat & 0x0000ffff) {
1944 index = 0;
1945 for (index = 0; index < 16; index++) {
1946 if (priv->irq1_stat & (1 << index))
1947 bcmgenet_tx_reclaim(priv->dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001948 &priv->tx_rings[index]);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001949 }
1950 }
1951 return IRQ_HANDLED;
1952}
1953
1954/* bcmgenet_isr0: Handle various interrupts. */
1955static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
1956{
1957 struct bcmgenet_priv *priv = dev_id;
1958
1959 /* Save irq status for bottom-half processing. */
1960 priv->irq0_stat =
1961 bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
1962 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
Brian Norris7fc527f2014-07-29 14:34:14 -07001963 /* clear interrupts */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001964 bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
1965
1966 netif_dbg(priv, intr, priv->dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001967 "IRQ=0x%x\n", priv->irq0_stat);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001968
1969 if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
 1970		/* We use NAPI (software interrupt throttling) when
 1971		 * Rx Descriptor throttling is not used.
 1972		 * Disable the interrupt; it will be re-enabled in the poll method.
1973 */
1974 if (likely(napi_schedule_prep(&priv->napi))) {
Florian Fainellic91b7f62014-07-23 10:42:12 -07001975 bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
1976 INTRL2_CPU_MASK_SET);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001977 __napi_schedule(&priv->napi);
1978 }
1979 }
1980 if (priv->irq0_stat &
1981 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
1982 /* Tx reclaim */
1983 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
1984 }
1985 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
1986 UMAC_IRQ_PHY_DET_F |
1987 UMAC_IRQ_LINK_UP |
1988 UMAC_IRQ_LINK_DOWN |
1989 UMAC_IRQ_HFB_SM |
1990 UMAC_IRQ_HFB_MM |
1991 UMAC_IRQ_MPD_R)) {
 1992		/* all other interrupts of interest are handled in the bottom half */
1993 schedule_work(&priv->bcmgenet_irq_work);
1994 }
1995
1996 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
Florian Fainellic91b7f62014-07-23 10:42:12 -07001997 priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001998 priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
1999 wake_up(&priv->wq);
2000 }
2001
2002 return IRQ_HANDLED;
2003}
2004
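/* Dedicated Wake-on-LAN interrupt: simply record a PM wakeup event for the
 * device; the magic-packet handling itself happens in the bottom half.
 */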
Florian Fainelli85620562014-07-21 15:29:23 -07002005static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
2006{
2007 struct bcmgenet_priv *priv = dev_id;
2008
2009 pm_wakeup_event(&priv->pdev->dev, 0);
2010
2011 return IRQ_HANDLED;
2012}
2013
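/* Reset the UniMAC by toggling a bit in the RBUF flush control register,
 * allowing 10us for the reset to take effect after each write.
 */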
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002014static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
2015{
2016 u32 reg;
2017
2018 reg = bcmgenet_rbuf_ctrl_get(priv);
2019 reg |= BIT(1);
2020 bcmgenet_rbuf_ctrl_set(priv, reg);
2021 udelay(10);
2022
2023 reg &= ~BIT(1);
2024 bcmgenet_rbuf_ctrl_set(priv, reg);
2025 udelay(10);
2026}
2027
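/* Program the unicast MAC address: UMAC_MAC0 holds the first four bytes,
 * UMAC_MAC1 the remaining two.
 */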
2028static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -07002029 unsigned char *addr)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002030{
2031 bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
2032 (addr[2] << 8) | addr[3], UMAC_MAC0);
2033 bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
2034}
2035
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002036/* Returns a reusable dma control register value */
2037static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
2038{
2039 u32 reg;
2040 u32 dma_ctrl;
2041
2042 /* disable DMA */
2043 dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
2044 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2045 reg &= ~dma_ctrl;
2046 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2047
2048 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2049 reg &= ~dma_ctrl;
2050 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2051
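	/* Flush the UniMAC TX path by pulsing UMAC_TX_FLUSH for ~10us */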
2052 bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
2053 udelay(10);
2054 bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
2055
2056 return dma_ctrl;
2057}
2058
2059static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
2060{
2061 u32 reg;
2062
2063 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2064 reg |= dma_ctrl;
2065 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2066
2067 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2068 reg |= dma_ctrl;
2069 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2070}
2071
Florian Fainelli909ff5e2014-07-21 15:29:21 -07002072static void bcmgenet_netif_start(struct net_device *dev)
2073{
2074 struct bcmgenet_priv *priv = netdev_priv(dev);
2075
2076 /* Start the network engine */
2077 napi_enable(&priv->napi);
2078
2079 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
2080
2081 if (phy_is_internal(priv->phydev))
2082 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
2083
2084 netif_tx_start_all_queues(dev);
2085
2086 phy_start(priv->phydev);
2087}
2088
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002089static int bcmgenet_open(struct net_device *dev)
2090{
2091 struct bcmgenet_priv *priv = netdev_priv(dev);
2092 unsigned long dma_ctrl;
2093 u32 reg;
2094 int ret;
2095
2096 netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
2097
2098 /* Turn on the clock */
2099 if (!IS_ERR(priv->clk))
2100 clk_prepare_enable(priv->clk);
2101
2102 /* take MAC out of reset */
2103 bcmgenet_umac_reset(priv);
2104
2105 ret = init_umac(priv);
2106 if (ret)
2107 goto err_clk_disable;
2108
2109 /* disable ethernet MAC while updating its registers */
Florian Fainellie29585b2014-07-21 15:29:20 -07002110 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002111
Florian Fainelli909ff5e2014-07-21 15:29:21 -07002112 /* Make sure we reflect the value of CRC_CMD_FWD */
2113 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2114 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
2115
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002116 bcmgenet_set_hw_addr(priv, dev->dev_addr);
2117
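	/* For the internal PHY, set the energy-detect mask in EXT_EXT_PWR_MGMT;
	 * this is assumed to let the PHY's energy detection participate in power
	 * management.
	 */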
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002118 if (phy_is_internal(priv->phydev)) {
2119 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
2120 reg |= EXT_ENERGY_DET_MASK;
2121 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
2122 }
2123
2124 /* Disable RX/TX DMA and flush TX queues */
2125 dma_ctrl = bcmgenet_dma_disable(priv);
2126
2127 /* Reinitialize TDMA and RDMA and SW housekeeping */
2128 ret = bcmgenet_init_dma(priv);
2129 if (ret) {
2130 netdev_err(dev, "failed to initialize DMA\n");
2131 goto err_fini_dma;
2132 }
2133
2134 /* Always enable ring 16 - descriptor ring */
2135 bcmgenet_enable_dma(priv, dma_ctrl);
2136
2137 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
Florian Fainellic91b7f62014-07-23 10:42:12 -07002138 dev->name, priv);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002139 if (ret < 0) {
2140 netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
2141 goto err_fini_dma;
2142 }
2143
2144 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
Florian Fainellic91b7f62014-07-23 10:42:12 -07002145 dev->name, priv);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002146 if (ret < 0) {
2147 netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
2148 goto err_irq0;
2149 }
2150
Florian Fainellidbd479d2014-11-10 18:06:21 -08002151 /* Re-configure the port multiplexer towards the PHY device */
2152 bcmgenet_mii_config(priv->dev, false);
2153
Florian Fainellic96e7312014-11-10 18:06:20 -08002154 phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
2155 priv->phy_interface);
2156
Florian Fainelli909ff5e2014-07-21 15:29:21 -07002157 bcmgenet_netif_start(dev);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002158
2159 return 0;
2160
2161err_irq0:
2162 free_irq(priv->irq0, dev);
2163err_fini_dma:
2164 bcmgenet_fini_dma(priv);
2165err_clk_disable:
2166 if (!IS_ERR(priv->clk))
2167 clk_disable_unprepare(priv->clk);
2168 return ret;
2169}
2170
Florian Fainelli909ff5e2014-07-21 15:29:21 -07002171static void bcmgenet_netif_stop(struct net_device *dev)
2172{
2173 struct bcmgenet_priv *priv = netdev_priv(dev);
2174
2175 netif_tx_stop_all_queues(dev);
2176 napi_disable(&priv->napi);
2177 phy_stop(priv->phydev);
2178
2179 bcmgenet_intr_disable(priv);
2180
2181 /* Wait for pending work items to complete. Since interrupts are
 2182	 * disabled, no new work will be scheduled.
2183 */
2184 cancel_work_sync(&priv->bcmgenet_irq_work);
Florian Fainellicc013fb2014-08-11 14:50:43 -07002185
Florian Fainellicc013fb2014-08-11 14:50:43 -07002186 priv->old_link = -1;
Petri Gynther5ad6e6c2014-10-03 12:25:01 -07002187 priv->old_speed = -1;
Florian Fainellicc013fb2014-08-11 14:50:43 -07002188 priv->old_duplex = -1;
Petri Gynther5ad6e6c2014-10-03 12:25:01 -07002189 priv->old_pause = -1;
Florian Fainelli909ff5e2014-07-21 15:29:21 -07002190}
2191
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002192static int bcmgenet_close(struct net_device *dev)
2193{
2194 struct bcmgenet_priv *priv = netdev_priv(dev);
2195 int ret;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002196
2197 netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
2198
Florian Fainelli909ff5e2014-07-21 15:29:21 -07002199 bcmgenet_netif_stop(dev);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002200
Florian Fainellic96e7312014-11-10 18:06:20 -08002201 /* Really kill the PHY state machine and disconnect from it */
2202 phy_disconnect(priv->phydev);
2203
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002204 /* Disable MAC receive */
Florian Fainellie29585b2014-07-21 15:29:20 -07002205 umac_enable_set(priv, CMD_RX_EN, false);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002206
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002207 ret = bcmgenet_dma_teardown(priv);
2208 if (ret)
2209 return ret;
2210
 2211	/* Disable MAC transmit. TX DMA must be disabled before this */
Florian Fainellie29585b2014-07-21 15:29:20 -07002212 umac_enable_set(priv, CMD_TX_EN, false);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002213
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002214 /* tx reclaim */
2215 bcmgenet_tx_reclaim_all(dev);
2216 bcmgenet_fini_dma(priv);
2217
2218 free_irq(priv->irq0, priv);
2219 free_irq(priv->irq1, priv);
2220
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002221 if (phy_is_internal(priv->phydev))
2222 bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
2223
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002224 if (!IS_ERR(priv->clk))
2225 clk_disable_unprepare(priv->clk);
2226
2227 return 0;
2228}
2229
2230static void bcmgenet_timeout(struct net_device *dev)
2231{
2232 struct bcmgenet_priv *priv = netdev_priv(dev);
2233
2234 netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
2235
2236 dev->trans_start = jiffies;
2237
2238 dev->stats.tx_errors++;
2239
2240 netif_tx_wake_all_queues(dev);
2241}
2242
2243#define MAX_MC_COUNT 16
2244
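/* Program one MDF (MAC destination address filter) entry: each entry uses
 * two UMAC_MDF_ADDR words (2 + 4 address bytes) and is enabled via its bit
 * in UMAC_MDF_CTRL; *i and *mc track the word offset and entry count.
 */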
2245static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
2246 unsigned char *addr,
2247 int *i,
2248 int *mc)
2249{
2250 u32 reg;
2251
Florian Fainellic91b7f62014-07-23 10:42:12 -07002252 bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
2253 UMAC_MDF_ADDR + (*i * 4));
2254 bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
2255 addr[4] << 8 | addr[5],
2256 UMAC_MDF_ADDR + ((*i + 1) * 4));
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002257 reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
2258 reg |= (1 << (MAX_MC_COUNT - *mc));
2259 bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
2260 *i += 2;
2261 (*mc)++;
2262}
2263
2264static void bcmgenet_set_rx_mode(struct net_device *dev)
2265{
2266 struct bcmgenet_priv *priv = netdev_priv(dev);
2267 struct netdev_hw_addr *ha;
2268 int i, mc;
2269 u32 reg;
2270
2271 netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
2272
Brian Norris7fc527f2014-07-29 14:34:14 -07002273 /* Promiscuous mode */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002274 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2275 if (dev->flags & IFF_PROMISC) {
2276 reg |= CMD_PROMISC;
2277 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2278 bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
2279 return;
2280 } else {
2281 reg &= ~CMD_PROMISC;
2282 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2283 }
2284
2285 /* UniMac doesn't support ALLMULTI */
2286 if (dev->flags & IFF_ALLMULTI) {
2287 netdev_warn(dev, "ALLMULTI is not supported\n");
2288 return;
2289 }
2290
2291 /* update MDF filter */
2292 i = 0;
2293 mc = 0;
2294 /* Broadcast */
2295 bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
 2296	/* my own address */
2297 bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
 2298	/* Unicast list */
2299 if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
2300 return;
2301
2302 if (!netdev_uc_empty(dev))
2303 netdev_for_each_uc_addr(ha, dev)
2304 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
2305 /* Multicast */
2306 if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
2307 return;
2308
2309 netdev_for_each_mc_addr(ha, dev)
2310 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
2311}
2312
2313/* Set the hardware MAC address. */
2314static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
2315{
2316 struct sockaddr *addr = p;
2317
2318 /* Setting the MAC address at the hardware level is not possible
2319 * without disabling the UniMAC RX/TX enable bits.
2320 */
2321 if (netif_running(dev))
2322 return -EBUSY;
2323
2324 ether_addr_copy(dev->dev_addr, addr->sa_data);
2325
2326 return 0;
2327}
2328
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002329static const struct net_device_ops bcmgenet_netdev_ops = {
2330 .ndo_open = bcmgenet_open,
2331 .ndo_stop = bcmgenet_close,
2332 .ndo_start_xmit = bcmgenet_xmit,
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002333 .ndo_tx_timeout = bcmgenet_timeout,
2334 .ndo_set_rx_mode = bcmgenet_set_rx_mode,
2335 .ndo_set_mac_address = bcmgenet_set_mac_addr,
2336 .ndo_do_ioctl = bcmgenet_ioctl,
2337 .ndo_set_features = bcmgenet_set_features,
2338};
2339
2340/* Array of GENET hardware parameters/characteristics */
2341static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
2342 [GENET_V1] = {
2343 .tx_queues = 0,
2344 .rx_queues = 0,
2345 .bds_cnt = 0,
2346 .bp_in_en_shift = 16,
2347 .bp_in_mask = 0xffff,
2348 .hfb_filter_cnt = 16,
2349 .qtag_mask = 0x1F,
2350 .hfb_offset = 0x1000,
2351 .rdma_offset = 0x2000,
2352 .tdma_offset = 0x3000,
2353 .words_per_bd = 2,
2354 },
2355 [GENET_V2] = {
2356 .tx_queues = 4,
2357 .rx_queues = 4,
2358 .bds_cnt = 32,
2359 .bp_in_en_shift = 16,
2360 .bp_in_mask = 0xffff,
2361 .hfb_filter_cnt = 16,
2362 .qtag_mask = 0x1F,
2363 .tbuf_offset = 0x0600,
2364 .hfb_offset = 0x1000,
2365 .hfb_reg_offset = 0x2000,
2366 .rdma_offset = 0x3000,
2367 .tdma_offset = 0x4000,
2368 .words_per_bd = 2,
2369 .flags = GENET_HAS_EXT,
2370 },
2371 [GENET_V3] = {
2372 .tx_queues = 4,
2373 .rx_queues = 4,
2374 .bds_cnt = 32,
2375 .bp_in_en_shift = 17,
2376 .bp_in_mask = 0x1ffff,
2377 .hfb_filter_cnt = 48,
2378 .qtag_mask = 0x3F,
2379 .tbuf_offset = 0x0600,
2380 .hfb_offset = 0x8000,
2381 .hfb_reg_offset = 0xfc00,
2382 .rdma_offset = 0x10000,
2383 .tdma_offset = 0x11000,
2384 .words_per_bd = 2,
2385 .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
2386 },
2387 [GENET_V4] = {
2388 .tx_queues = 4,
2389 .rx_queues = 4,
2390 .bds_cnt = 32,
2391 .bp_in_en_shift = 17,
2392 .bp_in_mask = 0x1ffff,
2393 .hfb_filter_cnt = 48,
2394 .qtag_mask = 0x3F,
2395 .tbuf_offset = 0x0600,
2396 .hfb_offset = 0x8000,
2397 .hfb_reg_offset = 0xfc00,
2398 .rdma_offset = 0x2000,
2399 .tdma_offset = 0x4000,
2400 .words_per_bd = 3,
2401 .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
2402 },
2403};
2404
2405/* Infer hardware parameters from the detected GENET version */
2406static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
2407{
2408 struct bcmgenet_hw_params *params;
2409 u32 reg;
2410 u8 major;
2411
2412 if (GENET_IS_V4(priv)) {
2413 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
2414 genet_dma_ring_regs = genet_dma_ring_regs_v4;
2415 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
2416 priv->version = GENET_V4;
2417 } else if (GENET_IS_V3(priv)) {
2418 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
2419 genet_dma_ring_regs = genet_dma_ring_regs_v123;
2420 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
2421 priv->version = GENET_V3;
2422 } else if (GENET_IS_V2(priv)) {
2423 bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
2424 genet_dma_ring_regs = genet_dma_ring_regs_v123;
2425 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
2426 priv->version = GENET_V2;
2427 } else if (GENET_IS_V1(priv)) {
2428 bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
2429 genet_dma_ring_regs = genet_dma_ring_regs_v123;
2430 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
2431 priv->version = GENET_V1;
2432 }
2433
2434 /* enum genet_version starts at 1 */
2435 priv->hw_params = &bcmgenet_hw_params[priv->version];
2436 params = priv->hw_params;
2437
2438 /* Read GENET HW version */
2439 reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
2440 major = (reg >> 24 & 0x0f);
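	/* Normalize the reported major revision: the hardware reports 5 for
	 * V4-class cores and 0 for V1; map these onto the driver's version enum
	 * for the mismatch check below.
	 */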
2441 if (major == 5)
2442 major = 4;
2443 else if (major == 0)
2444 major = 1;
2445 if (major != priv->version) {
2446 dev_err(&priv->pdev->dev,
2447 "GENET version mismatch, got: %d, configured for: %d\n",
2448 major, priv->version);
2449 }
2450
2451 /* Print the GENET core version */
2452 dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
Florian Fainellic91b7f62014-07-23 10:42:12 -07002453 major, (reg >> 16) & 0x0f, reg & 0xffff);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002454
Florian Fainelli487320c2014-09-19 13:07:53 -07002455 /* Store the integrated PHY revision for the MDIO probing function
2456 * to pass this information to the PHY driver. The PHY driver expects
2457 * to find the PHY major revision in bits 15:8 while the GENET register
2458 * stores that information in bits 7:0, account for that.
2459 */
2460 priv->gphy_rev = (reg & 0xffff) << 8;
2461
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002462#ifdef CONFIG_PHYS_ADDR_T_64BIT
2463 if (!(params->flags & GENET_HAS_40BITS))
2464 pr_warn("GENET does not support 40-bits PA\n");
2465#endif
2466
2467 pr_debug("Configuration for version: %d\n"
2468 "TXq: %1d, RXq: %1d, BDs: %1d\n"
2469 "BP << en: %2d, BP msk: 0x%05x\n"
2470 "HFB count: %2d, QTAQ msk: 0x%05x\n"
2471 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
2472 "RDMA: 0x%05x, TDMA: 0x%05x\n"
2473 "Words/BD: %d\n",
2474 priv->version,
2475 params->tx_queues, params->rx_queues, params->bds_cnt,
2476 params->bp_in_en_shift, params->bp_in_mask,
2477 params->hfb_filter_cnt, params->qtag_mask,
2478 params->tbuf_offset, params->hfb_offset,
2479 params->hfb_reg_offset,
2480 params->rdma_offset, params->tdma_offset,
2481 params->words_per_bd);
2482}
2483
2484static const struct of_device_id bcmgenet_match[] = {
2485 { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
2486 { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
2487 { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
2488 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
2489 { },
2490};
2491
2492static int bcmgenet_probe(struct platform_device *pdev)
2493{
2494 struct device_node *dn = pdev->dev.of_node;
2495 const struct of_device_id *of_id;
2496 struct bcmgenet_priv *priv;
2497 struct net_device *dev;
2498 const void *macaddr;
2499 struct resource *r;
2500 int err = -EIO;
2501
2502 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
2503 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
2504 if (!dev) {
2505 dev_err(&pdev->dev, "can't allocate net device\n");
2506 return -ENOMEM;
2507 }
2508
2509 of_id = of_match_node(bcmgenet_match, dn);
2510 if (!of_id)
2511 return -EINVAL;
2512
2513 priv = netdev_priv(dev);
2514 priv->irq0 = platform_get_irq(pdev, 0);
2515 priv->irq1 = platform_get_irq(pdev, 1);
Florian Fainelli85620562014-07-21 15:29:23 -07002516 priv->wol_irq = platform_get_irq(pdev, 2);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002517 if (!priv->irq0 || !priv->irq1) {
2518 dev_err(&pdev->dev, "can't find IRQs\n");
2519 err = -EINVAL;
2520 goto err;
2521 }
2522
2523 macaddr = of_get_mac_address(dn);
2524 if (!macaddr) {
2525 dev_err(&pdev->dev, "can't find MAC address\n");
2526 err = -EINVAL;
2527 goto err;
2528 }
2529
2530 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Fabio Estevam5343a102014-02-24 00:47:24 -03002531 priv->base = devm_ioremap_resource(&pdev->dev, r);
2532 if (IS_ERR(priv->base)) {
2533 err = PTR_ERR(priv->base);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002534 goto err;
2535 }
2536
2537 SET_NETDEV_DEV(dev, &pdev->dev);
2538 dev_set_drvdata(&pdev->dev, dev);
2539 ether_addr_copy(dev->dev_addr, macaddr);
2540 dev->watchdog_timeo = 2 * HZ;
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00002541 dev->ethtool_ops = &bcmgenet_ethtool_ops;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002542 dev->netdev_ops = &bcmgenet_netdev_ops;
2543 netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
2544
2545 priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
2546
2547 /* Set hardware features */
2548 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
2549 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
2550
Florian Fainelli85620562014-07-21 15:29:23 -07002551 /* Request the WOL interrupt and advertise suspend if available */
2552 priv->wol_irq_disabled = true;
2553 err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
2554 dev->name, priv);
2555 if (!err)
2556 device_set_wakeup_capable(&pdev->dev, 1);
2557
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002558 /* Set the needed headroom to account for any possible
 2559	 * features being enabled or disabled at runtime
2560 */
2561 dev->needed_headroom += 64;
2562
2563 netdev_boot_setup_check(dev);
2564
2565 priv->dev = dev;
2566 priv->pdev = pdev;
2567 priv->version = (enum bcmgenet_version)of_id->data;
2568
Florian Fainellie4a60a92014-08-11 14:50:42 -07002569 priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
2570 if (IS_ERR(priv->clk))
2571 dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
2572
2573 if (!IS_ERR(priv->clk))
2574 clk_prepare_enable(priv->clk);
2575
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002576 bcmgenet_set_hw_params(priv);
2577
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002578 /* Mii wait queue */
2579 init_waitqueue_head(&priv->wq);
2580 /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
2581 priv->rx_buf_len = RX_BUF_LENGTH;
2582 INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
2583
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002584 priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
2585 if (IS_ERR(priv->clk_wol))
2586 dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
2587
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002588 err = reset_umac(priv);
2589 if (err)
2590 goto err_clk_disable;
2591
2592 err = bcmgenet_mii_init(dev);
2593 if (err)
2594 goto err_clk_disable;
2595
 2596	/* set up the number of real queues + 1 (GENET_V1 has 0 hardware queues,
 2597	 * just the ring 16 descriptor-based TX)
2598 */
2599 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
2600 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
2601
Florian Fainelli219575e2014-06-26 10:26:21 -07002602 /* libphy will determine the link state */
2603 netif_carrier_off(dev);
2604
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002605 /* Turn off the main clock, WOL clock is handled separately */
2606 if (!IS_ERR(priv->clk))
2607 clk_disable_unprepare(priv->clk);
2608
Florian Fainelli0f50ce92014-06-26 10:26:20 -07002609 err = register_netdev(dev);
2610 if (err)
2611 goto err;
2612
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002613 return err;
2614
2615err_clk_disable:
2616 if (!IS_ERR(priv->clk))
2617 clk_disable_unprepare(priv->clk);
2618err:
2619 free_netdev(dev);
2620 return err;
2621}
2622
2623static int bcmgenet_remove(struct platform_device *pdev)
2624{
2625 struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
2626
2627 dev_set_drvdata(&pdev->dev, NULL);
2628 unregister_netdev(priv->dev);
2629 bcmgenet_mii_exit(priv->dev);
2630 free_netdev(priv->dev);
2631
2632 return 0;
2633}
2634
Florian Fainellib6e978e2014-07-21 15:29:22 -07002635#ifdef CONFIG_PM_SLEEP
2636static int bcmgenet_suspend(struct device *d)
2637{
2638 struct net_device *dev = dev_get_drvdata(d);
2639 struct bcmgenet_priv *priv = netdev_priv(dev);
2640 int ret;
2641
2642 if (!netif_running(dev))
2643 return 0;
2644
2645 bcmgenet_netif_stop(dev);
2646
Florian Fainellicc013fb2014-08-11 14:50:43 -07002647 phy_suspend(priv->phydev);
2648
Florian Fainellib6e978e2014-07-21 15:29:22 -07002649 netif_device_detach(dev);
2650
2651 /* Disable MAC receive */
2652 umac_enable_set(priv, CMD_RX_EN, false);
2653
2654 ret = bcmgenet_dma_teardown(priv);
2655 if (ret)
2656 return ret;
2657
 2658	/* Disable MAC transmit. TX DMA must be disabled before this */
2659 umac_enable_set(priv, CMD_TX_EN, false);
2660
2661 /* tx reclaim */
2662 bcmgenet_tx_reclaim_all(dev);
2663 bcmgenet_fini_dma(priv);
2664
Florian Fainelli8c90db72014-07-21 15:29:28 -07002665 /* Prepare the device for Wake-on-LAN and switch to the slow clock */
2666 if (device_may_wakeup(d) && priv->wolopts) {
2667 bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
2668 clk_prepare_enable(priv->clk_wol);
2669 }
2670
Florian Fainellib6e978e2014-07-21 15:29:22 -07002671 /* Turn off the clocks */
2672 clk_disable_unprepare(priv->clk);
2673
2674 return 0;
2675}
2676
2677static int bcmgenet_resume(struct device *d)
2678{
2679 struct net_device *dev = dev_get_drvdata(d);
2680 struct bcmgenet_priv *priv = netdev_priv(dev);
2681 unsigned long dma_ctrl;
2682 int ret;
2683 u32 reg;
2684
2685 if (!netif_running(dev))
2686 return 0;
2687
2688 /* Turn on the clock */
2689 ret = clk_prepare_enable(priv->clk);
2690 if (ret)
2691 return ret;
2692
2693 bcmgenet_umac_reset(priv);
2694
2695 ret = init_umac(priv);
2696 if (ret)
2697 goto out_clk_disable;
2698
Tobias Klauser0a29b3d2014-09-23 15:19:41 +02002699 /* From WOL-enabled suspend, switch to regular clock */
2700 if (priv->wolopts)
2701 clk_disable_unprepare(priv->clk_wol);
2702
2703 phy_init_hw(priv->phydev);
2704 /* Speed settings must be restored */
Florian Fainellidbd479d2014-11-10 18:06:21 -08002705 bcmgenet_mii_config(priv->dev, false);
Florian Fainelli8c90db72014-07-21 15:29:28 -07002706
Florian Fainellib6e978e2014-07-21 15:29:22 -07002707 /* disable ethernet MAC while updating its registers */
2708 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
2709
2710 bcmgenet_set_hw_addr(priv, dev->dev_addr);
2711
2712 if (phy_is_internal(priv->phydev)) {
2713 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
2714 reg |= EXT_ENERGY_DET_MASK;
2715 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
2716 }
2717
Florian Fainelli98bb7392014-08-11 14:50:45 -07002718 if (priv->wolopts)
2719 bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
2720
Florian Fainellib6e978e2014-07-21 15:29:22 -07002721 /* Disable RX/TX DMA and flush TX queues */
2722 dma_ctrl = bcmgenet_dma_disable(priv);
2723
2724 /* Reinitialize TDMA and RDMA and SW housekeeping */
2725 ret = bcmgenet_init_dma(priv);
2726 if (ret) {
2727 netdev_err(dev, "failed to initialize DMA\n");
2728 goto out_clk_disable;
2729 }
2730
2731 /* Always enable ring 16 - descriptor ring */
2732 bcmgenet_enable_dma(priv, dma_ctrl);
2733
2734 netif_device_attach(dev);
2735
Florian Fainellicc013fb2014-08-11 14:50:43 -07002736 phy_resume(priv->phydev);
2737
Florian Fainellib6e978e2014-07-21 15:29:22 -07002738 bcmgenet_netif_start(dev);
2739
2740 return 0;
2741
2742out_clk_disable:
2743 clk_disable_unprepare(priv->clk);
2744 return ret;
2745}
2746#endif /* CONFIG_PM_SLEEP */
2747
2748static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
2749
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002750static struct platform_driver bcmgenet_driver = {
2751 .probe = bcmgenet_probe,
2752 .remove = bcmgenet_remove,
2753 .driver = {
2754 .name = "bcmgenet",
2755 .owner = THIS_MODULE,
2756 .of_match_table = bcmgenet_match,
Florian Fainellib6e978e2014-07-21 15:29:22 -07002757 .pm = &bcmgenet_pm_ops,
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002758 },
2759};
2760module_platform_driver(bcmgenet_driver);
2761
2762MODULE_AUTHOR("Broadcom Corporation");
2763MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
2764MODULE_ALIAS("platform:bcmgenet");
2765MODULE_LICENSE("GPL");