/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
{ \
	u32 reg = __raw_readl(priv->base + offset + off); \
	return reg; \
} \
static inline void name##_writel(struct bcm_sysport_priv *priv, \
				  u32 val, u32 off) \
{ \
	__raw_writel(val, priv->base + offset + off); \
} \

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	__raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

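/* TDMA_CONTROL bits at or above ACB_ALGO are shifted up by one on
 * SYSTEMPORT Lite, so callers use this helper instead of BIT() directly.
 */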
static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}

/* L2-interrupt masking/unmasking helpers, which automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which) \
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask) \
{ \
	priv->irq##which##_mask &= ~(mask); \
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
} \
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask) \
{ \
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
	priv->irq##which##_mask |= (mask); \
} \

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bits explicit here to save
 * one register write per-packet on 32-bits platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	__raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		     d + DESC_ADDR_HI_STATUS_LEN);
#endif
	__raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
}

static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
					     struct dma_desc *desc,
					     unsigned int port)
{
	/* Ports are latched, so write upper address first */
	tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
	tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
}

/* Ethtool operations */
static int bcm_sysport_set_rx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g.: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);

	return 0;
}

static int bcm_sysport_set_tx_csum(struct net_device *dev,
				   netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);

	return 0;
}

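/* Propagate changed RX/TX checksum offload features to the RXCHK and TSB
 * configuration helpers above.
 */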
static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	netdev_features_t changed = features ^ dev->features;
	netdev_features_t wanted = dev->wanted_features;
	int ret = 0;

	if (changed & NETIF_F_RXCSUM)
		ret = bcm_sysport_set_rx_csum(dev, wanted);
	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		ret = bcm_sysport_set_tx_csum(dev, wanted);

	return ret;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, "0.1", sizeof(info->version));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

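/* SYSTEMPORT Lite does not report the UniMAC MIB/RUNT counters, so only
 * these statistic types are exposed on Lite variants.
 */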
static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

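/* Refresh the software copy of the hardware (UniMAC MIB, RXCHK, RBUF)
 * counters.
 */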
static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *ring;
	int i, j;

	if (netif_running(dev))
		bcm_sysport_update_mib_counters(priv);

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		data[j] = *(unsigned long *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	/* Return the programmed SecureOn password */
	reg = umac_readl(priv, UMAC_PSW_MS);
	put_unaligned_be16(reg, &wol->sopass[0]);
	reg = umac_readl(priv, UMAC_PSW_LS);
	put_unaligned_be32(reg, &wol->sopass[2]);
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	/* Program the SecureOn password */
	if (wol->wolopts & WAKE_MAGICSECURE) {
		umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
			    UMAC_PSW_MS);
		umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
			    UMAC_PSW_LS);
	}

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg;

	/* Base system clock is 125MHz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us; our maximum value
	 * has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++) {
		reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
		reg &= ~(RING_INTR_THRESH_MASK |
			 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
		reg |= ec->tx_max_coalesced_frames;
		reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
			RING_TIMEOUT_SHIFT;
		tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
	}

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= ec->rx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
		RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_kfree_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

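/* Allocate and map a fresh receive buffer, attach it to the descriptor, and
 * hand the previously mapped SKB (if any) back to the caller for processing.
 */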
static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		if (skb)
			dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Determine how much we should process since last call, SYSTEMPORT Lite
	 * groups the producer and consumer indexes into the same 32-bit
	 * register, which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	if (p_index < priv->rx_c_index)
		to_process = (RDMA_CONS_INDEX_MASK + 1) -
			priv->rx_c_index + p_index;
	else
		to_process = p_index - priv->rx_c_index;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header plus we have the Receive Status Block, strip off all
		 * of this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);

		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		ring->bytes += cb->skb->len;
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		ring->packets++;
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		ring->bytes += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	struct net_device *ndev = priv->netdev;
	unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct bcm_sysport_cb *cb;
	u32 hw_ind;

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);

	last_c_index = ring->c_index;
	num_tx_cbs = ring->size;

	c_index &= (num_tx_cbs - 1);

	if (c_index >= last_c_index)
		last_tx_cn = c_index - last_c_index;
	else
		last_tx_cn = num_tx_cbs - last_c_index + c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
		  ring->index, c_index, last_tx_cn, last_c_index);

	while (last_tx_cn-- > 0) {
		cb = ring->cbs + last_c_index;
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		last_c_index++;
		last_c_index &= (num_tx_cbs - 1);
	}

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

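/* NAPI poll handler for TX completion on a single ring; the ring interrupt is
 * re-enabled once all pending descriptors have been reclaimed.
 */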
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

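/* Reclaim every TX ring, used when the TX ring full interrupt cannot tell us
 * which ring is affected.
 */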
static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	return work_done;
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	u32 reg;

	/* Stop monitoring MPD interrupt */
	intrl2_0_mask_set(priv, INTRL2_0_MPD);

	/* Clear the MagicPacket detection logic */
	reg = umac_readl(priv, UMAC_MPD_CTRL);
	reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (priv->irq0_stat & INTRL2_0_MPD) {
		netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
		bcm_sysport_resume_from_wol(priv);
	}

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}

/* TX interrupt service routine */
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring;

	priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq1_stat == 0)) {
		netdev_warn(priv->netdev, "spurious TX interrupt\n");
		return IRQ_NONE;
	}

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		if (!(priv->irq1_stat & BIT(ring)))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_1_mask_set(priv, BIT(ring));
			__napi_schedule_irqoff(&txr->napi);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
{
	struct bcm_sysport_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

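/* Netpoll support: run the RX (and, on non-Lite parts, TX) interrupt handlers
 * with their interrupt lines temporarily disabled.
 */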
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcm_sysport_poll_controller(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	disable_irq(priv->irq0);
	bcm_sysport_rx_isr(priv->irq0, priv);
	enable_irq(priv->irq0);

	if (!priv->is_lite) {
		disable_irq(priv->irq1);
		bcm_sysport_tx_isr(priv->irq1, priv);
		enable_irq(priv->irq1);
	}
}
#endif

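/* Prepend the transmit status block (TSB) carrying the checksum offload
 * parameters the hardware needs for this SKB.
 */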
static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
					      struct net_device *dev)
{
	struct sk_buff *nskb;
	struct bcm_tsb *tsb;
	u32 csum_info;
	u8 ip_proto;
	u16 csum_start;
	u16 ip_ver;

	/* Re-allocate SKB if needed */
	if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
		nskb = skb_realloc_headroom(skb, sizeof(*tsb));
		dev_kfree_skb(skb);
		if (!nskb) {
			dev->stats.tx_errors++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		skb = nskb;
	}

	tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
	/* Zero-out TSB by default */
	memset(tsb, 0, sizeof(*tsb));

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = htons(skb->protocol);
		switch (ip_ver) {
		case ETH_P_IP:
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case ETH_P_IPV6:
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			return skb;
		}

		/* Get the checksum offset and the L4 (transport) offset */
		csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
		csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
		csum_info |= (csum_start << L4_PTR_SHIFT);

		if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
			csum_info |= L4_LENGTH_VALID;
			if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
				csum_info |= L4_UDP;
		} else {
			csum_info = 0;
		}

		tsb->l4_ptr_dest_map = csum_info;
	}

	return skb;
}

static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcm_sysport_tx_ring *ring;
	struct bcm_sysport_cb *cb;
	struct netdev_queue *txq;
	struct dma_desc *desc;
	unsigned int skb_len;
	unsigned long flags;
	dma_addr_t mapping;
	u32 len_status;
	u16 queue;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, queue);
	ring = &priv->tx_rings[queue];

	/* lock against tx reclaim in BH context and TX ring full interrupt */
	spin_lock_irqsave(&ring->lock, flags);
	if (unlikely(ring->desc_count == 0)) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "queue %d awake and ring full!\n", queue);
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* The Ethernet switch we are interfaced with needs packets to be at
	 * least 64 bytes (including FCS), otherwise they will be discarded when
	 * they enter the switch port logic. When Broadcom tags are enabled, we
	 * need to make sure that packets are at least 68 bytes
	 * (including FCS and tag) because the length verification is done after
	 * the Broadcom tag is stripped off the ingress packet.
	 */
	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Insert TSB and checksum info */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
		if (!skb) {
			ret = NETDEV_TX_OK;
			goto out;
		}
	}

	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.tx_dma_failed++;
		netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
			  skb->data, skb_len);
		ret = NETDEV_TX_OK;
		goto out;
	}

	/* Remember the SKB for future freeing */
	cb = &ring->cbs[ring->curr_desc];
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, skb_len);

	/* Fetch a descriptor entry from our pool */
	desc = ring->desc_cpu;

	desc->addr_lo = lower_32_bits(mapping);
	len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
	len_status |= (skb_len << DESC_LEN_SHIFT);
	len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
		       DESC_STATUS_SHIFT;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);

	ring->curr_desc++;
	if (ring->curr_desc == ring->size)
		ring->curr_desc = 0;
	ring->desc_count--;

	/* Ensure write completion of the descriptor status/length
	 * in DRAM before the System Port WRITE_PORT register latches
	 * the value
	 */
	wmb();
	desc->addr_status_len = len_status;
	wmb();

	/* Write this descriptor address to the RING write port */
	tdma_port_write_desc_addr(priv, desc, ring->index);

	/* Check ring space and update SW control flow */
	if (ring->desc_count == 0)
		netif_tx_stop_queue(txq);

	netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
		  ring->index, ring->desc_count, ring->curr_desc);

	ret = NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&ring->lock, flags);
	return ret;
}

static void bcm_sysport_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timeout!\n");

	netif_trans_update(dev);
	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

/* phylib adjust link callback */
static void bcm_sysport_adj_link(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned int changed = 0;
	u32 cmd_bits = 0, reg;

	if (priv->old_link != phydev->link) {
		changed = 1;
		priv->old_link = phydev->link;
	}

	if (priv->old_duplex != phydev->duplex) {
		changed = 1;
		priv->old_duplex = phydev->duplex;
	}

	if (priv->is_lite)
		goto out;

	switch (phydev->speed) {
	case SPEED_2500:
		cmd_bits = CMD_SPEED_2500;
		break;
	case SPEED_1000:
		cmd_bits = CMD_SPEED_1000;
		break;
	case SPEED_100:
		cmd_bits = CMD_SPEED_100;
		break;
	case SPEED_10:
		cmd_bits = CMD_SPEED_10;
		break;
	default:
		break;
	}
	cmd_bits <<= CMD_SPEED_SHIFT;

	if (phydev->duplex == DUPLEX_HALF)
		cmd_bits |= CMD_HD_EN;

	if (priv->old_pause != phydev->pause) {
		changed = 1;
		priv->old_pause = phydev->pause;
	}

	if (!phydev->pause)
		cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;

	if (!changed)
		return;

	if (phydev->link) {
		reg = umac_readl(priv, UMAC_CMD);
		reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
			CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
			CMD_TX_PAUSE_IGNORE);
		reg |= cmd_bits;
		umac_writel(priv, reg, UMAC_CMD);
	}
out:
	if (changed)
		phy_print_status(phydev);
}

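/* Allocate the software state and the single coherent DMA descriptor for a
 * TX ring, then program the ring registers and enable it in the TDMA arbiter.
 */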
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
				    unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	size_t size;
	void *p;
	u32 reg;

	/* Simple descriptors partitioning for now */
	size = 256;

	/* We just need one DMA descriptor which is DMA-able, since writing to
	 * the port will allocate a new descriptor in its internal linked-list
	 */
	p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
				GFP_KERNEL);
	if (!p) {
		netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
		return -ENOMEM;
	}

	ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
	if (!ring->cbs) {
		netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
		return -ENOMEM;
	}

	/* Initialize SW view of the ring */
	spin_lock_init(&ring->lock);
	ring->priv = priv;
	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
	ring->index = index;
	ring->size = size;
	ring->alloc_size = ring->size;
	ring->desc_cpu = p;
	ring->desc_count = ring->size;
	ring->curr_desc = 0;

	/* Initialize HW ring */
	tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));

	/* Program the number of descriptors as MAX_THRESHOLD and half of
	 * its size for the hysteresis trigger
	 */
	tdma_writel(priv, ring->size |
			1 << RING_HYST_THRESH_SHIFT,
			TDMA_DESC_RING_MAX_HYST(index));

	/* Enable the ring queue in the arbiter */
	reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
	reg |= (1 << index);
	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);

	napi_enable(&ring->napi);

	netif_dbg(priv, hw, priv->netdev,
		  "TDMA cfg, size=%d, desc_cpu=%p\n",
		  ring->size, ring->desc_cpu);

	return 0;
}

static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
				     unsigned int index)
{
	struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
	struct device *kdev = &priv->pdev->dev;
	u32 reg;

	/* Caller should stop the TDMA engine */
	reg = tdma_readl(priv, TDMA_STATUS);
	if (!(reg & TDMA_DISABLED))
		netdev_warn(priv->netdev, "TDMA not stopped!\n");

	/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
	 * fail, so by checking this pointer we know whether the TX ring was
	 * fully initialized or not.
	 */
	if (!ring->cbs)
		return;

	napi_disable(&ring->napi);
	netif_napi_del(&ring->napi);

	bcm_sysport_tx_clean(priv, ring);

	kfree(ring->cbs);
	ring->cbs = NULL;

	if (ring->desc_dma) {
		dma_free_coherent(kdev, sizeof(struct dma_desc),
				  ring->desc_cpu, ring->desc_dma);
		ring->desc_dma = 0;
	}
	ring->size = 0;
	ring->alloc_size = 0;

	netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
}

1420/* RDMA helper */
1421static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001422 unsigned int enable)
Florian Fainelli80105be2014-04-24 18:08:57 -07001423{
1424 unsigned int timeout = 1000;
1425 u32 reg;
1426
1427 reg = rdma_readl(priv, RDMA_CONTROL);
1428 if (enable)
1429 reg |= RDMA_EN;
1430 else
1431 reg &= ~RDMA_EN;
1432 rdma_writel(priv, reg, RDMA_CONTROL);
1433
1434 /* Poll for RDMA enabling/disabling completion */
1435 do {
1436 reg = rdma_readl(priv, RDMA_STATUS);
1437 if (!!(reg & RDMA_DISABLED) == !enable)
1438 return 0;
1439 usleep_range(1000, 2000);
1440 } while (timeout-- > 0);
1441
1442 netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
1443
1444 return -ETIMEDOUT;
1445}
1446
1447/* TDMA helper */
1448static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001449 unsigned int enable)
Florian Fainelli80105be2014-04-24 18:08:57 -07001450{
1451 unsigned int timeout = 1000;
1452 u32 reg;
1453
1454 reg = tdma_readl(priv, TDMA_CONTROL);
1455 if (enable)
Florian Fainelli44a45242017-01-20 11:08:27 -08001456 reg |= tdma_control_bit(priv, TDMA_EN);
Florian Fainelli80105be2014-04-24 18:08:57 -07001457 else
Florian Fainelli44a45242017-01-20 11:08:27 -08001458 reg &= ~tdma_control_bit(priv, TDMA_EN);
Florian Fainelli80105be2014-04-24 18:08:57 -07001459 tdma_writel(priv, reg, TDMA_CONTROL);
1460
1461 /* Poll for TDMA enabling/disabling completion */
1462 do {
1463 reg = tdma_readl(priv, TDMA_STATUS);
1464 if (!!(reg & TDMA_DISABLED) == !enable)
1465 return 0;
1466
1467 usleep_range(1000, 2000);
1468 } while (timeout-- > 0);
1469
1470 netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
1471
1472 return -ETIMEDOUT;
1473}
1474
1475static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
1476{
Florian Fainellibaf387a2015-05-28 15:24:42 -07001477 struct bcm_sysport_cb *cb;
Florian Fainelli80105be2014-04-24 18:08:57 -07001478 u32 reg;
1479 int ret;
Florian Fainellibaf387a2015-05-28 15:24:42 -07001480 int i;
Florian Fainelli80105be2014-04-24 18:08:57 -07001481
1482 /* Initialize SW view of the RX ring */
Florian Fainelli44a45242017-01-20 11:08:27 -08001483 priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
Florian Fainelli80105be2014-04-24 18:08:57 -07001484 priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
Florian Fainelli80105be2014-04-24 18:08:57 -07001485 priv->rx_c_index = 0;
1486 priv->rx_read_ptr = 0;
Florian Fainelli40a8a312014-07-09 17:36:47 -07001487 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
1488 GFP_KERNEL);
Florian Fainelli80105be2014-04-24 18:08:57 -07001489 if (!priv->rx_cbs) {
1490 netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
1491 return -ENOMEM;
1492 }
1493
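	/* Point each control block at its descriptor within the memory-mapped
	 * RDMA descriptor space.
	 */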
Florian Fainellibaf387a2015-05-28 15:24:42 -07001494 for (i = 0; i < priv->num_rx_bds; i++) {
1495 cb = priv->rx_cbs + i;
1496 cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
1497 }
1498
Florian Fainelli80105be2014-04-24 18:08:57 -07001499 ret = bcm_sysport_alloc_rx_bufs(priv);
1500 if (ret) {
1501 netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
1502 return ret;
1503 }
1504
1505 /* Initialize HW, ensure RDMA is disabled */
1506 reg = rdma_readl(priv, RDMA_STATUS);
1507 if (!(reg & RDMA_DISABLED))
1508 rdma_enable_set(priv, 0);
1509
1510 rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
1511 rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
1512 rdma_writel(priv, 0, RDMA_PROD_INDEX);
1513 rdma_writel(priv, 0, RDMA_CONS_INDEX);
1514 rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
1515 RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
1516 /* Operate the queue in ring mode */
1517 rdma_writel(priv, 0, RDMA_START_ADDR_HI);
1518 rdma_writel(priv, 0, RDMA_START_ADDR_LO);
1519 rdma_writel(priv, 0, RDMA_END_ADDR_HI);
Florian Fainelli44a45242017-01-20 11:08:27 -08001520 rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
Florian Fainelli80105be2014-04-24 18:08:57 -07001521
1522 rdma_writel(priv, 1, RDMA_MBDONE_INTR);
1523
1524 netif_dbg(priv, hw, priv->netdev,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001525 "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
1526 priv->num_rx_bds, priv->rx_bds);
Florian Fainelli80105be2014-04-24 18:08:57 -07001527
1528 return 0;
1529}
1530
1531static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
1532{
1533 struct bcm_sysport_cb *cb;
1534 unsigned int i;
1535 u32 reg;
1536
1537 /* Caller should ensure RDMA is disabled */
1538 reg = rdma_readl(priv, RDMA_STATUS);
1539 if (!(reg & RDMA_DISABLED))
1540 netdev_warn(priv->netdev, "RDMA not stopped!\n");
1541
1542 for (i = 0; i < priv->num_rx_bds; i++) {
1543 cb = &priv->rx_cbs[i];
1544 if (dma_unmap_addr(cb, dma_addr))
1545 dma_unmap_single(&priv->pdev->dev,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001546 dma_unmap_addr(cb, dma_addr),
1547 RX_BUF_LENGTH, DMA_FROM_DEVICE);
Florian Fainelli80105be2014-04-24 18:08:57 -07001548 bcm_sysport_free_cb(cb);
1549 }
1550
1551 kfree(priv->rx_cbs);
1552 priv->rx_cbs = NULL;
1553
1554 netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
1555}
1556
1557static void bcm_sysport_set_rx_mode(struct net_device *dev)
1558{
1559 struct bcm_sysport_priv *priv = netdev_priv(dev);
1560 u32 reg;
1561
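	/* SYSTEMPORT Lite does not use the UniMAC, so there is no CMD_PROMISC
	 * handling to do here.
	 */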
Florian Fainelli44a45242017-01-20 11:08:27 -08001562 if (priv->is_lite)
1563 return;
1564
Florian Fainelli80105be2014-04-24 18:08:57 -07001565 reg = umac_readl(priv, UMAC_CMD);
1566 if (dev->flags & IFF_PROMISC)
1567 reg |= CMD_PROMISC;
1568 else
1569 reg &= ~CMD_PROMISC;
1570 umac_writel(priv, reg, UMAC_CMD);
1571
1572 /* No support for ALLMULTI */
1573 if (dev->flags & IFF_ALLMULTI)
1574 return;
1575}
1576
1577static inline void umac_enable_set(struct bcm_sysport_priv *priv,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001578 u32 mask, unsigned int enable)
Florian Fainelli80105be2014-04-24 18:08:57 -07001579{
1580 u32 reg;
1581
Florian Fainelli44a45242017-01-20 11:08:27 -08001582 if (!priv->is_lite) {
1583 reg = umac_readl(priv, UMAC_CMD);
1584 if (enable)
1585 reg |= mask;
1586 else
1587 reg &= ~mask;
1588 umac_writel(priv, reg, UMAC_CMD);
1589 } else {
1590 reg = gib_readl(priv, GIB_CONTROL);
1591 if (enable)
1592 reg |= mask;
1593 else
1594 reg &= ~mask;
1595 gib_writel(priv, reg, GIB_CONTROL);
1596 }
Florian Fainelli00b91c62014-05-15 14:33:53 -07001597
1598 /* UniMAC stops on a packet boundary; wait long enough for a
1599 * full-sized packet to be processed (1 msec).
1600 */
1601 if (enable == 0)
1602 usleep_range(1000, 2000);
Florian Fainelli80105be2014-04-24 18:08:57 -07001603}
1604
Florian Fainelli412bce82014-06-26 10:06:45 -07001605static inline void umac_reset(struct bcm_sysport_priv *priv)
Florian Fainelli80105be2014-04-24 18:08:57 -07001606{
Florian Fainelli80105be2014-04-24 18:08:57 -07001607 u32 reg;
Florian Fainelli80105be2014-04-24 18:08:57 -07001608
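	/* Only the full SYSTEMPORT has a UniMAC to reset; Lite uses the GIB */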
Florian Fainelli44a45242017-01-20 11:08:27 -08001609 if (priv->is_lite)
1610 return;
1611
Florian Fainelli412bce82014-06-26 10:06:45 -07001612 reg = umac_readl(priv, UMAC_CMD);
1613 reg |= CMD_SW_RESET;
1614 umac_writel(priv, reg, UMAC_CMD);
1615 udelay(10);
1616 reg = umac_readl(priv, UMAC_CMD);
1617 reg &= ~CMD_SW_RESET;
1618 umac_writel(priv, reg, UMAC_CMD);
Florian Fainelli80105be2014-04-24 18:08:57 -07001619}
1620
1621static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001622 unsigned char *addr)
Florian Fainelli80105be2014-04-24 18:08:57 -07001623{
Florian Fainelli44a45242017-01-20 11:08:27 -08001624 u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
1625 addr[3];
1626 u32 mac1 = (addr[4] << 8) | addr[5];
1627
1628 if (!priv->is_lite) {
1629 umac_writel(priv, mac0, UMAC_MAC0);
1630 umac_writel(priv, mac1, UMAC_MAC1);
1631 } else {
1632 gib_writel(priv, mac0, GIB_MAC0);
1633 gib_writel(priv, mac1, GIB_MAC1);
1634 }
Florian Fainelli80105be2014-04-24 18:08:57 -07001635}
1636
1637static void topctrl_flush(struct bcm_sysport_priv *priv)
1638{
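	/* Assert the RX and TX flush controls, give the FIFOs a millisecond
	 * to drain, then de-assert them.
	 */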
1639 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
1640 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
1641 mdelay(1);
1642 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1643 topctrl_writel(priv, 0, TX_FLUSH_CNTL);
1644}
1645
Florian Fainellifb3b5962014-12-08 15:59:18 -08001646static int bcm_sysport_change_mac(struct net_device *dev, void *p)
1647{
1648 struct bcm_sysport_priv *priv = netdev_priv(dev);
1649 struct sockaddr *addr = p;
1650
1651 if (!is_valid_ether_addr(addr->sa_data))
1652 return -EINVAL;
1653
1654 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1655
1656 /* The interface is down; the MAC address change will take effect on
1657 * the next open call.
1658 */
1659 if (!netif_running(dev))
1660 return 0;
1661
1662 umac_set_hw_addr(priv, dev->dev_addr);
1663
1664 return 0;
1665}
1666
Florian Fainelli30defeb2017-03-23 10:36:46 -07001667static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev)
1668{
1669 struct bcm_sysport_priv *priv = netdev_priv(dev);
1670 unsigned long tx_bytes = 0, tx_packets = 0;
1671 struct bcm_sysport_tx_ring *ring;
1672 unsigned int q;
1673
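	/* Aggregate the per-TX-ring counters into the netdev statistics */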
1674 for (q = 0; q < dev->num_tx_queues; q++) {
1675 ring = &priv->tx_rings[q];
1676 tx_bytes += ring->bytes;
1677 tx_packets += ring->packets;
1678 }
1679
1680 dev->stats.tx_bytes = tx_bytes;
1681 dev->stats.tx_packets = tx_packets;
1682 return &dev->stats;
1683}
1684
Florian Fainellib02e6d92014-07-01 21:08:37 -07001685static void bcm_sysport_netif_start(struct net_device *dev)
1686{
1687 struct bcm_sysport_priv *priv = netdev_priv(dev);
1688
1689 /* Enable NAPI */
1690 napi_enable(&priv->napi);
1691
Florian Fainelli8edf0042014-10-28 11:12:00 -07001692 /* Enable RX interrupt and TX ring full interrupt */
1693 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1694
Philippe Reynes715a0222016-06-19 20:39:08 +02001695 phy_start(dev->phydev);
Florian Fainellib02e6d92014-07-01 21:08:37 -07001696
Florian Fainelli44a45242017-01-20 11:08:27 -08001697 /* Enable TX interrupts for the TXQs */
1698 if (!priv->is_lite)
1699 intrl2_1_mask_clear(priv, 0xffffffff);
1700 else
1701 intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
Florian Fainellib02e6d92014-07-01 21:08:37 -07001702
1703 /* Last call before we start the real business */
1704 netif_tx_start_all_queues(dev);
1705}
1706
Florian Fainelli40755a02014-07-01 21:08:38 -07001707static void rbuf_init(struct bcm_sysport_priv *priv)
1708{
1709 u32 reg;
1710
1711 reg = rbuf_readl(priv, RBUF_CONTROL);
1712 reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
Florian Fainelli44a45242017-01-20 11:08:27 -08001713 /* Set a correct RSB format on SYSTEMPORT Lite */
1714 if (priv->is_lite) {
1715 reg &= ~RBUF_RSB_SWAP1;
1716 reg |= RBUF_RSB_SWAP0;
1717 }
Florian Fainelli40755a02014-07-01 21:08:38 -07001718 rbuf_writel(priv, reg, RBUF_CONTROL);
1719}
1720
Florian Fainelli44a45242017-01-20 11:08:27 -08001721static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
1722{
1723 intrl2_0_mask_set(priv, 0xffffffff);
1724 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1725 if (!priv->is_lite) {
1726 intrl2_1_mask_set(priv, 0xffffffff);
1727 intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
1728 }
1729}
1730
1731static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
1732{
1733 u32 __maybe_unused reg;
1734
1735 /* Include Broadcom tag in pad extension */
1736 if (netdev_uses_dsa(priv->netdev)) {
1737 reg = gib_readl(priv, GIB_CONTROL);
1738 reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
1739 reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
1740 gib_writel(priv, reg, GIB_CONTROL);
1741 }
1742}
1743
Florian Fainelli80105be2014-04-24 18:08:57 -07001744static int bcm_sysport_open(struct net_device *dev)
1745{
1746 struct bcm_sysport_priv *priv = netdev_priv(dev);
Philippe Reynes715a0222016-06-19 20:39:08 +02001747 struct phy_device *phydev;
Florian Fainelli80105be2014-04-24 18:08:57 -07001748 unsigned int i;
Florian Fainelli80105be2014-04-24 18:08:57 -07001749 int ret;
1750
1751 /* Reset UniMAC */
Florian Fainelli412bce82014-06-26 10:06:45 -07001752 umac_reset(priv);
Florian Fainelli80105be2014-04-24 18:08:57 -07001753
1754 /* Flush TX and RX FIFOs at TOPCTRL level */
1755 topctrl_flush(priv);
1756
1757 /* Disable the UniMAC RX/TX */
Florian Fainelli18e21b02014-07-01 21:08:36 -07001758 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
Florian Fainelli80105be2014-04-24 18:08:57 -07001759
1760 /* Enable RBUF 4-byte alignment and Receive Status Block */
Florian Fainelli40755a02014-07-01 21:08:38 -07001761 rbuf_init(priv);
Florian Fainelli80105be2014-04-24 18:08:57 -07001762
1763 /* Set maximum frame length */
Florian Fainelli44a45242017-01-20 11:08:27 -08001764 if (!priv->is_lite)
1765 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1766 else
1767 gib_set_pad_extension(priv);
Florian Fainelli80105be2014-04-24 18:08:57 -07001768
1769 /* Set MAC address */
1770 umac_set_hw_addr(priv, dev->dev_addr);
1771
1772 /* Read CRC forward */
Florian Fainelli44a45242017-01-20 11:08:27 -08001773 if (!priv->is_lite)
1774 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
1775 else
1776 priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
1777 GIB_FCS_STRIP);
Florian Fainelli80105be2014-04-24 18:08:57 -07001778
Philippe Reynes715a0222016-06-19 20:39:08 +02001779 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1780 0, priv->phy_interface);
1781 if (!phydev) {
Florian Fainelli80105be2014-04-24 18:08:57 -07001782 netdev_err(dev, "could not attach to PHY\n");
1783 return -ENODEV;
1784 }
1785
1786 /* Reset housekeeping link status */
1787 priv->old_duplex = -1;
1788 priv->old_link = -1;
1789 priv->old_pause = -1;
1790
1791 /* mask all interrupts and request them */
Florian Fainelli44a45242017-01-20 11:08:27 -08001792 bcm_sysport_mask_all_intrs(priv);
Florian Fainelli80105be2014-04-24 18:08:57 -07001793
1794 ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
1795 if (ret) {
1796 netdev_err(dev, "failed to request RX interrupt\n");
1797 goto out_phy_disconnect;
1798 }
1799
Florian Fainelli44a45242017-01-20 11:08:27 -08001800 if (!priv->is_lite) {
1801 ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
1802 dev->name, dev);
1803 if (ret) {
1804 netdev_err(dev, "failed to request TX interrupt\n");
1805 goto out_free_irq0;
1806 }
Florian Fainelli80105be2014-04-24 18:08:57 -07001807 }
1808
1809 /* Initialize both hardware and software ring */
1810 for (i = 0; i < dev->num_tx_queues; i++) {
1811 ret = bcm_sysport_init_tx_ring(priv, i);
1812 if (ret) {
1813 netdev_err(dev, "failed to initialize TX ring %d\n",
Florian Fainelli23acb2f2014-07-09 17:36:46 -07001814 i);
Florian Fainelli80105be2014-04-24 18:08:57 -07001815 goto out_free_tx_ring;
1816 }
1817 }
1818
1819 /* Initialize linked-list */
1820 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
1821
1822 /* Initialize RX ring */
1823 ret = bcm_sysport_init_rx_ring(priv);
1824 if (ret) {
1825 netdev_err(dev, "failed to initialize RX ring\n");
1826 goto out_free_rx_ring;
1827 }
1828
1829 /* Turn on RDMA */
1830 ret = rdma_enable_set(priv, 1);
1831 if (ret)
1832 goto out_free_rx_ring;
1833
Florian Fainelli80105be2014-04-24 18:08:57 -07001834 /* Turn on TDMA */
1835 ret = tdma_enable_set(priv, 1);
1836 if (ret)
1837 goto out_clear_rx_int;
1838
Florian Fainelli80105be2014-04-24 18:08:57 -07001839 /* Turn on UniMAC TX/RX */
Florian Fainelli18e21b02014-07-01 21:08:36 -07001840 umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
Florian Fainelli80105be2014-04-24 18:08:57 -07001841
Florian Fainellib02e6d92014-07-01 21:08:37 -07001842 bcm_sysport_netif_start(dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07001843
1844 return 0;
1845
1846out_clear_rx_int:
1847 intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1848out_free_rx_ring:
1849 bcm_sysport_fini_rx_ring(priv);
1850out_free_tx_ring:
1851 for (i = 0; i < dev->num_tx_queues; i++)
1852 bcm_sysport_fini_tx_ring(priv, i);
Florian Fainelli44a45242017-01-20 11:08:27 -08001853 if (!priv->is_lite)
1854 free_irq(priv->irq1, dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07001855out_free_irq0:
1856 free_irq(priv->irq0, dev);
1857out_phy_disconnect:
Philippe Reynes715a0222016-06-19 20:39:08 +02001858 phy_disconnect(phydev);
Florian Fainelli80105be2014-04-24 18:08:57 -07001859 return ret;
1860}
1861
Florian Fainellib02e6d92014-07-01 21:08:37 -07001862static void bcm_sysport_netif_stop(struct net_device *dev)
Florian Fainelli80105be2014-04-24 18:08:57 -07001863{
1864 struct bcm_sysport_priv *priv = netdev_priv(dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07001865
1866 /* stop all software from updating hardware */
1867 netif_tx_stop_all_queues(dev);
1868 napi_disable(&priv->napi);
Philippe Reynes715a0222016-06-19 20:39:08 +02001869 phy_stop(dev->phydev);
Florian Fainelli80105be2014-04-24 18:08:57 -07001870
1871 /* mask all interrupts */
Florian Fainelli44a45242017-01-20 11:08:27 -08001872 bcm_sysport_mask_all_intrs(priv);
Florian Fainellib02e6d92014-07-01 21:08:37 -07001873}
1874
1875static int bcm_sysport_stop(struct net_device *dev)
1876{
1877 struct bcm_sysport_priv *priv = netdev_priv(dev);
1878 unsigned int i;
1879 int ret;
1880
1881 bcm_sysport_netif_stop(dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07001882
1883 /* Disable UniMAC RX */
Florian Fainelli18e21b02014-07-01 21:08:36 -07001884 umac_enable_set(priv, CMD_RX_EN, 0);
Florian Fainelli80105be2014-04-24 18:08:57 -07001885
1886 ret = tdma_enable_set(priv, 0);
1887 if (ret) {
1888 netdev_err(dev, "timeout disabling RDMA\n");
1889 return ret;
1890 }
1891
1892 /* Wait for a maximum packet size to be drained */
1893 usleep_range(2000, 3000);
1894
1895 ret = rdma_enable_set(priv, 0);
1896 if (ret) {
1897 netdev_err(dev, "timeout disabling TDMA\n");
1898 return ret;
1899 }
1900
1901 /* Disable UniMAC TX */
Florian Fainelli18e21b02014-07-01 21:08:36 -07001902 umac_enable_set(priv, CMD_TX_EN, 0);
Florian Fainelli80105be2014-04-24 18:08:57 -07001903
1904 /* Free RX/TX rings SW structures */
1905 for (i = 0; i < dev->num_tx_queues; i++)
1906 bcm_sysport_fini_tx_ring(priv, i);
1907 bcm_sysport_fini_rx_ring(priv);
1908
1909 free_irq(priv->irq0, dev);
Florian Fainelli44a45242017-01-20 11:08:27 -08001910 if (!priv->is_lite)
1911 free_irq(priv->irq1, dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07001912
1913 /* Disconnect from PHY */
Philippe Reynes715a0222016-06-19 20:39:08 +02001914 phy_disconnect(dev->phydev);
Florian Fainelli80105be2014-04-24 18:08:57 -07001915
1916 return 0;
1917}
1918
Julia Lawallc1ab0e92016-08-31 09:30:48 +02001919static const struct ethtool_ops bcm_sysport_ethtool_ops = {
Florian Fainelli80105be2014-04-24 18:08:57 -07001920 .get_drvinfo = bcm_sysport_get_drvinfo,
1921 .get_msglevel = bcm_sysport_get_msglvl,
1922 .set_msglevel = bcm_sysport_set_msglvl,
1923 .get_link = ethtool_op_get_link,
1924 .get_strings = bcm_sysport_get_strings,
1925 .get_ethtool_stats = bcm_sysport_get_stats,
1926 .get_sset_count = bcm_sysport_get_sset_count,
Florian Fainelli83e82f42014-07-01 21:08:40 -07001927 .get_wol = bcm_sysport_get_wol,
1928 .set_wol = bcm_sysport_set_wol,
Florian Fainellib1a15e82015-05-11 15:12:41 -07001929 .get_coalesce = bcm_sysport_get_coalesce,
1930 .set_coalesce = bcm_sysport_set_coalesce,
Philippe Reynes697666e2016-06-19 20:39:09 +02001931 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1932 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Florian Fainelli80105be2014-04-24 18:08:57 -07001933};
1934
1935static const struct net_device_ops bcm_sysport_netdev_ops = {
1936 .ndo_start_xmit = bcm_sysport_xmit,
1937 .ndo_tx_timeout = bcm_sysport_tx_timeout,
1938 .ndo_open = bcm_sysport_open,
1939 .ndo_stop = bcm_sysport_stop,
1940 .ndo_set_features = bcm_sysport_set_features,
1941 .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
Florian Fainellifb3b5962014-12-08 15:59:18 -08001942 .ndo_set_mac_address = bcm_sysport_change_mac,
Florian Fainelli6cec4f52015-07-31 11:42:55 -07001943#ifdef CONFIG_NET_POLL_CONTROLLER
1944 .ndo_poll_controller = bcm_sysport_poll_controller,
1945#endif
Florian Fainelli30defeb2017-03-23 10:36:46 -07001946 .ndo_get_stats = bcm_sysport_get_nstats,
Florian Fainelli80105be2014-04-24 18:08:57 -07001947};
1948
1949#define REV_FMT "v%2x.%02x"
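/* As printed in the probe banner below, the major revision comes from bits
 * 15:8 and the minor revision from bits 7:0 of the masked REV_CNTL value.
 */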
1950
Florian Fainelli44a45242017-01-20 11:08:27 -08001951static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
1952 [SYSTEMPORT] = {
1953 .is_lite = false,
1954 .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
1955 },
1956 [SYSTEMPORT_LITE] = {
1957 .is_lite = true,
1958 .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
1959 },
1960};
1961
1962static const struct of_device_id bcm_sysport_of_match[] = {
1963 { .compatible = "brcm,systemportlite-v1.00",
1964 .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
1965 { .compatible = "brcm,systemport-v1.00",
1966 .data = &bcm_sysport_params[SYSTEMPORT] },
1967 { .compatible = "brcm,systemport",
1968 .data = &bcm_sysport_params[SYSTEMPORT] },
1969 { /* sentinel */ }
1970};
1971MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
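/* For illustration only: a hypothetical device tree node wired up to this
 * driver might look like the snippet below (addresses and interrupt
 * specifiers are made up; only the compatible strings from the table above
 * and properties actually parsed by bcm_sysport_probe() are shown):
 *
 *	ethernet@f04a0000 {
 *		compatible = "brcm,systemport-v1.00";
 *		reg = <0xf04a0000 0x4650>;
 *		interrupts = <0 22 0>, <0 23 0>, <0 24 0>;
 *		systemport,num-txq = <32>;
 *		systemport,num-rxq = <1>;
 *		phy-mode = "gmii";
 *	};
 */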
1972
Florian Fainelli80105be2014-04-24 18:08:57 -07001973static int bcm_sysport_probe(struct platform_device *pdev)
1974{
Florian Fainelli44a45242017-01-20 11:08:27 -08001975 const struct bcm_sysport_hw_params *params;
1976 const struct of_device_id *of_id = NULL;
Florian Fainelli80105be2014-04-24 18:08:57 -07001977 struct bcm_sysport_priv *priv;
1978 struct device_node *dn;
1979 struct net_device *dev;
1980 const void *macaddr;
1981 struct resource *r;
1982 u32 txq, rxq;
1983 int ret;
1984
1985 dn = pdev->dev.of_node;
1986 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Florian Fainelli44a45242017-01-20 11:08:27 -08001987 of_id = of_match_node(bcm_sysport_of_match, dn);
1988 if (!of_id || !of_id->data)
1989 return -EINVAL;
1990
1991 /* Fairly quickly we need to know the type of adapter we have */
1992 params = of_id->data;
Florian Fainelli80105be2014-04-24 18:08:57 -07001993
1994 /* Read the Transmit/Receive Queue properties */
1995 if (of_property_read_u32(dn, "systemport,num-txq", &txq))
1996 txq = TDMA_NUM_RINGS;
1997 if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
1998 rxq = 1;
1999
Florian Fainelli7b78be42017-01-20 11:08:26 -08002000 /* Sanity check the number of transmit queues */
2001 if (!txq || txq > TDMA_NUM_RINGS)
2002 return -EINVAL;
2003
Florian Fainelli80105be2014-04-24 18:08:57 -07002004 dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
2005 if (!dev)
2006 return -ENOMEM;
2007
2008 /* Initialize private members */
2009 priv = netdev_priv(dev);
2010
Florian Fainelli7b78be42017-01-20 11:08:26 -08002011 /* Allocate number of TX rings */
2012 priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
2013 sizeof(struct bcm_sysport_tx_ring),
2014 GFP_KERNEL);
2015 if (!priv->tx_rings)
2016 return -ENOMEM;
2017
Florian Fainelli44a45242017-01-20 11:08:27 -08002018 priv->is_lite = params->is_lite;
2019 priv->num_rx_desc_words = params->num_rx_desc_words;
2020
Florian Fainelli80105be2014-04-24 18:08:57 -07002021 priv->irq0 = platform_get_irq(pdev, 0);
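	/* SYSTEMPORT Lite signals TX completions on the first interrupt line,
	 * so a dedicated TDMA interrupt (irq1) is only expected on the full
	 * SYSTEMPORT.
	 */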
Florian Fainelli44a45242017-01-20 11:08:27 -08002022 if (!priv->is_lite)
2023 priv->irq1 = platform_get_irq(pdev, 1);
Florian Fainelli83e82f42014-07-01 21:08:40 -07002024 priv->wol_irq = platform_get_irq(pdev, 2);
Florian Fainelli44a45242017-01-20 11:08:27 -08002025 if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
Florian Fainelli80105be2014-04-24 18:08:57 -07002026 dev_err(&pdev->dev, "invalid interrupts\n");
2027 ret = -EINVAL;
Johan Hovold39f8b0d2016-11-28 19:24:58 +01002028 goto err_free_netdev;
Florian Fainelli80105be2014-04-24 18:08:57 -07002029 }
2030
Jingoo Han126e6122014-05-14 12:15:42 +09002031 priv->base = devm_ioremap_resource(&pdev->dev, r);
2032 if (IS_ERR(priv->base)) {
2033 ret = PTR_ERR(priv->base);
Johan Hovold39f8b0d2016-11-28 19:24:58 +01002034 goto err_free_netdev;
Florian Fainelli80105be2014-04-24 18:08:57 -07002035 }
2036
2037 priv->netdev = dev;
2038 priv->pdev = pdev;
2039
2040 priv->phy_interface = of_get_phy_mode(dn);
2041 /* Default to GMII interface mode */
2042 if (priv->phy_interface < 0)
2043 priv->phy_interface = PHY_INTERFACE_MODE_GMII;
2044
Florian Fainelli186534a2014-05-22 09:47:46 -07002045 /* In the case of a fixed PHY, the DT node associated
2046 * with the PHY is the Ethernet MAC DT node.
2047 */
2048 if (of_phy_is_fixed_link(dn)) {
2049 ret = of_phy_register_fixed_link(dn);
2050 if (ret) {
2051 dev_err(&pdev->dev, "failed to register fixed PHY\n");
Johan Hovold39f8b0d2016-11-28 19:24:58 +01002052 goto err_free_netdev;
Florian Fainelli186534a2014-05-22 09:47:46 -07002053 }
2054
2055 priv->phy_dn = dn;
2056 }
2057
Florian Fainelli80105be2014-04-24 18:08:57 -07002058 /* Initialize netdevice members */
2059 macaddr = of_get_mac_address(dn);
2060 if (!macaddr || !is_valid_ether_addr(macaddr)) {
2061 dev_warn(&pdev->dev, "using random Ethernet MAC\n");
Vaishali Thakkaradb35052015-07-08 10:49:30 +05302062 eth_hw_addr_random(dev);
Florian Fainelli80105be2014-04-24 18:08:57 -07002063 } else {
2064 ether_addr_copy(dev->dev_addr, macaddr);
2065 }
2066
2067 SET_NETDEV_DEV(dev, &pdev->dev);
2068 dev_set_drvdata(&pdev->dev, dev);
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00002069 dev->ethtool_ops = &bcm_sysport_ethtool_ops;
Florian Fainelli80105be2014-04-24 18:08:57 -07002070 dev->netdev_ops = &bcm_sysport_netdev_ops;
2071 netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
2072
2073 /* HW supported features, none enabled by default */
2074 dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
2075 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2076
Florian Fainelli83e82f42014-07-01 21:08:40 -07002077 /* Request the WOL interrupt and advertise suspend if available */
2078 priv->wol_irq_disabled = 1;
2079 ret = devm_request_irq(&pdev->dev, priv->wol_irq,
Florian Fainelli23acb2f2014-07-09 17:36:46 -07002080 bcm_sysport_wol_isr, 0, dev->name, priv);
Florian Fainelli83e82f42014-07-01 21:08:40 -07002081 if (!ret)
2082 device_set_wakeup_capable(&pdev->dev, 1);
2083
Florian Fainelli80105be2014-04-24 18:08:57 -07002084 /* Set the needed headroom once and for all */
Paul Gortmaker3afc5572014-05-30 15:39:30 -04002085 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
2086 dev->needed_headroom += sizeof(struct bcm_tsb);
Florian Fainelli80105be2014-04-24 18:08:57 -07002087
Florian Fainellif532e742014-06-05 10:22:18 -07002088 /* libphy will adjust the link state accordingly */
2089 netif_carrier_off(dev);
2090
Florian Fainelli80105be2014-04-24 18:08:57 -07002091 ret = register_netdev(dev);
2092 if (ret) {
2093 dev_err(&pdev->dev, "failed to register net_device\n");
Johan Hovold39f8b0d2016-11-28 19:24:58 +01002094 goto err_deregister_fixed_link;
Florian Fainelli80105be2014-04-24 18:08:57 -07002095 }
2096
2097 priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
2098 dev_info(&pdev->dev,
Florian Fainelli44a45242017-01-20 11:08:27 -08002099 "Broadcom SYSTEMPORT%s" REV_FMT
Florian Fainelli23acb2f2014-07-09 17:36:46 -07002100 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
Florian Fainelli44a45242017-01-20 11:08:27 -08002101 priv->is_lite ? " Lite" : "",
Florian Fainelli23acb2f2014-07-09 17:36:46 -07002102 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
2103 priv->base, priv->irq0, priv->irq1, txq, rxq);
Florian Fainelli80105be2014-04-24 18:08:57 -07002104
2105 return 0;
Johan Hovold39f8b0d2016-11-28 19:24:58 +01002106
2107err_deregister_fixed_link:
2108 if (of_phy_is_fixed_link(dn))
2109 of_phy_deregister_fixed_link(dn);
2110err_free_netdev:
Florian Fainelli80105be2014-04-24 18:08:57 -07002111 free_netdev(dev);
2112 return ret;
2113}
2114
2115static int bcm_sysport_remove(struct platform_device *pdev)
2116{
2117 struct net_device *dev = dev_get_drvdata(&pdev->dev);
Johan Hovold39f8b0d2016-11-28 19:24:58 +01002118 struct device_node *dn = pdev->dev.of_node;
Florian Fainelli80105be2014-04-24 18:08:57 -07002119
2120 /* Not much to do, ndo_close has been called
2121 * and we use managed allocations
2122 */
2123 unregister_netdev(dev);
Johan Hovold39f8b0d2016-11-28 19:24:58 +01002124 if (of_phy_is_fixed_link(dn))
2125 of_phy_deregister_fixed_link(dn);
Florian Fainelli80105be2014-04-24 18:08:57 -07002126 free_netdev(dev);
2127 dev_set_drvdata(&pdev->dev, NULL);
2128
2129 return 0;
2130}
2131
Florian Fainelli40755a02014-07-01 21:08:38 -07002132#ifdef CONFIG_PM_SLEEP
Florian Fainelli83e82f42014-07-01 21:08:40 -07002133static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
2134{
2135 struct net_device *ndev = priv->netdev;
2136 unsigned int timeout = 1000;
2137 u32 reg;
2138
2139 /* Password has already been programmed */
2140 reg = umac_readl(priv, UMAC_MPD_CTRL);
2141 reg |= MPD_EN;
2142 reg &= ~PSW_EN;
2143 if (priv->wolopts & WAKE_MAGICSECURE)
2144 reg |= PSW_EN;
2145 umac_writel(priv, reg, UMAC_MPD_CTRL);
2146
2147 /* Make sure RBUF entered WoL mode as a result */
2148 do {
2149 reg = rbuf_readl(priv, RBUF_STATUS);
2150 if (reg & RBUF_WOL_MODE)
2151 break;
2152
2153 udelay(10);
2154 } while (timeout-- > 0);
2155
2156 /* Do not leave the UniMAC RBUF matching only MPD packets */
2157 if (!timeout) {
2158 reg = umac_readl(priv, UMAC_MPD_CTRL);
2159 reg &= ~MPD_EN;
2160 umac_writel(priv, reg, UMAC_MPD_CTRL);
2161 netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
2162 return -ETIMEDOUT;
2163 }
2164
2165 /* UniMAC receive needs to be turned on */
2166 umac_enable_set(priv, CMD_RX_EN, 1);
2167
2168 /* Enable the interrupt wake-up source */
2169 intrl2_0_mask_clear(priv, INTRL2_0_MPD);
2170
2171 netif_dbg(priv, wol, ndev, "entered WOL mode\n");
2172
2173 return 0;
2174}
2175
Florian Fainelli40755a02014-07-01 21:08:38 -07002176static int bcm_sysport_suspend(struct device *d)
2177{
2178 struct net_device *dev = dev_get_drvdata(d);
2179 struct bcm_sysport_priv *priv = netdev_priv(dev);
2180 unsigned int i;
Florian Fainelli83e82f42014-07-01 21:08:40 -07002181 int ret = 0;
Florian Fainelli40755a02014-07-01 21:08:38 -07002182 u32 reg;
2183
2184 if (!netif_running(dev))
2185 return 0;
2186
2187 bcm_sysport_netif_stop(dev);
2188
Philippe Reynes715a0222016-06-19 20:39:08 +02002189 phy_suspend(dev->phydev);
Florian Fainelli40755a02014-07-01 21:08:38 -07002190
2191 netif_device_detach(dev);
2192
2193 /* Disable UniMAC RX */
2194 umac_enable_set(priv, CMD_RX_EN, 0);
2195
2196 ret = rdma_enable_set(priv, 0);
2197 if (ret) {
2198 netdev_err(dev, "RDMA timeout!\n");
2199 return ret;
2200 }
2201
2202 /* Disable RXCHK if enabled */
Florian Fainelli9d34c1c2014-07-01 21:08:39 -07002203 if (priv->rx_chk_en) {
Florian Fainelli40755a02014-07-01 21:08:38 -07002204 reg = rxchk_readl(priv, RXCHK_CONTROL);
2205 reg &= ~RXCHK_EN;
2206 rxchk_writel(priv, reg, RXCHK_CONTROL);
2207 }
2208
2209 /* Flush RX pipe, unless the RX path must stay up for Wake-on-LAN */
Florian Fainelli83e82f42014-07-01 21:08:40 -07002210 if (!priv->wolopts)
2211 topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
Florian Fainelli40755a02014-07-01 21:08:38 -07002212
2213 ret = tdma_enable_set(priv, 0);
2214 if (ret) {
2215 netdev_err(dev, "TDMA timeout!\n");
2216 return ret;
2217 }
2218
2219 /* Wait for a packet boundary */
2220 usleep_range(2000, 3000);
2221
2222 umac_enable_set(priv, CMD_TX_EN, 0);
2223
2224 topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
2225
2226 /* Free RX/TX rings SW structures */
2227 for (i = 0; i < dev->num_tx_queues; i++)
2228 bcm_sysport_fini_tx_ring(priv, i);
2229 bcm_sysport_fini_rx_ring(priv);
2230
Florian Fainelli83e82f42014-07-01 21:08:40 -07002231 /* Get prepared for Wake-on-LAN */
2232 if (device_may_wakeup(d) && priv->wolopts)
2233 ret = bcm_sysport_suspend_to_wol(priv);
2234
2235 return ret;
Florian Fainelli40755a02014-07-01 21:08:38 -07002236}
2237
2238static int bcm_sysport_resume(struct device *d)
2239{
2240 struct net_device *dev = dev_get_drvdata(d);
2241 struct bcm_sysport_priv *priv = netdev_priv(dev);
2242 unsigned int i;
2243 u32 reg;
2244 int ret;
2245
2246 if (!netif_running(dev))
2247 return 0;
2248
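	/* Bring the UniMAC back to a known state before reprogramming it */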
Florian Fainelli704d33e2014-10-28 11:12:01 -07002249 umac_reset(priv);
2250
Florian Fainelli83e82f42014-07-01 21:08:40 -07002251 /* We may have been suspended and never received a WOL event that
2252 * would turn off MPD detection; take care of that now.
2253 */
2254 bcm_sysport_resume_from_wol(priv);
2255
Florian Fainelli40755a02014-07-01 21:08:38 -07002256 /* Initialize both hardware and software ring */
2257 for (i = 0; i < dev->num_tx_queues; i++) {
2258 ret = bcm_sysport_init_tx_ring(priv, i);
2259 if (ret) {
2260 netdev_err(dev, "failed to initialize TX ring %d\n",
Florian Fainelli23acb2f2014-07-09 17:36:46 -07002261 i);
Florian Fainelli40755a02014-07-01 21:08:38 -07002262 goto out_free_tx_rings;
2263 }
2264 }
2265
2266 /* Initialize linked-list */
2267 tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
2268
2269 /* Initialize RX ring */
2270 ret = bcm_sysport_init_rx_ring(priv);
2271 if (ret) {
2272 netdev_err(dev, "failed to initialize RX ring\n");
2273 goto out_free_rx_ring;
2274 }
2275
2276 netif_device_attach(dev);
2277
Florian Fainelli40755a02014-07-01 21:08:38 -07002278 /* RX pipe enable */
2279 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
2280
2281 ret = rdma_enable_set(priv, 1);
2282 if (ret) {
2283 netdev_err(dev, "failed to enable RDMA\n");
2284 goto out_free_rx_ring;
2285 }
2286
2287 /* Re-enable RXCHK if it was enabled */
Florian Fainelli9d34c1c2014-07-01 21:08:39 -07002288 if (priv->rx_chk_en) {
Florian Fainelli40755a02014-07-01 21:08:38 -07002289 reg = rxchk_readl(priv, RXCHK_CONTROL);
2290 reg |= RXCHK_EN;
2291 rxchk_writel(priv, reg, RXCHK_CONTROL);
2292 }
2293
2294 rbuf_init(priv);
2295
2296 /* Set maximum frame length */
Florian Fainelli44a45242017-01-20 11:08:27 -08002297 if (!priv->is_lite)
2298 umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2299 else
2300 gib_set_pad_extension(priv);
Florian Fainelli40755a02014-07-01 21:08:38 -07002301
2302 /* Set MAC address */
2303 umac_set_hw_addr(priv, dev->dev_addr);
2304
2305 umac_enable_set(priv, CMD_RX_EN, 1);
2306
2307 /* TX pipe enable */
2308 topctrl_writel(priv, 0, TX_FLUSH_CNTL);
2309
2310 umac_enable_set(priv, CMD_TX_EN, 1);
2311
2312 ret = tdma_enable_set(priv, 1);
2313 if (ret) {
2314 netdev_err(dev, "TDMA timeout!\n");
2315 goto out_free_rx_ring;
2316 }
2317
Philippe Reynes715a0222016-06-19 20:39:08 +02002318 phy_resume(dev->phydev);
Florian Fainelli40755a02014-07-01 21:08:38 -07002319
2320 bcm_sysport_netif_start(dev);
2321
2322 return 0;
2323
2324out_free_rx_ring:
2325 bcm_sysport_fini_rx_ring(priv);
2326out_free_tx_rings:
2327 for (i = 0; i < dev->num_tx_queues; i++)
2328 bcm_sysport_fini_tx_ring(priv, i);
2329 return ret;
2330}
2331#endif
2332
2333static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
2334 bcm_sysport_suspend, bcm_sysport_resume);
2335
Florian Fainelli80105be2014-04-24 18:08:57 -07002336static struct platform_driver bcm_sysport_driver = {
2337 .probe = bcm_sysport_probe,
2338 .remove = bcm_sysport_remove,
2339 .driver = {
2340 .name = "brcm-systemport",
Florian Fainelli80105be2014-04-24 18:08:57 -07002341 .of_match_table = bcm_sysport_of_match,
Florian Fainelli40755a02014-07-01 21:08:38 -07002342 .pm = &bcm_sysport_pm_ops,
Florian Fainelli80105be2014-04-24 18:08:57 -07002343 },
2344};
2345module_platform_driver(bcm_sysport_driver);
2346
2347MODULE_AUTHOR("Broadcom Corporation");
2348MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
2349MODULE_ALIAS("platform:brcm-systemport");
2350MODULE_LICENSE("GPL");