/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			     offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "esw", "gp1", "gp2", "trgpll"
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

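/* wait for the PHY indirect access controller (IAC) to become idle */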
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
			   u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

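/* reprogram the GMAC0 interface mode, the TRGMII PLL rate and the RX/TX
 * clock controls to match a 10/100 or 1000 Mbit/s link
 */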
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
{
	u32 val;
	int ret;

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

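/* phylib adjust_link callback: fold the negotiated speed, duplex and pause
 * settings into the per-MAC control register (MAC_MCR)
 */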
static void mtk_phy_link_adjust(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
		  MAC_MCR_BACKPR_EN;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	switch (dev->phydev->speed) {
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	if (mac->id == 0 && !mac->trgmii)
		mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);

	if (dev->phydev->link)
		mcr |= MAC_MCR_FORCE_LINK;

	if (dev->phydev->duplex) {
		mcr |= MAC_MCR_FORCE_DPX;

		if (dev->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (dev->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		if (dev->phydev->advertising & ADVERTISED_Pause)
			lcl_adv |= ADVERTISE_PAUSE_CAP;
		if (dev->phydev->advertising & ADVERTISED_Asym_Pause)
			lcl_adv |= ADVERTISE_PAUSE_ASYM;

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= MAC_MCR_FORCE_RX_FC;

		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

	if (dev->phydev->link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
}

static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
				struct device_node *phy_node)
{
	struct phy_device *phydev;
	int phy_mode;

	phy_mode = of_get_phy_mode(phy_node);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
		return -EINVAL;
	}

	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
				mtk_phy_link_adjust, 0, phy_mode);
	if (!phydev) {
		dev_err(eth->dev, "could not connect to PHY\n");
		return -ENODEV;
	}

	dev_info(eth->dev,
		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
		 mac->id, phydev_name(phydev), phydev->phy_id,
		 phydev->drv->name);

	return 0;
}

static int mtk_phy_connect(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth;
	struct device_node *np;
	u32 val;

	eth = mac->hw;
	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(mac->of_node))
		if (!of_phy_register_fixed_link(mac->of_node))
			np = of_node_get(mac->of_node);
	if (!np)
		return -ENODEV;

	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_TRGMII:
		mac->trgmii = true;
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII:
		mac->ge_mode = 0;
		break;
	case PHY_INTERFACE_MODE_MII:
		mac->ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		mac->ge_mode = 2;
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!mac->id)
			goto err_phy;
		mac->ge_mode = 3;
		break;
	default:
		goto err_phy;
	}

	/* put the gmac into the right mode */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
	val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	/* couple phydev to net_device */
	mtk_phy_connect_node(eth, mac, np);
	dev->phydev->autoneg = AUTONEG_ENABLE;
	dev->phydev->speed = 0;
	dev->phydev->duplex = 0;

	if (of_phy_is_fixed_link(mac->of_node))
		dev->phydev->supported |=
		SUPPORTED_Pause | SUPPORTED_Asym_Pause;

	dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
				  SUPPORTED_Asym_Pause;
	dev->phydev->advertising = dev->phydev->supported |
				   ADVERTISED_Autoneg;
	phy_start_aneg(dev->phydev);

	of_node_put(np);

	return 0;

err_phy:
	of_node_put(np);
	dev_err(eth->dev, "invalid phy_mode\n");
	return -EINVAL;
}

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

static inline void mtk_irq_disable(struct mtk_eth *eth,
				   unsigned reg, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	val = mtk_r32(eth, reg);
	mtk_w32(eth, val & ~mask, reg);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static inline void mtk_irq_enable(struct mtk_eth *eth,
				  unsigned reg, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->irq_lock, flags);
	val = mtk_r32(eth, reg);
	mtk_w32(eth, val | mask, reg);
	spin_unlock_irqrestore(&eth->irq_lock, flags);
}

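/* program the interface MAC address into the GDMA address-match registers */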
static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
		mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

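/* ndo_get_stats64 callback: fold the hardware MIB counters kept in hw_stats
 * into the rtnl_link_stats64 structure handed down by the stack
 */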
static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;

	return storage;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

526/* the qdma core needs scratch memory to be setup */
527static int mtk_init_fq_dma(struct mtk_eth *eth)
528{
John Crispin605e4fe2016-06-10 13:27:59 +0200529 dma_addr_t phy_ring_tail;
John Crispin656e7052016-03-08 11:29:55 +0100530 int cnt = MTK_DMA_SIZE;
531 dma_addr_t dma_addr;
532 int i;
533
534 eth->scratch_ring = dma_alloc_coherent(eth->dev,
535 cnt * sizeof(struct mtk_tx_dma),
John Crispin605e4fe2016-06-10 13:27:59 +0200536 &eth->phy_scratch_ring,
John Crispin656e7052016-03-08 11:29:55 +0100537 GFP_ATOMIC | __GFP_ZERO);
538 if (unlikely(!eth->scratch_ring))
539 return -ENOMEM;
540
541 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
542 GFP_KERNEL);
John Crispin562c5a72016-06-10 13:27:58 +0200543 if (unlikely(!eth->scratch_head))
544 return -ENOMEM;
545
John Crispin656e7052016-03-08 11:29:55 +0100546 dma_addr = dma_map_single(eth->dev,
547 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
548 DMA_FROM_DEVICE);
549 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
550 return -ENOMEM;
551
552 memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
John Crispin605e4fe2016-06-10 13:27:59 +0200553 phy_ring_tail = eth->phy_scratch_ring +
John Crispin656e7052016-03-08 11:29:55 +0100554 (sizeof(struct mtk_tx_dma) * (cnt - 1));
555
556 for (i = 0; i < cnt; i++) {
557 eth->scratch_ring[i].txd1 =
558 (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
559 if (i < cnt - 1)
John Crispin605e4fe2016-06-10 13:27:59 +0200560 eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
John Crispin656e7052016-03-08 11:29:55 +0100561 ((i + 1) * sizeof(struct mtk_tx_dma)));
562 eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
563 }
564
John Crispin605e4fe2016-06-10 13:27:59 +0200565 mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
John Crispin656e7052016-03-08 11:29:55 +0100566 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
567 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
568 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
569
570 return 0;
571}
572
573static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
574{
575 void *ret = ring->dma;
576
577 return ret + (desc - ring->phys);
578}
579
580static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
581 struct mtk_tx_dma *txd)
582{
583 int idx = txd - ring->dma;
584
585 return &ring->buf[idx];
586}
587
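/* release the DMA mapping, and the skb if one is attached, for a tx buffer */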
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +0800588static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
John Crispin656e7052016-03-08 11:29:55 +0100589{
590 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +0800591 dma_unmap_single(eth->dev,
John Crispin656e7052016-03-08 11:29:55 +0100592 dma_unmap_addr(tx_buf, dma_addr0),
593 dma_unmap_len(tx_buf, dma_len0),
594 DMA_TO_DEVICE);
595 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +0800596 dma_unmap_page(eth->dev,
John Crispin656e7052016-03-08 11:29:55 +0100597 dma_unmap_addr(tx_buf, dma_addr0),
598 dma_unmap_len(tx_buf, dma_len0),
599 DMA_TO_DEVICE);
600 }
601 tx_buf->flags = 0;
602 if (tx_buf->skb &&
603 (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
604 dev_kfree_skb_any(tx_buf->skb);
605 tx_buf->skb = NULL;
606}
607
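/* map the skb head and all of its fragments onto a chain of TX descriptors
 * and hand the chain over to the QDMA engine
 */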
608static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
609 int tx_num, struct mtk_tx_ring *ring, bool gso)
610{
611 struct mtk_mac *mac = netdev_priv(dev);
612 struct mtk_eth *eth = mac->hw;
613 struct mtk_tx_dma *itxd, *txd;
614 struct mtk_tx_buf *tx_buf;
John Crispin656e7052016-03-08 11:29:55 +0100615 dma_addr_t mapped_addr;
616 unsigned int nr_frags;
617 int i, n_desc = 1;
Sean Wangc6f1dc42016-09-01 10:47:27 +0800618 u32 txd4 = 0, fport;
John Crispin656e7052016-03-08 11:29:55 +0100619
620 itxd = ring->next_free;
621 if (itxd == ring->last_free)
622 return -ENOMEM;
623
624 /* set the forward port */
Sean Wangc6f1dc42016-09-01 10:47:27 +0800625 fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
626 txd4 |= fport;
John Crispin656e7052016-03-08 11:29:55 +0100627
628 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
629 memset(tx_buf, 0, sizeof(*tx_buf));
630
631 if (gso)
632 txd4 |= TX_DMA_TSO;
633
634 /* TX Checksum offload */
635 if (skb->ip_summed == CHECKSUM_PARTIAL)
636 txd4 |= TX_DMA_CHKSUM;
637
638 /* VLAN header offload */
639 if (skb_vlan_tag_present(skb))
640 txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
641
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +0800642 mapped_addr = dma_map_single(eth->dev, skb->data,
John Crispin656e7052016-03-08 11:29:55 +0100643 skb_headlen(skb), DMA_TO_DEVICE);
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +0800644 if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
John Crispin656e7052016-03-08 11:29:55 +0100645 return -ENOMEM;
646
John Crispin656e7052016-03-08 11:29:55 +0100647 WRITE_ONCE(itxd->txd1, mapped_addr);
648 tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
649 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
650 dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
651
652 /* TX SG offload */
653 txd = itxd;
654 nr_frags = skb_shinfo(skb)->nr_frags;
655 for (i = 0; i < nr_frags; i++) {
656 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
657 unsigned int offset = 0;
658 int frag_size = skb_frag_size(frag);
659
660 while (frag_size) {
661 bool last_frag = false;
662 unsigned int frag_map_size;
663
664 txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
665 if (txd == ring->last_free)
666 goto err_dma;
667
668 n_desc++;
669 frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +0800670 mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
John Crispin656e7052016-03-08 11:29:55 +0100671 frag_map_size,
672 DMA_TO_DEVICE);
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +0800673 if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
John Crispin656e7052016-03-08 11:29:55 +0100674 goto err_dma;
675
676 if (i == nr_frags - 1 &&
677 (frag_size - frag_map_size) == 0)
678 last_frag = true;
679
680 WRITE_ONCE(txd->txd1, mapped_addr);
681 WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
682 TX_DMA_PLEN0(frag_map_size) |
John Crispin369f0452016-04-08 00:54:11 +0200683 last_frag * TX_DMA_LS0));
Sean Wangc6f1dc42016-09-01 10:47:27 +0800684 WRITE_ONCE(txd->txd4, fport);
John Crispin656e7052016-03-08 11:29:55 +0100685
686 tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
687 tx_buf = mtk_desc_to_tx_buf(ring, txd);
688 memset(tx_buf, 0, sizeof(*tx_buf));
689
690 tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
691 dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
692 dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
693 frag_size -= frag_map_size;
694 offset += frag_map_size;
695 }
696 }
697
698 /* store skb to cleanup */
699 tx_buf->skb = skb;
700
701 WRITE_ONCE(itxd->txd4, txd4);
702 WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
703 (!nr_frags * TX_DMA_LS0)));
704
John Crispin656e7052016-03-08 11:29:55 +0100705 netdev_sent_queue(dev, skb->len);
706 skb_tx_timestamp(skb);
707
708 ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
709 atomic_sub(n_desc, &ring->free_count);
710
711 /* make sure that all changes to the dma ring are flushed before we
712 * continue
713 */
714 wmb();
715
716 if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
717 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
718
719 return 0;
720
721err_dma:
722 do {
John Crispin2fae7232016-06-10 13:28:00 +0200723 tx_buf = mtk_desc_to_tx_buf(ring, itxd);
John Crispin656e7052016-03-08 11:29:55 +0100724
725 /* unmap dma */
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +0800726 mtk_tx_unmap(eth, tx_buf);
John Crispin656e7052016-03-08 11:29:55 +0100727
728 itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
729 itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
730 } while (itxd != txd);
731
732 return -ENOMEM;
733}
734
735static inline int mtk_cal_txd_req(struct sk_buff *skb)
736{
737 int i, nfrags;
738 struct skb_frag_struct *frag;
739
740 nfrags = 1;
741 if (skb_is_gso(skb)) {
742 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
743 frag = &skb_shinfo(skb)->frags[i];
744 nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
745 }
746 } else {
747 nfrags += skb_shinfo(skb)->nr_frags;
748 }
749
John Crispinbeeb4ca2016-04-08 00:54:05 +0200750 return nfrags;
John Crispin656e7052016-03-08 11:29:55 +0100751}
752
John Crispinad3cba92016-06-10 13:28:07 +0200753static int mtk_queue_stopped(struct mtk_eth *eth)
754{
755 int i;
756
757 for (i = 0; i < MTK_MAC_COUNT; i++) {
758 if (!eth->netdev[i])
759 continue;
760 if (netif_queue_stopped(eth->netdev[i]))
761 return 1;
762 }
763
764 return 0;
765}
766
John Crispin13c822f2016-04-08 00:54:07 +0200767static void mtk_wake_queue(struct mtk_eth *eth)
768{
769 int i;
770
771 for (i = 0; i < MTK_MAC_COUNT; i++) {
772 if (!eth->netdev[i])
773 continue;
774 netif_wake_queue(eth->netdev[i]);
775 }
776}
777
778static void mtk_stop_queue(struct mtk_eth *eth)
779{
780 int i;
781
782 for (i = 0; i < MTK_MAC_COUNT; i++) {
783 if (!eth->netdev[i])
784 continue;
785 netif_stop_queue(eth->netdev[i]);
786 }
787}
788
John Crispin656e7052016-03-08 11:29:55 +0100789static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
790{
791 struct mtk_mac *mac = netdev_priv(dev);
792 struct mtk_eth *eth = mac->hw;
793 struct mtk_tx_ring *ring = &eth->tx_ring;
794 struct net_device_stats *stats = &dev->stats;
795 bool gso = false;
796 int tx_num;
797
John Crispin34c2e4c2016-04-08 00:54:08 +0200798 /* normally we can rely on the stack not calling this more than once,
799 * however we have 2 queues running on the same ring so we need to lock
800 * the ring access
801 */
Sean Wange3e96522016-08-11 17:51:00 +0800802 spin_lock(&eth->page_lock);
John Crispin34c2e4c2016-04-08 00:54:08 +0200803
Sean Wangdce6fa42016-09-14 23:13:21 +0800804 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
805 goto drop;
806
John Crispin656e7052016-03-08 11:29:55 +0100807 tx_num = mtk_cal_txd_req(skb);
808 if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
John Crispin13c822f2016-04-08 00:54:07 +0200809 mtk_stop_queue(eth);
John Crispin656e7052016-03-08 11:29:55 +0100810 netif_err(eth, tx_queued, dev,
811 "Tx Ring full when queue awake!\n");
Sean Wange3e96522016-08-11 17:51:00 +0800812 spin_unlock(&eth->page_lock);
John Crispin656e7052016-03-08 11:29:55 +0100813 return NETDEV_TX_BUSY;
814 }
815
816 /* TSO: fill MSS info in tcp checksum field */
817 if (skb_is_gso(skb)) {
818 if (skb_cow_head(skb, 0)) {
819 netif_warn(eth, tx_err, dev,
820 "GSO expand head fail.\n");
821 goto drop;
822 }
823
824 if (skb_shinfo(skb)->gso_type &
825 (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
826 gso = true;
827 tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
828 }
829 }
830
831 if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
832 goto drop;
833
John Crispin82c65442016-06-10 13:28:08 +0200834 if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
John Crispin13c822f2016-04-08 00:54:07 +0200835 mtk_stop_queue(eth);
John Crispin82c65442016-06-10 13:28:08 +0200836
Sean Wange3e96522016-08-11 17:51:00 +0800837 spin_unlock(&eth->page_lock);
John Crispin656e7052016-03-08 11:29:55 +0100838
839 return NETDEV_TX_OK;
840
841drop:
Sean Wange3e96522016-08-11 17:51:00 +0800842 spin_unlock(&eth->page_lock);
John Crispin656e7052016-03-08 11:29:55 +0100843 stats->tx_dropped++;
844 dev_kfree_skb(skb);
845 return NETDEV_TX_OK;
846}
847
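/* with hardware LRO several RX rings are in use: return ring 0 when LRO is
 * disabled, otherwise the first ring with a completed descriptor pending,
 * or NULL if nothing is ready
 */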
Nelson Changee406812016-09-17 23:50:55 +0800848static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
849{
850 int i;
851 struct mtk_rx_ring *ring;
852 int idx;
853
854 if (!eth->hwlro)
855 return &eth->rx_ring[0];
856
857 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
858 ring = &eth->rx_ring[i];
859 idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
860 if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
861 ring->calc_idx_update = true;
862 return ring;
863 }
864 }
865
866 return NULL;
867}
868
869static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
870{
871 struct mtk_rx_ring *ring;
872 int i;
873
874 if (!eth->hwlro) {
875 ring = &eth->rx_ring[0];
876 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
877 } else {
878 for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
879 ring = &eth->rx_ring[i];
880 if (ring->calc_idx_update) {
881 ring->calc_idx_update = false;
882 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
883 }
884 }
885 }
886}
887
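/* NAPI RX handler: replace each completed buffer with a freshly allocated
 * fragment, pass the received frame up the stack and advance the CPU index
 */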
John Crispin656e7052016-03-08 11:29:55 +0100888static int mtk_poll_rx(struct napi_struct *napi, int budget,
John Crispineece71e2016-06-29 13:38:09 +0200889 struct mtk_eth *eth)
John Crispin656e7052016-03-08 11:29:55 +0100890{
Nelson Changee406812016-09-17 23:50:55 +0800891 struct mtk_rx_ring *ring;
892 int idx;
John Crispin656e7052016-03-08 11:29:55 +0100893 struct sk_buff *skb;
894 u8 *data, *new_data;
895 struct mtk_rx_dma *rxd, trxd;
896 int done = 0;
897
898 while (done < budget) {
899 struct net_device *netdev;
900 unsigned int pktlen;
901 dma_addr_t dma_addr;
902 int mac = 0;
903
Nelson Changee406812016-09-17 23:50:55 +0800904 ring = mtk_get_rx_ring(eth);
905 if (unlikely(!ring))
906 goto rx_done;
907
908 idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
John Crispin656e7052016-03-08 11:29:55 +0100909 rxd = &ring->dma[idx];
910 data = ring->data[idx];
911
912 mtk_rx_get_desc(&trxd, rxd);
913 if (!(trxd.rxd2 & RX_DMA_DONE))
914 break;
915
		/* find out which mac the packet comes from. values start at 1 */
917 mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
918 RX_DMA_FPORT_MASK;
919 mac--;
920
921 netdev = eth->netdev[mac];
922
Sean Wangdce6fa42016-09-14 23:13:21 +0800923 if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
924 goto release_desc;
925
John Crispin656e7052016-03-08 11:29:55 +0100926 /* alloc new buffer */
927 new_data = napi_alloc_frag(ring->frag_size);
928 if (unlikely(!new_data)) {
929 netdev->stats.rx_dropped++;
930 goto release_desc;
931 }
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +0800932 dma_addr = dma_map_single(eth->dev,
John Crispin656e7052016-03-08 11:29:55 +0100933 new_data + NET_SKB_PAD,
934 ring->buf_size,
935 DMA_FROM_DEVICE);
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +0800936 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
John Crispin656e7052016-03-08 11:29:55 +0100937 skb_free_frag(new_data);
John Crispin94321a92016-06-10 13:28:01 +0200938 netdev->stats.rx_dropped++;
John Crispin656e7052016-03-08 11:29:55 +0100939 goto release_desc;
940 }
941
942 /* receive data */
943 skb = build_skb(data, ring->frag_size);
944 if (unlikely(!skb)) {
Sean Wang1b430792016-09-01 10:47:29 +0800945 skb_free_frag(new_data);
John Crispin94321a92016-06-10 13:28:01 +0200946 netdev->stats.rx_dropped++;
John Crispin656e7052016-03-08 11:29:55 +0100947 goto release_desc;
948 }
949 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
950
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +0800951 dma_unmap_single(eth->dev, trxd.rxd1,
John Crispin656e7052016-03-08 11:29:55 +0100952 ring->buf_size, DMA_FROM_DEVICE);
953 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
954 skb->dev = netdev;
955 skb_put(skb, pktlen);
956 if (trxd.rxd4 & RX_DMA_L4_VALID)
957 skb->ip_summed = CHECKSUM_UNNECESSARY;
958 else
959 skb_checksum_none_assert(skb);
960 skb->protocol = eth_type_trans(skb, netdev);
961
962 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
963 RX_DMA_VID(trxd.rxd3))
964 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
965 RX_DMA_VID(trxd.rxd3));
966 napi_gro_receive(napi, skb);
967
968 ring->data[idx] = new_data;
969 rxd->rxd1 = (unsigned int)dma_addr;
970
971release_desc:
972 rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
973
974 ring->calc_idx = idx;
Sean Wang635372a2016-09-03 17:59:26 +0800975
John Crispin656e7052016-03-08 11:29:55 +0100976 done++;
977 }
978
Nelson Changee406812016-09-17 23:50:55 +0800979rx_done:
Sean Wang41156ce2016-09-03 17:59:27 +0800980 if (done) {
981 /* make sure that all changes to the dma ring are flushed before
982 * we continue
983 */
984 wmb();
Nelson Changee406812016-09-17 23:50:55 +0800985 mtk_update_rx_cpu_idx(eth);
Sean Wang41156ce2016-09-03 17:59:27 +0800986 }
John Crispin656e7052016-03-08 11:29:55 +0100987
988 return done;
989}
990
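/* reclaim TX descriptors that the QDMA engine has released back to the CPU,
 * unmap and free the attached skbs and update the per-netdev BQL counters
 */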
John Crispin80673022016-06-29 13:38:11 +0200991static int mtk_poll_tx(struct mtk_eth *eth, int budget)
John Crispin656e7052016-03-08 11:29:55 +0100992{
993 struct mtk_tx_ring *ring = &eth->tx_ring;
994 struct mtk_tx_dma *desc;
995 struct sk_buff *skb;
996 struct mtk_tx_buf *tx_buf;
John Crispin80673022016-06-29 13:38:11 +0200997 unsigned int done[MTK_MAX_DEVS];
John Crispin656e7052016-03-08 11:29:55 +0100998 unsigned int bytes[MTK_MAX_DEVS];
999 u32 cpu, dma;
1000 static int condition;
John Crispin80673022016-06-29 13:38:11 +02001001 int total = 0, i;
John Crispin656e7052016-03-08 11:29:55 +01001002
1003 memset(done, 0, sizeof(done));
1004 memset(bytes, 0, sizeof(bytes));
1005
1006 cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
1007 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1008
1009 desc = mtk_qdma_phys_to_virt(ring, cpu);
1010
1011 while ((cpu != dma) && budget) {
1012 u32 next_cpu = desc->txd2;
1013 int mac;
1014
1015 desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1016 if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1017 break;
1018
1019 mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
1020 TX_DMA_FPORT_MASK;
1021 mac--;
1022
1023 tx_buf = mtk_desc_to_tx_buf(ring, desc);
1024 skb = tx_buf->skb;
1025 if (!skb) {
1026 condition = 1;
1027 break;
1028 }
1029
1030 if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1031 bytes[mac] += skb->len;
1032 done[mac]++;
1033 budget--;
1034 }
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +08001035 mtk_tx_unmap(eth, tx_buf);
John Crispin656e7052016-03-08 11:29:55 +01001036
John Crispin656e7052016-03-08 11:29:55 +01001037 ring->last_free = desc;
1038 atomic_inc(&ring->free_count);
1039
1040 cpu = next_cpu;
1041 }
1042
1043 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1044
1045 for (i = 0; i < MTK_MAC_COUNT; i++) {
1046 if (!eth->netdev[i] || !done[i])
1047 continue;
1048 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1049 total += done[i];
1050 }
1051
John Crispinad3cba92016-06-10 13:28:07 +02001052 if (mtk_queue_stopped(eth) &&
1053 (atomic_read(&ring->free_count) > ring->thresh))
John Crispin13c822f2016-04-08 00:54:07 +02001054 mtk_wake_queue(eth);
John Crispin656e7052016-03-08 11:29:55 +01001055
1056 return total;
1057}
1058
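/* "almost full" MIB counter interrupts: fold the hardware counters into the
 * 64 bit software stats before they can wrap around
 */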
John Crispin80673022016-06-29 13:38:11 +02001059static void mtk_handle_status_irq(struct mtk_eth *eth)
John Crispin656e7052016-03-08 11:29:55 +01001060{
John Crispin80673022016-06-29 13:38:11 +02001061 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
John Crispin656e7052016-03-08 11:29:55 +01001062
John Crispineece71e2016-06-29 13:38:09 +02001063 if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
John Crispin656e7052016-03-08 11:29:55 +01001064 mtk_stats_update(eth);
John Crispineece71e2016-06-29 13:38:09 +02001065 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
1066 MTK_INT_STATUS2);
John Crispin656e7052016-03-08 11:29:55 +01001067 }
John Crispin80673022016-06-29 13:38:11 +02001068}
1069
1070static int mtk_napi_tx(struct napi_struct *napi, int budget)
1071{
1072 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1073 u32 status, mask;
1074 int tx_done = 0;
1075
1076 mtk_handle_status_irq(eth);
1077 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
1078 tx_done = mtk_poll_tx(eth, budget);
John Crispin656e7052016-03-08 11:29:55 +01001079
1080 if (unlikely(netif_msg_intr(eth))) {
John Crispin80673022016-06-29 13:38:11 +02001081 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
John Crispin656e7052016-03-08 11:29:55 +01001082 mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
John Crispin80673022016-06-29 13:38:11 +02001083 dev_info(eth->dev,
1084 "done tx %d, intr 0x%08x/0x%x\n",
1085 tx_done, status, mask);
John Crispin656e7052016-03-08 11:29:55 +01001086 }
1087
John Crispin80673022016-06-29 13:38:11 +02001088 if (tx_done == budget)
John Crispin656e7052016-03-08 11:29:55 +01001089 return budget;
1090
1091 status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
John Crispin80673022016-06-29 13:38:11 +02001092 if (status & MTK_TX_DONE_INT)
John Crispin656e7052016-03-08 11:29:55 +01001093 return budget;
1094
1095 napi_complete(napi);
Nelson Changbacfd112016-08-26 01:09:42 +08001096 mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
John Crispin80673022016-06-29 13:38:11 +02001097
1098 return tx_done;
1099}
1100
1101static int mtk_napi_rx(struct napi_struct *napi, int budget)
1102{
1103 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1104 u32 status, mask;
1105 int rx_done = 0;
Sean Wang41156ce2016-09-03 17:59:27 +08001106 int remain_budget = budget;
John Crispin80673022016-06-29 13:38:11 +02001107
1108 mtk_handle_status_irq(eth);
Sean Wang41156ce2016-09-03 17:59:27 +08001109
1110poll_again:
Nelson Changbacfd112016-08-26 01:09:42 +08001111 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
Sean Wang41156ce2016-09-03 17:59:27 +08001112 rx_done = mtk_poll_rx(napi, remain_budget, eth);
John Crispin80673022016-06-29 13:38:11 +02001113
1114 if (unlikely(netif_msg_intr(eth))) {
Nelson Changbacfd112016-08-26 01:09:42 +08001115 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1116 mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
John Crispin80673022016-06-29 13:38:11 +02001117 dev_info(eth->dev,
1118 "done rx %d, intr 0x%08x/0x%x\n",
1119 rx_done, status, mask);
1120 }
Sean Wang41156ce2016-09-03 17:59:27 +08001121 if (rx_done == remain_budget)
John Crispin80673022016-06-29 13:38:11 +02001122 return budget;
1123
Nelson Changbacfd112016-08-26 01:09:42 +08001124 status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
Sean Wang41156ce2016-09-03 17:59:27 +08001125 if (status & MTK_RX_DONE_INT) {
1126 remain_budget -= rx_done;
1127 goto poll_again;
1128 }
John Crispin80673022016-06-29 13:38:11 +02001129 napi_complete(napi);
Nelson Changbacfd112016-08-26 01:09:42 +08001130 mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
John Crispin656e7052016-03-08 11:29:55 +01001131
Sean Wang41156ce2016-09-03 17:59:27 +08001132 return rx_done + budget - remain_budget;
John Crispin656e7052016-03-08 11:29:55 +01001133}
1134
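/* allocate the QDMA TX descriptor ring, link the descriptors into a chain
 * and point the hardware at it
 */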
1135static int mtk_tx_alloc(struct mtk_eth *eth)
1136{
1137 struct mtk_tx_ring *ring = &eth->tx_ring;
1138 int i, sz = sizeof(*ring->dma);
1139
1140 ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1141 GFP_KERNEL);
1142 if (!ring->buf)
1143 goto no_tx_mem;
1144
1145 ring->dma = dma_alloc_coherent(eth->dev,
1146 MTK_DMA_SIZE * sz,
1147 &ring->phys,
1148 GFP_ATOMIC | __GFP_ZERO);
1149 if (!ring->dma)
1150 goto no_tx_mem;
1151
1152 memset(ring->dma, 0, MTK_DMA_SIZE * sz);
1153 for (i = 0; i < MTK_DMA_SIZE; i++) {
1154 int next = (i + 1) % MTK_DMA_SIZE;
1155 u32 next_ptr = ring->phys + next * sz;
1156
1157 ring->dma[i].txd2 = next_ptr;
1158 ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1159 }
1160
1161 atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1162 ring->next_free = &ring->dma[0];
John Crispin12c97c12016-06-10 13:28:06 +02001163 ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
John Crispin04698cc2016-06-10 13:28:04 +02001164 ring->thresh = MAX_SKB_FRAGS;
John Crispin656e7052016-03-08 11:29:55 +01001165
1166 /* make sure that all changes to the dma ring are flushed before we
1167 * continue
1168 */
1169 wmb();
1170
1171 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1172 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1173 mtk_w32(eth,
1174 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1175 MTK_QTX_CRX_PTR);
1176 mtk_w32(eth,
1177 ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1178 MTK_QTX_DRX_PTR);
Nelson Changbacfd112016-08-26 01:09:42 +08001179 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
John Crispin656e7052016-03-08 11:29:55 +01001180
1181 return 0;
1182
1183no_tx_mem:
1184 return -ENOMEM;
1185}
1186
1187static void mtk_tx_clean(struct mtk_eth *eth)
1188{
1189 struct mtk_tx_ring *ring = &eth->tx_ring;
1190 int i;
1191
1192 if (ring->buf) {
1193 for (i = 0; i < MTK_DMA_SIZE; i++)
sean.wang@mediatek.com55a4e772016-08-16 13:55:15 +08001194 mtk_tx_unmap(eth, &ring->buf[i]);
John Crispin656e7052016-03-08 11:29:55 +01001195 kfree(ring->buf);
1196 ring->buf = NULL;
1197 }
1198
1199 if (ring->dma) {
1200 dma_free_coherent(eth->dev,
1201 MTK_DMA_SIZE * sizeof(*ring->dma),
1202 ring->dma,
1203 ring->phys);
1204 ring->dma = NULL;
1205 }
1206}
1207
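/* allocate an RX descriptor ring (normal or hardware LRO sized), attach a
 * mapped receive buffer to every descriptor and program the PDMA registers
 */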
Nelson Changee406812016-09-17 23:50:55 +08001208static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
John Crispin656e7052016-03-08 11:29:55 +01001209{
Nelson Changee406812016-09-17 23:50:55 +08001210 struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
1211 int rx_data_len, rx_dma_size;
John Crispin656e7052016-03-08 11:29:55 +01001212 int i;
1213
Nelson Changee406812016-09-17 23:50:55 +08001214 if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1215 rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1216 rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1217 } else {
1218 rx_data_len = ETH_DATA_LEN;
1219 rx_dma_size = MTK_DMA_SIZE;
1220 }
1221
1222 ring->frag_size = mtk_max_frag_size(rx_data_len);
John Crispin656e7052016-03-08 11:29:55 +01001223 ring->buf_size = mtk_max_buf_size(ring->frag_size);
Nelson Changee406812016-09-17 23:50:55 +08001224 ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
John Crispin656e7052016-03-08 11:29:55 +01001225 GFP_KERNEL);
1226 if (!ring->data)
1227 return -ENOMEM;
1228
Nelson Changee406812016-09-17 23:50:55 +08001229 for (i = 0; i < rx_dma_size; i++) {
John Crispin656e7052016-03-08 11:29:55 +01001230 ring->data[i] = netdev_alloc_frag(ring->frag_size);
1231 if (!ring->data[i])
1232 return -ENOMEM;
1233 }
1234
1235 ring->dma = dma_alloc_coherent(eth->dev,
Nelson Changee406812016-09-17 23:50:55 +08001236 rx_dma_size * sizeof(*ring->dma),
John Crispin656e7052016-03-08 11:29:55 +01001237 &ring->phys,
1238 GFP_ATOMIC | __GFP_ZERO);
1239 if (!ring->dma)
1240 return -ENOMEM;
1241
Nelson Changee406812016-09-17 23:50:55 +08001242 for (i = 0; i < rx_dma_size; i++) {
John Crispin656e7052016-03-08 11:29:55 +01001243 dma_addr_t dma_addr = dma_map_single(eth->dev,
1244 ring->data[i] + NET_SKB_PAD,
1245 ring->buf_size,
1246 DMA_FROM_DEVICE);
1247 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1248 return -ENOMEM;
1249 ring->dma[i].rxd1 = (unsigned int)dma_addr;
1250
1251 ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1252 }
Nelson Changee406812016-09-17 23:50:55 +08001253 ring->dma_size = rx_dma_size;
1254 ring->calc_idx_update = false;
1255 ring->calc_idx = rx_dma_size - 1;
1256 ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
John Crispin656e7052016-03-08 11:29:55 +01001257 /* make sure that all changes to the dma ring are flushed before we
1258 * continue
1259 */
1260 wmb();
1261
Nelson Changee406812016-09-17 23:50:55 +08001262 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
1263 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
1264 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1265 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
John Crispin656e7052016-03-08 11:29:55 +01001266
1267 return 0;
1268}
1269
Nelson Changee406812016-09-17 23:50:55 +08001270static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
John Crispin656e7052016-03-08 11:29:55 +01001271{
Nelson Changee406812016-09-17 23:50:55 +08001272 struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
John Crispin656e7052016-03-08 11:29:55 +01001273 int i;
1274
1275 if (ring->data && ring->dma) {
Nelson Changee406812016-09-17 23:50:55 +08001276 for (i = 0; i < ring->dma_size; i++) {
John Crispin656e7052016-03-08 11:29:55 +01001277 if (!ring->data[i])
1278 continue;
1279 if (!ring->dma[i].rxd1)
1280 continue;
1281 dma_unmap_single(eth->dev,
1282 ring->dma[i].rxd1,
1283 ring->buf_size,
1284 DMA_FROM_DEVICE);
1285 skb_free_frag(ring->data[i]);
1286 }
1287 kfree(ring->data);
1288 ring->data = NULL;
1289 }
1290
1291 if (ring->dma) {
1292 dma_free_coherent(eth->dev,
Nelson Changee406812016-09-17 23:50:55 +08001293 ring->dma_size * sizeof(*ring->dma),
John Crispin656e7052016-03-08 11:29:55 +01001294 ring->dma,
1295 ring->phys);
1296 ring->dma = NULL;
1297 }
1298}
1299
Nelson Changee406812016-09-17 23:50:55 +08001300static int mtk_hwlro_rx_init(struct mtk_eth *eth)
1301{
1302 int i;
1303 u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
1304 u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
1305
1306 /* set LRO rings to auto-learn modes */
1307 ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
1308
1309 /* validate LRO ring */
1310 ring_ctrl_dw2 |= MTK_RING_VLD;
1311
1312 /* set AGE timer (unit: 20us) */
1313 ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
1314 ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
1315
1316 /* set max AGG timer (unit: 20us) */
1317 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
1318
1319 /* set max LRO AGG count */
1320 ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
1321 ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
1322
1323 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
1324 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
1325 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
1326 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
1327 }
1328
1329 /* IPv4 checksum update enable */
1330 lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
1331
1332 /* switch priority comparison to packet count mode */
1333 lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
1334
1335 /* bandwidth threshold setting */
1336 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
1337
1338 /* auto-learn score delta setting */
1339 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
1340
1341 /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
1342 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
1343 MTK_PDMA_LRO_ALT_REFRESH_TIMER);
1344
1345 /* set HW LRO mode & the max aggregation count for rx packets */
1346 lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
1347
1348 /* the minimal remaining room of SDL0 in RXD for lro aggregation */
1349 lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
1350
1351 /* enable HW LRO */
1352 lro_ctrl_dw0 |= MTK_LRO_EN;
1353
1354 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
1355 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
1356
1357 return 0;
1358}
1359
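/* tear down hardware LRO: flush aggregated packets by relinquishing the
 * rings, then invalidate them and disable the LRO engine
 */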
1360static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
1361{
1362 int i;
1363 u32 val;
1364
1365 /* relinquish lro rings, flush aggregated packets */
1366 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
1367
1368 /* wait for relinquishments done */
1369 for (i = 0; i < 10; i++) {
1370 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
1371 if (val & MTK_LRO_RING_RELINQUISH_DONE) {
1372 msleep(20);
1373 continue;
1374 }
1375 }
1376
1377 /* invalidate lro rings */
1378 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
1379 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
1380
1381 /* disable HW LRO */
1382 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
1383}
1384
Nelson Chang7aab7472016-09-17 23:50:56 +08001385static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
1386{
1387 u32 reg_val;
1388
1389 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1390
1391 /* invalidate the IP setting */
1392 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1393
1394 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
1395
1396 /* validate the IP setting */
1397 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1398}
1399
1400static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
1401{
1402 u32 reg_val;
1403
1404 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1405
1406 /* invalidate the IP setting */
1407 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1408
1409 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
1410}
1411
1412static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
1413{
1414 int cnt = 0;
1415 int i;
1416
1417 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1418 if (mac->hwlro_ip[i])
1419 cnt++;
1420 }
1421
1422 return cnt;
1423}
1424
1425static int mtk_hwlro_add_ipaddr(struct net_device *dev,
1426 struct ethtool_rxnfc *cmd)
1427{
1428 struct ethtool_rx_flow_spec *fsp =
1429 (struct ethtool_rx_flow_spec *)&cmd->fs;
1430 struct mtk_mac *mac = netdev_priv(dev);
1431 struct mtk_eth *eth = mac->hw;
1432 int hwlro_idx;
1433
1434 if ((fsp->flow_type != TCP_V4_FLOW) ||
1435 (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
1436 (fsp->location > 1))
1437 return -EINVAL;
1438
1439 mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
1440 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1441
1442 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1443
1444 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
1445
1446 return 0;
1447}
1448
1449static int mtk_hwlro_del_ipaddr(struct net_device *dev,
1450 struct ethtool_rxnfc *cmd)
1451{
1452 struct ethtool_rx_flow_spec *fsp =
1453 (struct ethtool_rx_flow_spec *)&cmd->fs;
1454 struct mtk_mac *mac = netdev_priv(dev);
1455 struct mtk_eth *eth = mac->hw;
1456 int hwlro_idx;
1457
1458 if (fsp->location > 1)
1459 return -EINVAL;
1460
1461 mac->hwlro_ip[fsp->location] = 0;
1462 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1463
1464 mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1465
1466 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1467
1468 return 0;
1469}
1470
1471static void mtk_hwlro_netdev_disable(struct net_device *dev)
1472{
1473 struct mtk_mac *mac = netdev_priv(dev);
1474 struct mtk_eth *eth = mac->hw;
1475 int i, hwlro_idx;
1476
1477 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1478 mac->hwlro_ip[i] = 0;
1479 hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
1480
1481 mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1482 }
1483
1484 mac->hwlro_ip_cnt = 0;
1485}
1486
1487static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
1488 struct ethtool_rxnfc *cmd)
1489{
1490 struct mtk_mac *mac = netdev_priv(dev);
1491 struct ethtool_rx_flow_spec *fsp =
1492 (struct ethtool_rx_flow_spec *)&cmd->fs;
1493
1494 /* only tcp dst ipv4 is meaningful, others are meaningless */
1495 fsp->flow_type = TCP_V4_FLOW;
1496 fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
1497 fsp->m_u.tcp_ip4_spec.ip4dst = 0;
1498
1499 fsp->h_u.tcp_ip4_spec.ip4src = 0;
1500 fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
1501 fsp->h_u.tcp_ip4_spec.psrc = 0;
1502 fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
1503 fsp->h_u.tcp_ip4_spec.pdst = 0;
1504 fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
1505 fsp->h_u.tcp_ip4_spec.tos = 0;
1506 fsp->m_u.tcp_ip4_spec.tos = 0xff;
1507
1508 return 0;
1509}
1510
1511static int mtk_hwlro_get_fdir_all(struct net_device *dev,
1512 struct ethtool_rxnfc *cmd,
1513 u32 *rule_locs)
1514{
1515 struct mtk_mac *mac = netdev_priv(dev);
1516 int cnt = 0;
1517 int i;
1518
1519 for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1520 if (mac->hwlro_ip[i]) {
1521 rule_locs[cnt] = i;
1522 cnt++;
1523 }
1524 }
1525
1526 cmd->rule_cnt = cnt;
1527
1528 return 0;
1529}
1530
1531static netdev_features_t mtk_fix_features(struct net_device *dev,
1532 netdev_features_t features)
1533{
1534 if (!(features & NETIF_F_LRO)) {
1535 struct mtk_mac *mac = netdev_priv(dev);
1536 int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1537
1538 if (ip_cnt) {
1539 netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
1540
1541 features |= NETIF_F_LRO;
1542 }
1543 }
1544
1545 return features;
1546}
1547
1548static int mtk_set_features(struct net_device *dev, netdev_features_t features)
1549{
1550 int err = 0;
1551
1552 if (!((dev->features ^ features) & NETIF_F_LRO))
1553 return 0;
1554
1555 if (!(features & NETIF_F_LRO))
1556 mtk_hwlro_netdev_disable(dev);
1557
1558 return err;
1559}
1560
John Crispin656e7052016-03-08 11:29:55 +01001561/* wait for DMA to finish whatever it is doing before we start using it again */
1562static int mtk_dma_busy_wait(struct mtk_eth *eth)
1563{
1564 unsigned long t_start = jiffies;
1565
1566 while (1) {
1567 if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
1568 (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
1569 return 0;
1570 if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
1571 break;
1572 }
1573
1574 dev_err(eth->dev, "DMA init timeout\n");
1575 return -1;
1576}
1577
1578static int mtk_dma_init(struct mtk_eth *eth)
1579{
1580 int err;
Nelson Changee406812016-09-17 23:50:55 +08001581 u32 i;
John Crispin656e7052016-03-08 11:29:55 +01001582
1583 if (mtk_dma_busy_wait(eth))
1584 return -EBUSY;
1585
1586 /* QDMA needs scratch memory for internal reordering of the
1587 * descriptors
1588 */
1589 err = mtk_init_fq_dma(eth);
1590 if (err)
1591 return err;
1592
1593 err = mtk_tx_alloc(eth);
1594 if (err)
1595 return err;
1596
Nelson Changee406812016-09-17 23:50:55 +08001597 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
John Crispin656e7052016-03-08 11:29:55 +01001598 if (err)
1599 return err;
1600
Nelson Changee406812016-09-17 23:50:55 +08001601 if (eth->hwlro) {
1602 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
1603 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
1604 if (err)
1605 return err;
1606 }
1607 err = mtk_hwlro_rx_init(eth);
1608 if (err)
1609 return err;
1610 }
1611
John Crispin656e7052016-03-08 11:29:55 +01001612 /* Enable random early drop and set drop threshold automatically */
1613 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
1614 MTK_QDMA_FC_THRES);
1615 mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
1616
1617 return 0;
1618}
1619
1620static void mtk_dma_free(struct mtk_eth *eth)
1621{
1622 int i;
1623
1624 for (i = 0; i < MTK_MAC_COUNT; i++)
1625 if (eth->netdev[i])
1626 netdev_reset_queue(eth->netdev[i]);
John Crispin605e4fe2016-06-10 13:27:59 +02001627 if (eth->scratch_ring) {
1628 dma_free_coherent(eth->dev,
1629 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
1630 eth->scratch_ring,
1631 eth->phy_scratch_ring);
1632 eth->scratch_ring = NULL;
1633 eth->phy_scratch_ring = 0;
1634 }
John Crispin656e7052016-03-08 11:29:55 +01001635 mtk_tx_clean(eth);
Nelson Changee406812016-09-17 23:50:55 +08001636 mtk_rx_clean(eth, 0);
1637
1638 if (eth->hwlro) {
1639 mtk_hwlro_rx_uninit(eth);
1640 for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
1641 mtk_rx_clean(eth, i);
1642 }
1643
John Crispin656e7052016-03-08 11:29:55 +01001644 kfree(eth->scratch_head);
1645}
1646
1647static void mtk_tx_timeout(struct net_device *dev)
1648{
1649 struct mtk_mac *mac = netdev_priv(dev);
1650 struct mtk_eth *eth = mac->hw;
1651
1652 eth->netdev[mac->id]->stats.tx_errors++;
1653 netif_err(eth, tx_err, dev,
1654 "transmit timed out\n");
John Crispin7c78b4a2016-04-08 00:54:10 +02001655 schedule_work(&eth->pending_work);
John Crispin656e7052016-03-08 11:29:55 +01001656}
1657
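/* RX and TX completions arrive on separate interrupt lines; each handler
 * masks its own interrupt source and defers the real work to the
 * corresponding NAPI instance
 */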
John Crispin80673022016-06-29 13:38:11 +02001658static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
John Crispin656e7052016-03-08 11:29:55 +01001659{
1660 struct mtk_eth *eth = _eth;
John Crispin656e7052016-03-08 11:29:55 +01001661
John Crispin80673022016-06-29 13:38:11 +02001662 if (likely(napi_schedule_prep(&eth->rx_napi))) {
1663 __napi_schedule(&eth->rx_napi);
Nelson Changbacfd112016-08-26 01:09:42 +08001664 mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
John Crispin656e7052016-03-08 11:29:55 +01001665 }
John Crispin80673022016-06-29 13:38:11 +02001666
1667 return IRQ_HANDLED;
1668}
1669
1670static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
1671{
1672 struct mtk_eth *eth = _eth;
1673
1674 if (likely(napi_schedule_prep(&eth->tx_napi))) {
1675 __napi_schedule(&eth->tx_napi);
Nelson Changbacfd112016-08-26 01:09:42 +08001676 mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
John Crispin80673022016-06-29 13:38:11 +02001677 }
John Crispin656e7052016-03-08 11:29:55 +01001678
1679 return IRQ_HANDLED;
1680}
1681
1682#ifdef CONFIG_NET_POLL_CONTROLLER
1683static void mtk_poll_controller(struct net_device *dev)
1684{
1685 struct mtk_mac *mac = netdev_priv(dev);
1686 struct mtk_eth *eth = mac->hw;
John Crispin656e7052016-03-08 11:29:55 +01001687
Nelson Changbacfd112016-08-26 01:09:42 +08001688 mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
1689 mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
John Crispin8186f6e2016-07-02 08:00:50 +02001690 mtk_handle_irq_rx(eth->irq[2], dev);
Nelson Changbacfd112016-08-26 01:09:42 +08001691 mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
1692 mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
John Crispin656e7052016-03-08 11:29:55 +01001693}
1694#endif
1695
1696static int mtk_start_dma(struct mtk_eth *eth)
1697{
1698 int err;
1699
1700 err = mtk_dma_init(eth);
1701 if (err) {
1702 mtk_dma_free(eth);
1703 return err;
1704 }
1705
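 /* TX descriptors are handled by the QDMA engine and RX by the PDMA
  * engine, so both global configuration registers are programmed here
  */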
1706 mtk_w32(eth,
Nelson Changbacfd112016-08-26 01:09:42 +08001707 MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
1708 MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO,
John Crispin656e7052016-03-08 11:29:55 +01001709 MTK_QDMA_GLO_CFG);
1710
Nelson Changbacfd112016-08-26 01:09:42 +08001711 mtk_w32(eth,
1712 MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
1713 MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
1714 MTK_PDMA_GLO_CFG);
1715
John Crispin656e7052016-03-08 11:29:55 +01001716 return 0;
1717}
1718
1719static int mtk_open(struct net_device *dev)
1720{
1721 struct mtk_mac *mac = netdev_priv(dev);
1722 struct mtk_eth *eth = mac->hw;
1723
1724 /* we run 2 netdevs on the same dma ring so we only bring it up once */
1725 if (!atomic_read(&eth->dma_refcnt)) {
1726 int err = mtk_start_dma(eth);
1727
1728 if (err)
1729 return err;
1730
John Crispin80673022016-06-29 13:38:11 +02001731 napi_enable(&eth->tx_napi);
John Crispin656e7052016-03-08 11:29:55 +01001732 napi_enable(&eth->rx_napi);
Nelson Changbacfd112016-08-26 01:09:42 +08001733 mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
1734 mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
John Crispin656e7052016-03-08 11:29:55 +01001735 }
1736 atomic_inc(&eth->dma_refcnt);
1737
Sean Wang2364c5c2016-09-22 16:33:35 +08001738 phy_start(dev->phydev);
John Crispin656e7052016-03-08 11:29:55 +01001739 netif_start_queue(dev);
1740
1741 return 0;
1742}
1743
1744static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
1745{
John Crispin656e7052016-03-08 11:29:55 +01001746 u32 val;
1747 int i;
1748
1749 /* stop the dma engine */
Sean Wange3e96522016-08-11 17:51:00 +08001750 spin_lock_bh(&eth->page_lock);
John Crispin656e7052016-03-08 11:29:55 +01001751 val = mtk_r32(eth, glo_cfg);
1752 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
1753 glo_cfg);
Sean Wange3e96522016-08-11 17:51:00 +08001754 spin_unlock_bh(&eth->page_lock);
John Crispin656e7052016-03-08 11:29:55 +01001755
1756 /* wait for dma stop */
1757 for (i = 0; i < 10; i++) {
1758 val = mtk_r32(eth, glo_cfg);
1759 if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
1760 msleep(20);
1761 continue;
1762 }
1763 break;
1764 }
1765}
1766
1767static int mtk_stop(struct net_device *dev)
1768{
1769 struct mtk_mac *mac = netdev_priv(dev);
1770 struct mtk_eth *eth = mac->hw;
1771
1772 netif_tx_disable(dev);
Sean Wang2364c5c2016-09-22 16:33:35 +08001773 phy_stop(dev->phydev);
John Crispin656e7052016-03-08 11:29:55 +01001774
1775 /* only shutdown DMA if this is the last user */
1776 if (!atomic_dec_and_test(&eth->dma_refcnt))
1777 return 0;
1778
Nelson Changbacfd112016-08-26 01:09:42 +08001779 mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
1780 mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
John Crispin80673022016-06-29 13:38:11 +02001781 napi_disable(&eth->tx_napi);
John Crispin656e7052016-03-08 11:29:55 +01001782 napi_disable(&eth->rx_napi);
1783
1784 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
1785
1786 mtk_dma_free(eth);
1787
1788 return 0;
1789}
1790
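/* assert the requested bits in the ETHSYS reset control register, give the
 * block time to reset and then de-assert them again
 */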
Sean Wang2a8307a2016-09-14 23:13:20 +08001791static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
1792{
1793 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
1794 reset_bits,
1795 reset_bits);
1796
1797 usleep_range(1000, 1100);
1798 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
1799 reset_bits,
1800 ~reset_bits);
1801 mdelay(10);
1802}
1803
Sean Wang9ea4d312016-09-14 23:13:19 +08001804static int mtk_hw_init(struct mtk_eth *eth)
John Crispin656e7052016-03-08 11:29:55 +01001805{
Sean Wang9ea4d312016-09-14 23:13:19 +08001806 int i, val;
1807
1808 if (test_and_set_bit(MTK_HW_INIT, &eth->state))
1809 return 0;
Sean Wang85574db2016-09-14 23:13:15 +08001810
Sean Wang26a2ad82016-09-14 23:13:18 +08001811 pm_runtime_enable(eth->dev);
1812 pm_runtime_get_sync(eth->dev);
1813
Sean Wang85574db2016-09-14 23:13:15 +08001814 clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
1815 clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
1816 clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
1817 clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
Sean Wang2a8307a2016-09-14 23:13:20 +08001818 ethsys_reset(eth, RSTCTRL_FE);
1819 ethsys_reset(eth, RSTCTRL_PPE);
John Crispin656e7052016-03-08 11:29:55 +01001820
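 /* program each MAC's interface mode (ge_mode) into ETHSYS_SYSCFG0 using a
  * read-modify-write of the shared register
  */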
Sean Wang9ea4d312016-09-14 23:13:19 +08001821 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
1822 for (i = 0; i < MTK_MAC_COUNT; i++) {
1823 if (!eth->mac[i])
1824 continue;
1825 val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
1826 val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
1827 }
1828 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
1829
John Crispin656e7052016-03-08 11:29:55 +01001830 /* Set GE2 driving and slew rate */
1831 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
1832
1833 /* set GE2 TDSEL */
1834 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
1835
1836 /* set GE2 TUNE */
1837 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
1838
1839 /* GE1, Force 1000M/FD, FC ON */
1840 mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));
1841
1842 /* GE2, Force 1000M/FD, FC ON */
1843 mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));
1844
1845 /* Enable RX VLAN offloading */
1846 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
1847
John Crispin656e7052016-03-08 11:29:55 +01001848 /* disable delayed interrupts and mask all interrupt sources */
1849 mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
Nelson Changbacfd112016-08-26 01:09:42 +08001850 mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
1851 mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
1852 mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
John Crispin656e7052016-03-08 11:29:55 +01001853 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
1854 mtk_w32(eth, 0, MTK_RST_GL);
1855
1856 /* FE int grouping */
John Crispin80673022016-06-29 13:38:11 +02001857 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
1858 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
1859 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
1860 mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
1861 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
John Crispin656e7052016-03-08 11:29:55 +01001862
1863 for (i = 0; i < 2; i++) {
1864 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
1865
Nelson Chang9c084352016-08-26 01:09:43 +08001866 /* set up the forwarding port to send frames to the PDMA */
John Crispin656e7052016-03-08 11:29:55 +01001867 val &= ~0xffff;
John Crispin656e7052016-03-08 11:29:55 +01001868
1869 /* Enable RX checksum */
1870 val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
1871
1872 /* apply the per-MAC GDMA forwarding configuration */
1873 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
1874 }
1875
1876 return 0;
1877}
1878
Sean Wangbf253fb2016-09-14 23:13:16 +08001879static int mtk_hw_deinit(struct mtk_eth *eth)
1880{
Sean Wang9ea4d312016-09-14 23:13:19 +08001881 if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
1882 return 0;
1883
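 /* release the clocks in the reverse order of mtk_hw_init() */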
Sean Wangbf253fb2016-09-14 23:13:16 +08001884 clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
1885 clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
1886 clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
1887 clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
1888
Sean Wang26a2ad82016-09-14 23:13:18 +08001889 pm_runtime_put_sync(eth->dev);
1890 pm_runtime_disable(eth->dev);
1891
Sean Wangbf253fb2016-09-14 23:13:16 +08001892 return 0;
1893}
1894
John Crispin656e7052016-03-08 11:29:55 +01001895static int __init mtk_init(struct net_device *dev)
1896{
1897 struct mtk_mac *mac = netdev_priv(dev);
1898 struct mtk_eth *eth = mac->hw;
1899 const char *mac_addr;
1900
1901 mac_addr = of_get_mac_address(mac->of_node);
1902 if (mac_addr)
1903 ether_addr_copy(dev->dev_addr, mac_addr);
1904
1905 /* If the MAC address is invalid, use a random one */
1906 if (!is_valid_ether_addr(dev->dev_addr)) {
1907 random_ether_addr(dev->dev_addr);
1908 dev_err(eth->dev, "generated random MAC address %pM\n",
1909 dev->dev_addr);
1910 dev->addr_assign_type = NET_ADDR_RANDOM;
1911 }
1912
Sean Wang2364c5c2016-09-22 16:33:35 +08001913 return mtk_phy_connect(dev);
John Crispin656e7052016-03-08 11:29:55 +01001914}
1915
1916static void mtk_uninit(struct net_device *dev)
1917{
1918 struct mtk_mac *mac = netdev_priv(dev);
1919 struct mtk_eth *eth = mac->hw;
1920
Sean Wang2364c5c2016-09-22 16:33:35 +08001921 phy_disconnect(dev->phydev);
Nelson Changbacfd112016-08-26 01:09:42 +08001922 mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
1923 mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
John Crispin656e7052016-03-08 11:29:55 +01001924}
1925
1926static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1927{
John Crispin656e7052016-03-08 11:29:55 +01001928 switch (cmd) {
1929 case SIOCGMIIPHY:
1930 case SIOCGMIIREG:
1931 case SIOCSMIIREG:
Sean Wang2364c5c2016-09-22 16:33:35 +08001932 return phy_mii_ioctl(dev->phydev, ifr, cmd);
John Crispin656e7052016-03-08 11:29:55 +01001933 default:
1934 break;
1935 }
1936
1937 return -EOPNOTSUPP;
1938}
1939
1940static void mtk_pending_work(struct work_struct *work)
1941{
John Crispin7c78b4a2016-04-08 00:54:10 +02001942 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
John Crispine7d425d2016-04-08 00:54:09 +02001943 int err, i;
1944 unsigned long restart = 0;
John Crispin656e7052016-03-08 11:29:55 +01001945
1946 rtnl_lock();
John Crispin656e7052016-03-08 11:29:55 +01001947
Sean Wangdce6fa42016-09-14 23:13:21 +08001948 dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
1949
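 /* the MTK_RESETTING bit doubles as a lock: the ethtool and stats paths
  * test it and back off while a reset is in flight
  */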
1950 while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
1951 cpu_relax();
1952
1953 dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
John Crispine7d425d2016-04-08 00:54:09 +02001954 /* stop all devices to make sure that dma is properly shut down */
1955 for (i = 0; i < MTK_MAC_COUNT; i++) {
John Crispin7c78b4a2016-04-08 00:54:10 +02001956 if (!eth->netdev[i])
John Crispine7d425d2016-04-08 00:54:09 +02001957 continue;
1958 mtk_stop(eth->netdev[i]);
1959 __set_bit(i, &restart);
1960 }
Sean Wangdce6fa42016-09-14 23:13:21 +08001961 dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
John Crispine7d425d2016-04-08 00:54:09 +02001962
Sean Wang9ea4d312016-09-14 23:13:19 +08001963 /* restart underlying hardware such as power, clock, pin mux
1964 * and the connected phy
1965 */
1966 mtk_hw_deinit(eth);
1967
1968 if (eth->dev->pins)
1969 pinctrl_select_state(eth->dev->pins->p,
1970 eth->dev->pins->default_state);
1971 mtk_hw_init(eth);
1972
1973 for (i = 0; i < MTK_MAC_COUNT; i++) {
1974 if (!eth->mac[i] ||
1975 of_phy_is_fixed_link(eth->mac[i]->of_node))
1976 continue;
Sean Wang2364c5c2016-09-22 16:33:35 +08001977 err = phy_init_hw(eth->netdev[i]->phydev);
Sean Wang9ea4d312016-09-14 23:13:19 +08001978 if (err)
1979 dev_err(eth->dev, "%s: PHY init failed.\n",
1980 eth->netdev[i]->name);
1981 }
1982
John Crispine7d425d2016-04-08 00:54:09 +02001983 /* restart DMA and enable IRQs */
1984 for (i = 0; i < MTK_MAC_COUNT; i++) {
1985 if (!test_bit(i, &restart))
1986 continue;
1987 err = mtk_open(eth->netdev[i]);
1988 if (err) {
1989 netif_alert(eth, ifup, eth->netdev[i],
1990 "Driver up/down cycle failed, closing device.\n");
1991 dev_close(eth->netdev[i]);
1992 }
John Crispin656e7052016-03-08 11:29:55 +01001993 }
Sean Wangdce6fa42016-09-14 23:13:21 +08001994
1995 dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
1996
1997 clear_bit_unlock(MTK_RESETTING, &eth->state);
1998
John Crispin656e7052016-03-08 11:29:55 +01001999 rtnl_unlock();
2000}
2001
Sean Wang8a8a9e82016-09-14 23:13:17 +08002002static int mtk_free_dev(struct mtk_eth *eth)
John Crispin656e7052016-03-08 11:29:55 +01002003{
2004 int i;
2005
2006 for (i = 0; i < MTK_MAC_COUNT; i++) {
John Crispin656e7052016-03-08 11:29:55 +01002007 if (!eth->netdev[i])
2008 continue;
John Crispin656e7052016-03-08 11:29:55 +01002009 free_netdev(eth->netdev[i]);
John Crispin656e7052016-03-08 11:29:55 +01002010 }
Sean Wang8a8a9e82016-09-14 23:13:17 +08002011
2012 return 0;
2013}
2014
2015static int mtk_unreg_dev(struct mtk_eth *eth)
2016{
2017 int i;
2018
2019 for (i = 0; i < MTK_MAC_COUNT; i++) {
2020 if (!eth->netdev[i])
2021 continue;
2022 unregister_netdev(eth->netdev[i]);
2023 }
2024
2025 return 0;
2026}
2027
2028static int mtk_cleanup(struct mtk_eth *eth)
2029{
2030 mtk_unreg_dev(eth);
2031 mtk_free_dev(eth);
John Crispin7c78b4a2016-04-08 00:54:10 +02002032 cancel_work_sync(&eth->pending_work);
John Crispin656e7052016-03-08 11:29:55 +01002033
2034 return 0;
2035}
2036
2037static int mtk_get_settings(struct net_device *dev,
2038 struct ethtool_cmd *cmd)
2039{
2040 struct mtk_mac *mac = netdev_priv(dev);
2041 int err;
2042
Sean Wangdce6fa42016-09-14 23:13:21 +08002043 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2044 return -EBUSY;
2045
Sean Wang2364c5c2016-09-22 16:33:35 +08002046 err = phy_read_status(dev->phydev);
John Crispin656e7052016-03-08 11:29:55 +01002047 if (err)
2048 return -ENODEV;
2049
Sean Wang2364c5c2016-09-22 16:33:35 +08002050 return phy_ethtool_gset(dev->phydev, cmd);
John Crispin656e7052016-03-08 11:29:55 +01002051}
2052
2053static int mtk_set_settings(struct net_device *dev,
2054 struct ethtool_cmd *cmd)
2055{
2056 struct mtk_mac *mac = netdev_priv(dev);
2057
Sean Wang2364c5c2016-09-22 16:33:35 +08002058 if (cmd->phy_address != dev->phydev->mdio.addr) {
2059 dev->phydev = mdiobus_get_phy(mac->hw->mii_bus,
John Crispin656e7052016-03-08 11:29:55 +01002060 cmd->phy_address);
Sean Wang2364c5c2016-09-22 16:33:35 +08002061 if (!dev->phydev)
John Crispin656e7052016-03-08 11:29:55 +01002062 return -ENODEV;
2063 }
2064
Sean Wang2364c5c2016-09-22 16:33:35 +08002065 return phy_ethtool_sset(dev->phydev, cmd);
John Crispin656e7052016-03-08 11:29:55 +01002066}
2067
2068static void mtk_get_drvinfo(struct net_device *dev,
2069 struct ethtool_drvinfo *info)
2070{
2071 struct mtk_mac *mac = netdev_priv(dev);
2072
2073 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
2074 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
2075 info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
2076}
2077
2078static u32 mtk_get_msglevel(struct net_device *dev)
2079{
2080 struct mtk_mac *mac = netdev_priv(dev);
2081
2082 return mac->hw->msg_enable;
2083}
2084
2085static void mtk_set_msglevel(struct net_device *dev, u32 value)
2086{
2087 struct mtk_mac *mac = netdev_priv(dev);
2088
2089 mac->hw->msg_enable = value;
2090}
2091
2092static int mtk_nway_reset(struct net_device *dev)
2093{
2094 struct mtk_mac *mac = netdev_priv(dev);
2095
Sean Wangdce6fa42016-09-14 23:13:21 +08002096 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2097 return -EBUSY;
2098
Sean Wang2364c5c2016-09-22 16:33:35 +08002099 return genphy_restart_aneg(dev->phydev);
John Crispin656e7052016-03-08 11:29:55 +01002100}
2101
2102static u32 mtk_get_link(struct net_device *dev)
2103{
2104 struct mtk_mac *mac = netdev_priv(dev);
2105 int err;
2106
Sean Wangdce6fa42016-09-14 23:13:21 +08002107 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2108 return -EBUSY;
2109
Sean Wang2364c5c2016-09-22 16:33:35 +08002110 err = genphy_update_link(dev->phydev);
John Crispin656e7052016-03-08 11:29:55 +01002111 if (err)
2112 return ethtool_op_get_link(dev);
2113
Sean Wang2364c5c2016-09-22 16:33:35 +08002114 return dev->phydev->link;
John Crispin656e7052016-03-08 11:29:55 +01002115}
2116
2117static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2118{
2119 int i;
2120
2121 switch (stringset) {
2122 case ETH_SS_STATS:
2123 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
2124 memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
2125 data += ETH_GSTRING_LEN;
2126 }
2127 break;
2128 }
2129}
2130
2131static int mtk_get_sset_count(struct net_device *dev, int sset)
2132{
2133 switch (sset) {
2134 case ETH_SS_STATS:
2135 return ARRAY_SIZE(mtk_ethtool_stats);
2136 default:
2137 return -EOPNOTSUPP;
2138 }
2139}
2140
2141static void mtk_get_ethtool_stats(struct net_device *dev,
2142 struct ethtool_stats *stats, u64 *data)
2143{
2144 struct mtk_mac *mac = netdev_priv(dev);
2145 struct mtk_hw_stats *hwstats = mac->hw_stats;
2146 u64 *data_src, *data_dst;
2147 unsigned int start;
2148 int i;
2149
Sean Wangdce6fa42016-09-14 23:13:21 +08002150 if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2151 return;
2152
John Crispin656e7052016-03-08 11:29:55 +01002153 if (netif_running(dev) && netif_device_present(dev)) {
2154 if (spin_trylock(&hwstats->stats_lock)) {
2155 mtk_stats_update_mac(mac);
2156 spin_unlock(&hwstats->stats_lock);
2157 }
2158 }
2159
Sean Wang94d308d2016-09-20 11:26:48 +08002160 data_src = (u64 *)hwstats;
2161
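 /* copy the counters under the u64_stats seqcount so a concurrent update
  * cannot hand us torn 64-bit values; retry if a writer raced with us
  */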
John Crispin656e7052016-03-08 11:29:55 +01002162 do {
John Crispin656e7052016-03-08 11:29:55 +01002163 data_dst = data;
2164 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
2165
2166 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
2167 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
2168 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
2169}
2170
Nelson Chang7aab7472016-09-17 23:50:56 +08002171static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2172 u32 *rule_locs)
2173{
2174 int ret = -EOPNOTSUPP;
2175
2176 switch (cmd->cmd) {
2177 case ETHTOOL_GRXRINGS:
2178 if (dev->features & NETIF_F_LRO) {
2179 cmd->data = MTK_MAX_RX_RING_NUM;
2180 ret = 0;
2181 }
2182 break;
2183 case ETHTOOL_GRXCLSRLCNT:
2184 if (dev->features & NETIF_F_LRO) {
2185 struct mtk_mac *mac = netdev_priv(dev);
2186
2187 cmd->rule_cnt = mac->hwlro_ip_cnt;
2188 ret = 0;
2189 }
2190 break;
2191 case ETHTOOL_GRXCLSRULE:
2192 if (dev->features & NETIF_F_LRO)
2193 ret = mtk_hwlro_get_fdir_entry(dev, cmd);
2194 break;
2195 case ETHTOOL_GRXCLSRLALL:
2196 if (dev->features & NETIF_F_LRO)
2197 ret = mtk_hwlro_get_fdir_all(dev, cmd,
2198 rule_locs);
2199 break;
2200 default:
2201 break;
2202 }
2203
2204 return ret;
2205}
2206
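/* rules are added and removed through the standard ethtool ntuple
 * interface; a typical invocation (device name and address are only
 * examples) might look like
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.100 loc 0
 * which reaches mtk_hwlro_add_ipaddr() via ETHTOOL_SRXCLSRLINS
 */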
2207static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2208{
2209 int ret = -EOPNOTSUPP;
2210
2211 switch (cmd->cmd) {
2212 case ETHTOOL_SRXCLSRLINS:
2213 if (dev->features & NETIF_F_LRO)
2214 ret = mtk_hwlro_add_ipaddr(dev, cmd);
2215 break;
2216 case ETHTOOL_SRXCLSRLDEL:
2217 if (dev->features & NETIF_F_LRO)
2218 ret = mtk_hwlro_del_ipaddr(dev, cmd);
2219 break;
2220 default:
2221 break;
2222 }
2223
2224 return ret;
2225}
2226
Julia Lawall6a38cb12016-09-01 00:21:19 +02002227static const struct ethtool_ops mtk_ethtool_ops = {
John Crispin656e7052016-03-08 11:29:55 +01002228 .get_settings = mtk_get_settings,
2229 .set_settings = mtk_set_settings,
2230 .get_drvinfo = mtk_get_drvinfo,
2231 .get_msglevel = mtk_get_msglevel,
2232 .set_msglevel = mtk_set_msglevel,
2233 .nway_reset = mtk_nway_reset,
2234 .get_link = mtk_get_link,
2235 .get_strings = mtk_get_strings,
2236 .get_sset_count = mtk_get_sset_count,
2237 .get_ethtool_stats = mtk_get_ethtool_stats,
Nelson Chang7aab7472016-09-17 23:50:56 +08002238 .get_rxnfc = mtk_get_rxnfc,
2239 .set_rxnfc = mtk_set_rxnfc,
John Crispin656e7052016-03-08 11:29:55 +01002240};
2241
2242static const struct net_device_ops mtk_netdev_ops = {
2243 .ndo_init = mtk_init,
2244 .ndo_uninit = mtk_uninit,
2245 .ndo_open = mtk_open,
2246 .ndo_stop = mtk_stop,
2247 .ndo_start_xmit = mtk_start_xmit,
2248 .ndo_set_mac_address = mtk_set_mac_address,
2249 .ndo_validate_addr = eth_validate_addr,
2250 .ndo_do_ioctl = mtk_do_ioctl,
2251 .ndo_change_mtu = eth_change_mtu,
2252 .ndo_tx_timeout = mtk_tx_timeout,
2253 .ndo_get_stats64 = mtk_get_stats64,
Nelson Chang7aab7472016-09-17 23:50:56 +08002254 .ndo_fix_features = mtk_fix_features,
2255 .ndo_set_features = mtk_set_features,
John Crispin656e7052016-03-08 11:29:55 +01002256#ifdef CONFIG_NET_POLL_CONTROLLER
2257 .ndo_poll_controller = mtk_poll_controller,
2258#endif
2259};
2260
2261static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
2262{
2263 struct mtk_mac *mac;
2264 const __be32 *_id = of_get_property(np, "reg", NULL);
2265 int id, err;
2266
2267 if (!_id) {
2268 dev_err(eth->dev, "missing mac id\n");
2269 return -EINVAL;
2270 }
2271
2272 id = be32_to_cpup(_id);
2273 if (id >= MTK_MAC_COUNT) {
2274 dev_err(eth->dev, "%d is not a valid mac id\n", id);
2275 return -EINVAL;
2276 }
2277
2278 if (eth->netdev[id]) {
2279 dev_err(eth->dev, "duplicate mac id found: %d\n", id);
2280 return -EINVAL;
2281 }
2282
2283 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
2284 if (!eth->netdev[id]) {
2285 dev_err(eth->dev, "alloc_etherdev failed\n");
2286 return -ENOMEM;
2287 }
2288 mac = netdev_priv(eth->netdev[id]);
2289 eth->mac[id] = mac;
2290 mac->id = id;
2291 mac->hw = eth;
2292 mac->of_node = np;
John Crispin656e7052016-03-08 11:29:55 +01002293
Nelson Changee406812016-09-17 23:50:55 +08002294 memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
2295 mac->hwlro_ip_cnt = 0;
2296
John Crispin656e7052016-03-08 11:29:55 +01002297 mac->hw_stats = devm_kzalloc(eth->dev,
2298 sizeof(*mac->hw_stats),
2299 GFP_KERNEL);
2300 if (!mac->hw_stats) {
2301 dev_err(eth->dev, "failed to allocate counter memory\n");
2302 err = -ENOMEM;
2303 goto free_netdev;
2304 }
2305 spin_lock_init(&mac->hw_stats->stats_lock);
sean.wang@mediatek.comd70056522016-08-13 19:16:18 +08002306 u64_stats_init(&mac->hw_stats->syncp);
John Crispin656e7052016-03-08 11:29:55 +01002307 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
2308
2309 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
John Crispineaadf9f2016-06-10 13:28:05 +02002310 eth->netdev[id]->watchdog_timeo = 5 * HZ;
John Crispin656e7052016-03-08 11:29:55 +01002311 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
2312 eth->netdev[id]->base_addr = (unsigned long)eth->base;
Nelson Changee406812016-09-17 23:50:55 +08002313
2314 eth->netdev[id]->hw_features = MTK_HW_FEATURES;
2315 if (eth->hwlro)
2316 eth->netdev[id]->hw_features |= NETIF_F_LRO;
2317
John Crispin656e7052016-03-08 11:29:55 +01002318 eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
2319 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
2320 eth->netdev[id]->features |= MTK_HW_FEATURES;
2321 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
2322
John Crispin80673022016-06-29 13:38:11 +02002323 eth->netdev[id]->irq = eth->irq[0];
John Crispin656e7052016-03-08 11:29:55 +01002324 return 0;
2325
2326free_netdev:
2327 free_netdev(eth->netdev[id]);
2328 return err;
2329}
2330
2331static int mtk_probe(struct platform_device *pdev)
2332{
2333 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2334 struct device_node *mac_np;
2335 const struct of_device_id *match;
2336 struct mtk_soc_data *soc;
2337 struct mtk_eth *eth;
2338 int err;
John Crispin80673022016-06-29 13:38:11 +02002339 int i;
John Crispin656e7052016-03-08 11:29:55 +01002340
John Crispin656e7052016-03-08 11:29:55 +01002341 match = of_match_device(of_mtk_match, &pdev->dev);
2342 soc = (struct mtk_soc_data *)match->data;
2343
2344 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2345 if (!eth)
2346 return -ENOMEM;
2347
Sean Wang549e5492016-09-01 10:47:28 +08002348 eth->dev = &pdev->dev;
John Crispin656e7052016-03-08 11:29:55 +01002349 eth->base = devm_ioremap_resource(&pdev->dev, res);
Vladimir Zapolskiy621e49f2016-03-23 01:06:04 +02002350 if (IS_ERR(eth->base))
2351 return PTR_ERR(eth->base);
John Crispin656e7052016-03-08 11:29:55 +01002352
2353 spin_lock_init(&eth->page_lock);
John Crispin7bc9cce2016-06-29 13:38:10 +02002354 spin_lock_init(&eth->irq_lock);
John Crispin656e7052016-03-08 11:29:55 +01002355
2356 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2357 "mediatek,ethsys");
2358 if (IS_ERR(eth->ethsys)) {
2359 dev_err(&pdev->dev, "no ethsys regmap found\n");
2360 return PTR_ERR(eth->ethsys);
2361 }
2362
2363 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2364 "mediatek,pctl");
2365 if (IS_ERR(eth->pctl)) {
2366 dev_err(&pdev->dev, "no pctl regmap found\n");
2367 return PTR_ERR(eth->pctl);
2368 }
2369
Nelson Changee406812016-09-17 23:50:55 +08002370 eth->hwlro = of_property_read_bool(pdev->dev.of_node, "mediatek,hwlro");
2371
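 /* three interrupt lines are taken from the device tree; the second and
  * third are requested later for TX and RX completion respectively
  */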
John Crispin80673022016-06-29 13:38:11 +02002372 for (i = 0; i < 3; i++) {
2373 eth->irq[i] = platform_get_irq(pdev, i);
2374 if (eth->irq[i] < 0) {
2375 dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
2376 return -ENXIO;
2377 }
John Crispin656e7052016-03-08 11:29:55 +01002378 }
Sean Wang549e5492016-09-01 10:47:28 +08002379 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
2380 eth->clks[i] = devm_clk_get(eth->dev,
2381 mtk_clks_source_name[i]);
2382 if (IS_ERR(eth->clks[i])) {
2383 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
2384 return -EPROBE_DEFER;
2385 return -ENODEV;
2386 }
2387 }
John Crispin656e7052016-03-08 11:29:55 +01002388
John Crispin656e7052016-03-08 11:29:55 +01002389 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
John Crispin7c78b4a2016-04-08 00:54:10 +02002390 INIT_WORK(&eth->pending_work, mtk_pending_work);
John Crispin656e7052016-03-08 11:29:55 +01002391
2392 err = mtk_hw_init(eth);
2393 if (err)
2394 return err;
2395
2396 for_each_child_of_node(pdev->dev.of_node, mac_np) {
2397 if (!of_device_is_compatible(mac_np,
2398 "mediatek,eth-mac"))
2399 continue;
2400
2401 if (!of_device_is_available(mac_np))
2402 continue;
2403
2404 err = mtk_add_mac(eth, mac_np);
2405 if (err)
Sean Wang8a8a9e82016-09-14 23:13:17 +08002406 goto err_deinit_hw;
John Crispin656e7052016-03-08 11:29:55 +01002407 }
2408
Sean Wang85574db2016-09-14 23:13:15 +08002409 err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
2410 dev_name(eth->dev), eth);
2411 if (err)
2412 goto err_free_dev;
2413
2414 err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
2415 dev_name(eth->dev), eth);
2416 if (err)
2417 goto err_free_dev;
2418
2419 err = mtk_mdio_init(eth);
2420 if (err)
2421 goto err_free_dev;
2422
2423 for (i = 0; i < MTK_MAX_DEVS; i++) {
2424 if (!eth->netdev[i])
2425 continue;
2426
2427 err = register_netdev(eth->netdev[i]);
2428 if (err) {
2429 dev_err(eth->dev, "error bringing up device\n");
Sean Wang8a8a9e82016-09-14 23:13:17 +08002430 goto err_deinit_mdio;
Sean Wang85574db2016-09-14 23:13:15 +08002431 } else
2432 netif_info(eth, probe, eth->netdev[i],
2433 "mediatek frame engine at 0x%08lx, irq %d\n",
2434 eth->netdev[i]->base_addr, eth->irq[0]);
2435 }
2436
John Crispin656e7052016-03-08 11:29:55 +01002437 /* we run 2 devices on the same DMA ring so we need a dummy device
2438 * for NAPI to work
2439 */
2440 init_dummy_netdev(&eth->dummy_dev);
John Crispin80673022016-06-29 13:38:11 +02002441 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
2442 MTK_NAPI_WEIGHT);
2443 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
John Crispin656e7052016-03-08 11:29:55 +01002444 MTK_NAPI_WEIGHT);
2445
2446 platform_set_drvdata(pdev, eth);
2447
2448 return 0;
2449
Sean Wang8a8a9e82016-09-14 23:13:17 +08002450err_deinit_mdio:
2451 mtk_mdio_cleanup(eth);
John Crispin656e7052016-03-08 11:29:55 +01002452err_free_dev:
Sean Wang8a8a9e82016-09-14 23:13:17 +08002453 mtk_free_dev(eth);
2454err_deinit_hw:
2455 mtk_hw_deinit(eth);
2456
John Crispin656e7052016-03-08 11:29:55 +01002457 return err;
2458}
2459
2460static int mtk_remove(struct platform_device *pdev)
2461{
2462 struct mtk_eth *eth = platform_get_drvdata(pdev);
Sean Wang79e9a412016-09-01 10:47:32 +08002463 int i;
John Crispin656e7052016-03-08 11:29:55 +01002464
Sean Wang79e9a412016-09-01 10:47:32 +08002465 /* stop all devices to make sure that dma is properly shut down */
2466 for (i = 0; i < MTK_MAC_COUNT; i++) {
2467 if (!eth->netdev[i])
2468 continue;
2469 mtk_stop(eth->netdev[i]);
2470 }
John Crispin656e7052016-03-08 11:29:55 +01002471
Sean Wangbf253fb2016-09-14 23:13:16 +08002472 mtk_hw_deinit(eth);
John Crispin656e7052016-03-08 11:29:55 +01002473
John Crispin80673022016-06-29 13:38:11 +02002474 netif_napi_del(&eth->tx_napi);
John Crispin656e7052016-03-08 11:29:55 +01002475 netif_napi_del(&eth->rx_napi);
2476 mtk_cleanup(eth);
Sean Wange82f7142016-09-20 23:53:24 +08002477 mtk_mdio_cleanup(eth);
John Crispin656e7052016-03-08 11:29:55 +01002478
2479 return 0;
2480}
2481
2482const struct of_device_id of_mtk_match[] = {
2483 { .compatible = "mediatek,mt7623-eth" },
2484 {},
2485};
2486
2487static struct platform_driver mtk_driver = {
2488 .probe = mtk_probe,
2489 .remove = mtk_remove,
2490 .driver = {
2491 .name = "mtk_soc_eth",
John Crispin656e7052016-03-08 11:29:55 +01002492 .of_match_table = of_mtk_match,
2493 },
2494};
2495
2496module_platform_driver(mtk_driver);
2497
2498MODULE_LICENSE("GPL");
2499MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
2500MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");