/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
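
/* Ethtool stat descriptors pair a counter name with its position in
 * struct mtk_hw_stats, expressed as an index into the struct viewed as
 * a flat array of u64s; mtk_get_ethtool_stats() below uses that index
 * to copy the counters out.
 */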
#define MTK_ETHTOOL_STAT(x) { #x, \
                             offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

/* strings used by ethtool */
static const struct mtk_ethtool_stats {
        char str[ETH_GSTRING_LEN];
        u32 offset;
} mtk_ethtool_stats[] = {
        MTK_ETHTOOL_STAT(tx_bytes),
        MTK_ETHTOOL_STAT(tx_packets),
        MTK_ETHTOOL_STAT(tx_skip),
        MTK_ETHTOOL_STAT(tx_collisions),
        MTK_ETHTOOL_STAT(rx_bytes),
        MTK_ETHTOOL_STAT(rx_packets),
        MTK_ETHTOOL_STAT(rx_overflow),
        MTK_ETHTOOL_STAT(rx_fcs_errors),
        MTK_ETHTOOL_STAT(rx_short_errors),
        MTK_ETHTOOL_STAT(rx_long_errors),
        MTK_ETHTOOL_STAT(rx_checksum_errors),
        MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
        "ethif", "esw", "gp1", "gp2"
};

void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
        __raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
        return __raw_readl(eth->base + reg);
}
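
/* PHY registers are accessed indirectly through the MTK_PHY_IAC
 * register; the busy-wait below polls its access bit between commands.
 * Per the usual MDIO conventions, a timed-out write reports failure,
 * while a timed-out read returns 0xffff, the same value an absent PHY
 * would yield on the wire.
 */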
static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
        unsigned long t_start = jiffies;

        while (1) {
                if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
                        return 0;
                if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
                        break;
                usleep_range(10, 20);
        }

        dev_err(eth->dev, "mdio: MDIO timeout\n");
        return -1;
}

static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
                           u32 phy_register, u32 write_data)
{
        if (mtk_mdio_busy_wait(eth))
                return -1;

        write_data &= 0xffff;

        mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
                (phy_register << PHY_IAC_REG_SHIFT) |
                (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
                MTK_PHY_IAC);

        if (mtk_mdio_busy_wait(eth))
                return -1;

        return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
        u32 d;

        if (mtk_mdio_busy_wait(eth))
                return 0xffff;

        mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
                (phy_reg << PHY_IAC_REG_SHIFT) |
                (phy_addr << PHY_IAC_ADDR_SHIFT),
                MTK_PHY_IAC);

        if (mtk_mdio_busy_wait(eth))
                return 0xffff;

        d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

        return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
                          int phy_reg, u16 val)
{
        struct mtk_eth *eth = bus->priv;

        return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
        struct mtk_eth *eth = bus->priv;

        return _mtk_mdio_read(eth, phy_addr, phy_reg);
}
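
/* PHY link-change callback: the per-MAC control word is rebuilt from
 * scratch on every event, forcing the negotiated speed and duplex, and
 * pause frames are enabled only when mii_resolve_flowctrl_fdx() finds
 * that both link partners advertised them.
 */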
static void mtk_phy_link_adjust(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        u16 lcl_adv = 0, rmt_adv = 0;
        u8 flowctrl;
        u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
                  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
                  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
                  MAC_MCR_BACKPR_EN;

        switch (mac->phy_dev->speed) {
        case SPEED_1000:
                mcr |= MAC_MCR_SPEED_1000;
                break;
        case SPEED_100:
                mcr |= MAC_MCR_SPEED_100;
                break;
        }

        if (mac->phy_dev->link)
                mcr |= MAC_MCR_FORCE_LINK;

        if (mac->phy_dev->duplex) {
                mcr |= MAC_MCR_FORCE_DPX;

                if (mac->phy_dev->pause)
                        rmt_adv = LPA_PAUSE_CAP;
                if (mac->phy_dev->asym_pause)
                        rmt_adv |= LPA_PAUSE_ASYM;

                if (mac->phy_dev->advertising & ADVERTISED_Pause)
                        lcl_adv |= ADVERTISE_PAUSE_CAP;
                if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
                        lcl_adv |= ADVERTISE_PAUSE_ASYM;

                flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

                if (flowctrl & FLOW_CTRL_TX)
                        mcr |= MAC_MCR_FORCE_TX_FC;
                if (flowctrl & FLOW_CTRL_RX)
                        mcr |= MAC_MCR_FORCE_RX_FC;

                netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
                          flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
                          flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
        }

        mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

        if (mac->phy_dev->link)
                netif_carrier_on(dev);
        else
                netif_carrier_off(dev);
}

static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
                                struct device_node *phy_node)
{
        const __be32 *_addr = NULL;
        struct phy_device *phydev;
        int phy_mode, addr;

        _addr = of_get_property(phy_node, "reg", NULL);

        if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) {
                pr_err("%s: invalid phy address\n", phy_node->name);
                return -EINVAL;
        }
        addr = be32_to_cpu(*_addr);
        phy_mode = of_get_phy_mode(phy_node);
        if (phy_mode < 0) {
                dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
                return -EINVAL;
        }

        phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
                                mtk_phy_link_adjust, 0, phy_mode);
        if (!phydev) {
                dev_err(eth->dev, "could not connect to PHY\n");
                return -ENODEV;
        }

        dev_info(eth->dev,
                 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
                 mac->id, phydev_name(phydev), phydev->phy_id,
                 phydev->drv->name);

        mac->phy_dev = phydev;

        return 0;
}
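
/* ge_mode selects the GMAC interface type programmed into SYSCFG0:
 * 0 for the RGMII variants, 1 for MII, 2 for reverse MII and 3 for
 * RMII, which the code below rejects on the first GMAC (mac->id == 0).
 */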
static int mtk_phy_connect(struct mtk_mac *mac)
{
        struct mtk_eth *eth = mac->hw;
        struct device_node *np;
        u32 val, ge_mode;

        np = of_parse_phandle(mac->of_node, "phy-handle", 0);
        if (!np && of_phy_is_fixed_link(mac->of_node))
                if (!of_phy_register_fixed_link(mac->of_node))
                        np = of_node_get(mac->of_node);
        if (!np)
                return -ENODEV;

        switch (of_get_phy_mode(np)) {
        case PHY_INTERFACE_MODE_RGMII_TXID:
        case PHY_INTERFACE_MODE_RGMII_RXID:
        case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII:
                ge_mode = 0;
                break;
        case PHY_INTERFACE_MODE_MII:
                ge_mode = 1;
                break;
        case PHY_INTERFACE_MODE_REVMII:
                ge_mode = 2;
                break;
        case PHY_INTERFACE_MODE_RMII:
                if (!mac->id)
                        goto err_phy;
                ge_mode = 3;
                break;
        default:
                goto err_phy;
        }

        /* put the gmac into the right mode */
        regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
        val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
        val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
        regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

        mtk_phy_connect_node(eth, mac, np);
        mac->phy_dev->autoneg = AUTONEG_ENABLE;
        mac->phy_dev->speed = 0;
        mac->phy_dev->duplex = 0;

        if (of_phy_is_fixed_link(mac->of_node))
                mac->phy_dev->supported |=
                SUPPORTED_Pause | SUPPORTED_Asym_Pause;

        mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
                                   SUPPORTED_Asym_Pause;
        mac->phy_dev->advertising = mac->phy_dev->supported |
                                    ADVERTISED_Autoneg;
        phy_start_aneg(mac->phy_dev);

        of_node_put(np);

        return 0;

err_phy:
        of_node_put(np);
        dev_err(eth->dev, "invalid phy_mode\n");
        return -EINVAL;
}

static int mtk_mdio_init(struct mtk_eth *eth)
{
        struct device_node *mii_np;
        int ret;

        mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
        if (!mii_np) {
                dev_err(eth->dev, "no %s child node found", "mdio-bus");
                return -ENODEV;
        }

        if (!of_device_is_available(mii_np)) {
                ret = -ENODEV;
                goto err_put_node;
        }

        eth->mii_bus = devm_mdiobus_alloc(eth->dev);
        if (!eth->mii_bus) {
                ret = -ENOMEM;
                goto err_put_node;
        }

        eth->mii_bus->name = "mdio";
        eth->mii_bus->read = mtk_mdio_read;
        eth->mii_bus->write = mtk_mdio_write;
        eth->mii_bus->priv = eth;
        eth->mii_bus->parent = eth->dev;

        snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
        ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
        of_node_put(mii_np);
        return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
        if (!eth->mii_bus)
                return;

        mdiobus_unregister(eth->mii_bus);
}
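
/* The QDMA and PDMA interrupt mask registers are updated with plain
 * read-modify-write sequences, so both helpers serialize on irq_lock to
 * keep concurrent enable/disable calls from losing bits.
 */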
static inline void mtk_irq_disable(struct mtk_eth *eth,
                                   unsigned reg, u32 mask)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&eth->irq_lock, flags);
        val = mtk_r32(eth, reg);
        mtk_w32(eth, val & ~mask, reg);
        spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static inline void mtk_irq_enable(struct mtk_eth *eth,
                                  unsigned reg, u32 mask)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&eth->irq_lock, flags);
        val = mtk_r32(eth, reg);
        mtk_w32(eth, val | mask, reg);
        spin_unlock_irqrestore(&eth->irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
        int ret = eth_mac_addr(dev, p);
        struct mtk_mac *mac = netdev_priv(dev);
        const char *macaddr = dev->dev_addr;

        if (ret)
                return ret;

        spin_lock_bh(&mac->hw->page_lock);
        mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
                MTK_GDMA_MAC_ADRH(mac->id));
        mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
                (macaddr[4] << 8) | macaddr[5],
                MTK_GDMA_MAC_ADRL(mac->id));
        spin_unlock_bh(&mac->hw->page_lock);

        return 0;
}
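
/* The hardware counters are 32 bits wide; the byte counters additionally
 * latch a high word (base + 0x04 for RX, base + 0x34 for TX) that is
 * shifted up by 32 and added in, so the accumulated software copies in
 * mtk_hw_stats stay monotonically increasing u64s.
 */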
void mtk_stats_update_mac(struct mtk_mac *mac)
{
        struct mtk_hw_stats *hw_stats = mac->hw_stats;
        unsigned int base = MTK_GDM1_TX_GBCNT;
        u64 stats;

        base += hw_stats->reg_offset;

        u64_stats_update_begin(&hw_stats->syncp);

        hw_stats->rx_bytes += mtk_r32(mac->hw, base);
        stats = mtk_r32(mac->hw, base + 0x04);
        if (stats)
                hw_stats->rx_bytes += (stats << 32);
        hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
        hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
        hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
        hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
        hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
        hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
        hw_stats->rx_flow_control_packets +=
                                        mtk_r32(mac->hw, base + 0x24);
        hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
        hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
        hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
        stats = mtk_r32(mac->hw, base + 0x34);
        if (stats)
                hw_stats->tx_bytes += (stats << 32);
        hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
        u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
        int i;

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->mac[i] || !eth->mac[i]->hw_stats)
                        continue;
                if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
                        mtk_stats_update_mac(eth->mac[i]);
                        spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
                }
        }
}

static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
                                        struct rtnl_link_stats64 *storage)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_hw_stats *hw_stats = mac->hw_stats;
        unsigned int start;

        if (netif_running(dev) && netif_device_present(dev)) {
                if (spin_trylock(&hw_stats->stats_lock)) {
                        mtk_stats_update_mac(mac);
                        spin_unlock(&hw_stats->stats_lock);
                }
        }

        do {
                start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
                storage->rx_packets = hw_stats->rx_packets;
                storage->tx_packets = hw_stats->tx_packets;
                storage->rx_bytes = hw_stats->rx_bytes;
                storage->tx_bytes = hw_stats->tx_bytes;
                storage->collisions = hw_stats->tx_collisions;
                storage->rx_length_errors = hw_stats->rx_short_errors +
                                            hw_stats->rx_long_errors;
                storage->rx_over_errors = hw_stats->rx_overflow;
                storage->rx_crc_errors = hw_stats->rx_fcs_errors;
                storage->rx_errors = hw_stats->rx_checksum_errors;
                storage->tx_aborted_errors = hw_stats->tx_skip;
        } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

        storage->tx_errors = dev->stats.tx_errors;
        storage->rx_dropped = dev->stats.rx_dropped;
        storage->tx_dropped = dev->stats.tx_dropped;

        return storage;
}
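
/* RX buffer sizing, roughly: mtk_max_frag_size() pads the MTU (clamped
 * so at least MTK_MAX_RX_LENGTH fits) with the headroom and the
 * skb_shared_info tail needed by build_skb(), and mtk_max_buf_size()
 * inverts that to recover the usable DMA length from a frag size.
 */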
static inline int mtk_max_frag_size(int mtu)
{
        /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
        if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
                mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

        return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
        int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

        return buf_size;
}

static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
                                   struct mtk_rx_dma *dma_rxd)
{
        rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
        rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
        rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
        rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

/* the qdma core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
        dma_addr_t phy_ring_tail;
        int cnt = MTK_DMA_SIZE;
        dma_addr_t dma_addr;
        int i;

        eth->scratch_ring = dma_alloc_coherent(eth->dev,
                                               cnt * sizeof(struct mtk_tx_dma),
                                               &eth->phy_scratch_ring,
                                               GFP_ATOMIC | __GFP_ZERO);
        if (unlikely(!eth->scratch_ring))
                return -ENOMEM;

        eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
                                    GFP_KERNEL);
        if (unlikely(!eth->scratch_head))
                return -ENOMEM;

        dma_addr = dma_map_single(eth->dev,
                                  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
                                  DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
                return -ENOMEM;

        memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
        phy_ring_tail = eth->phy_scratch_ring +
                        (sizeof(struct mtk_tx_dma) * (cnt - 1));

        for (i = 0; i < cnt; i++) {
                eth->scratch_ring[i].txd1 =
                                        (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
                if (i < cnt - 1)
                        eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
                                ((i + 1) * sizeof(struct mtk_tx_dma)));
                eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
        }

        mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
        mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
        mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
        mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

        return 0;
}
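
/* All TX descriptors sit in one physically contiguous coherent block,
 * so mapping a hardware (physical) descriptor pointer back to a CPU
 * pointer, or to its shadow entry in ring->buf[], is simple pointer
 * arithmetic against the ring base.
 */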
static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
        void *ret = ring->dma;

        return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
                                                    struct mtk_tx_dma *txd)
{
        int idx = txd - ring->dma;

        return &ring->buf[idx];
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
        if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
                dma_unmap_single(eth->dev,
                                 dma_unmap_addr(tx_buf, dma_addr0),
                                 dma_unmap_len(tx_buf, dma_len0),
                                 DMA_TO_DEVICE);
        } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
                dma_unmap_page(eth->dev,
                               dma_unmap_addr(tx_buf, dma_addr0),
                               dma_unmap_len(tx_buf, dma_len0),
                               DMA_TO_DEVICE);
        }
        tx_buf->flags = 0;
        if (tx_buf->skb &&
            (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
                dev_kfree_skb_any(tx_buf->skb);
        tx_buf->skb = NULL;
}
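
/* mtk_tx_map() builds the descriptor chain for one skb: the head
 * descriptor carries the linear data and the offload flags in txd4,
 * every fragment is split into chunks of at most MTK_TX_DMA_BUF_LEN,
 * and TX_DMA_LS0 marks the final chunk. Only the last slot stores the
 * real skb pointer for completion; the others hold MTK_DMA_DUMMY_DESC.
 */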
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
                      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;
        struct mtk_tx_dma *itxd, *txd;
        struct mtk_tx_buf *tx_buf;
        dma_addr_t mapped_addr;
        unsigned int nr_frags;
        int i, n_desc = 1;
        u32 txd4 = 0, fport;

        itxd = ring->next_free;
        if (itxd == ring->last_free)
                return -ENOMEM;

        /* set the forward port */
        fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
        txd4 |= fport;

        tx_buf = mtk_desc_to_tx_buf(ring, itxd);
        memset(tx_buf, 0, sizeof(*tx_buf));

        if (gso)
                txd4 |= TX_DMA_TSO;

        /* TX Checksum offload */
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                txd4 |= TX_DMA_CHKSUM;

        /* VLAN header offload */
        if (skb_vlan_tag_present(skb))
                txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

        mapped_addr = dma_map_single(eth->dev, skb->data,
                                     skb_headlen(skb), DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
                return -ENOMEM;

        WRITE_ONCE(itxd->txd1, mapped_addr);
        tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
        dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
        dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

        /* TX SG offload */
        txd = itxd;
        nr_frags = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < nr_frags; i++) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                unsigned int offset = 0;
                int frag_size = skb_frag_size(frag);

                while (frag_size) {
                        bool last_frag = false;
                        unsigned int frag_map_size;

                        txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
                        if (txd == ring->last_free)
                                goto err_dma;

                        n_desc++;
                        frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
                        mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
                                                       frag_map_size,
                                                       DMA_TO_DEVICE);
                        if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
                                goto err_dma;

                        if (i == nr_frags - 1 &&
                            (frag_size - frag_map_size) == 0)
                                last_frag = true;

                        WRITE_ONCE(txd->txd1, mapped_addr);
                        WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
                                               TX_DMA_PLEN0(frag_map_size) |
                                               last_frag * TX_DMA_LS0));
                        WRITE_ONCE(txd->txd4, fport);

                        tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
                        tx_buf = mtk_desc_to_tx_buf(ring, txd);
                        memset(tx_buf, 0, sizeof(*tx_buf));

                        tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
                        dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
                        dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
                        frag_size -= frag_map_size;
                        offset += frag_map_size;
                }
        }

        /* store skb to cleanup */
        tx_buf->skb = skb;

        WRITE_ONCE(itxd->txd4, txd4);
        WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
                                (!nr_frags * TX_DMA_LS0)));

        netdev_sent_queue(dev, skb->len);
        skb_tx_timestamp(skb);

        ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
        atomic_sub(n_desc, &ring->free_count);

        /* make sure that all changes to the dma ring are flushed before we
         * continue
         */
        wmb();

        if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
                mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

        return 0;

err_dma:
        do {
                tx_buf = mtk_desc_to_tx_buf(ring, itxd);

                /* unmap dma */
                mtk_tx_unmap(eth, tx_buf);

                itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
                itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
        } while (itxd != txd);

        return -ENOMEM;
}
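
/* Worst-case descriptor count for an skb: one for the linear head plus,
 * for GSO, DIV_ROUND_UP(frag size, MTK_TX_DMA_BUF_LEN) per fragment.
 * As an illustration, with a hypothetical 16 KiB chunk limit a GSO skb
 * carrying a single 64 KiB fragment would need 1 + 4 descriptors, while
 * a non-GSO skb simply needs one descriptor per fragment.
 */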
static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
        int i, nfrags;
        struct skb_frag_struct *frag;

        nfrags = 1;
        if (skb_is_gso(skb)) {
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        frag = &skb_shinfo(skb)->frags[i];
                        nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
                }
        } else {
                nfrags += skb_shinfo(skb)->nr_frags;
        }

        return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
        int i;

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
                if (netif_queue_stopped(eth->netdev[i]))
                        return 1;
        }

        return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
        int i;

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
                netif_wake_queue(eth->netdev[i]);
        }
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
        int i;

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i])
                        continue;
                netif_stop_queue(eth->netdev[i]);
        }
}
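
/* Both netdevs transmit over the same QDMA ring, so starting, stopping
 * and waking the queues is always done for all MACs at once, and the
 * xmit path below takes eth->page_lock around its ring accesses.
 */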
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;
        struct mtk_tx_ring *ring = &eth->tx_ring;
        struct net_device_stats *stats = &dev->stats;
        bool gso = false;
        int tx_num;

        /* normally we can rely on the stack not calling this more than once,
         * however we have 2 queues running on the same ring so we need to lock
         * the ring access
         */
        spin_lock(&eth->page_lock);

        tx_num = mtk_cal_txd_req(skb);
        if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
                mtk_stop_queue(eth);
                netif_err(eth, tx_queued, dev,
                          "Tx Ring full when queue awake!\n");
                spin_unlock(&eth->page_lock);
                return NETDEV_TX_BUSY;
        }

        /* TSO: fill MSS info in tcp checksum field */
        if (skb_is_gso(skb)) {
                if (skb_cow_head(skb, 0)) {
                        netif_warn(eth, tx_err, dev,
                                   "GSO expand head fail.\n");
                        goto drop;
                }

                if (skb_shinfo(skb)->gso_type &
                                (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
                        gso = true;
                        tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
                }
        }

        if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
                goto drop;

        if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
                mtk_stop_queue(eth);

        spin_unlock(&eth->page_lock);

        return NETDEV_TX_OK;

drop:
        spin_unlock(&eth->page_lock);
        stats->tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
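
/* The RX path avoids copies by recycling ring slots: a replacement page
 * fragment is allocated and DMA-mapped first, and only then is the
 * filled buffer handed up via build_skb(), with the new fragment taking
 * its slot. On any failure the descriptor is recycled unchanged and the
 * packet counted as dropped.
 */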
static int mtk_poll_rx(struct napi_struct *napi, int budget,
                       struct mtk_eth *eth)
{
        struct mtk_rx_ring *ring = &eth->rx_ring;
        int idx = ring->calc_idx;
        struct sk_buff *skb;
        u8 *data, *new_data;
        struct mtk_rx_dma *rxd, trxd;
        int done = 0;

        while (done < budget) {
                struct net_device *netdev;
                unsigned int pktlen;
                dma_addr_t dma_addr;
                int mac = 0;

                idx = NEXT_RX_DESP_IDX(idx);
                rxd = &ring->dma[idx];
                data = ring->data[idx];

                mtk_rx_get_desc(&trxd, rxd);
                if (!(trxd.rxd2 & RX_DMA_DONE))
                        break;

                /* find out which mac the packet comes from. values start at 1 */
                mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
                      RX_DMA_FPORT_MASK;
                mac--;

                netdev = eth->netdev[mac];

                /* alloc new buffer */
                new_data = napi_alloc_frag(ring->frag_size);
                if (unlikely(!new_data)) {
                        netdev->stats.rx_dropped++;
                        goto release_desc;
                }
                dma_addr = dma_map_single(eth->dev,
                                          new_data + NET_SKB_PAD,
                                          ring->buf_size,
                                          DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
                        skb_free_frag(new_data);
                        netdev->stats.rx_dropped++;
                        goto release_desc;
                }

                /* receive data */
                skb = build_skb(data, ring->frag_size);
                if (unlikely(!skb)) {
                        skb_free_frag(new_data);
                        netdev->stats.rx_dropped++;
                        goto release_desc;
                }
                skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

                dma_unmap_single(eth->dev, trxd.rxd1,
                                 ring->buf_size, DMA_FROM_DEVICE);
                pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
                skb->dev = netdev;
                skb_put(skb, pktlen);
                if (trxd.rxd4 & RX_DMA_L4_VALID)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);
                skb->protocol = eth_type_trans(skb, netdev);

                if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
                    RX_DMA_VID(trxd.rxd3))
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               RX_DMA_VID(trxd.rxd3));
                napi_gro_receive(napi, skb);

                ring->data[idx] = new_data;
                rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
                rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

                ring->calc_idx = idx;

                done++;
        }

        if (done) {
                /* make sure that all changes to the dma ring are flushed before
                 * we continue
                 */
                wmb();
                mtk_w32(eth, ring->calc_idx, MTK_PRX_CRX_IDX0);
        }

        return done;
}
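
/* TX completion walks the descriptors between the hardware's CRX (CPU)
 * and DRX (DMA) pointers, unmapping buffers and crediting the finished
 * bytes and packets back to the owning netdev's BQL accounting through
 * netdev_completed_queue().
 */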
static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
        struct mtk_tx_ring *ring = &eth->tx_ring;
        struct mtk_tx_dma *desc;
        struct sk_buff *skb;
        struct mtk_tx_buf *tx_buf;
        unsigned int done[MTK_MAX_DEVS];
        unsigned int bytes[MTK_MAX_DEVS];
        u32 cpu, dma;
        static int condition;
        int total = 0, i;

        memset(done, 0, sizeof(done));
        memset(bytes, 0, sizeof(bytes));

        cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
        dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

        desc = mtk_qdma_phys_to_virt(ring, cpu);

        while ((cpu != dma) && budget) {
                u32 next_cpu = desc->txd2;
                int mac;

                desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
                if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
                        break;

                mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
                      TX_DMA_FPORT_MASK;
                mac--;

                tx_buf = mtk_desc_to_tx_buf(ring, desc);
                skb = tx_buf->skb;
                if (!skb) {
                        condition = 1;
                        break;
                }

                if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
                        bytes[mac] += skb->len;
                        done[mac]++;
                        budget--;
                }
                mtk_tx_unmap(eth, tx_buf);

                ring->last_free = desc;
                atomic_inc(&ring->free_count);

                cpu = next_cpu;
        }

        mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

        for (i = 0; i < MTK_MAC_COUNT; i++) {
                if (!eth->netdev[i] || !done[i])
                        continue;
                netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
                total += done[i];
        }

        if (mtk_queue_stopped(eth) &&
            (atomic_read(&ring->free_count) > ring->thresh))
                mtk_wake_queue(eth);

        return total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
        u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);

        if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
                mtk_stats_update(eth);
                mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
                        MTK_INT_STATUS2);
        }
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
        struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
        u32 status, mask;
        int tx_done = 0;

        mtk_handle_status_irq(eth);
        mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
        tx_done = mtk_poll_tx(eth, budget);

        if (unlikely(netif_msg_intr(eth))) {
                status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
                mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
                dev_info(eth->dev,
                         "done tx %d, intr 0x%08x/0x%x\n",
                         tx_done, status, mask);
        }

        if (tx_done == budget)
                return budget;

        status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
        if (status & MTK_TX_DONE_INT)
                return budget;

        napi_complete(napi);
        mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);

        return tx_done;
}
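
/* The RX poll loop re-reads the PDMA status after each pass: if more
 * RX-done work was signalled while the pass ran and budget remains, it
 * loops again instead of completing NAPI, saving an interrupt and
 * reschedule round trip.
 */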
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
        struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
        u32 status, mask;
        int rx_done = 0;
        int remain_budget = budget;

        mtk_handle_status_irq(eth);

poll_again:
        mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
        rx_done = mtk_poll_rx(napi, remain_budget, eth);

        if (unlikely(netif_msg_intr(eth))) {
                status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
                mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
                dev_info(eth->dev,
                         "done rx %d, intr 0x%08x/0x%x\n",
                         rx_done, status, mask);
        }
        if (rx_done == remain_budget)
                return budget;

        status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
        if (status & MTK_RX_DONE_INT) {
                remain_budget -= rx_done;
                goto poll_again;
        }
        napi_complete(napi);
        mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);

        return rx_done + budget - remain_budget;
}
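
/* The TX ring is set up as a circular list, each descriptor's txd2
 * pointing at the physical address of the next slot. free_count starts
 * at MTK_DMA_SIZE - 2, which keeps next_free from ever catching up
 * with last_free while the ring is in use.
 */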
static int mtk_tx_alloc(struct mtk_eth *eth)
{
        struct mtk_tx_ring *ring = &eth->tx_ring;
        int i, sz = sizeof(*ring->dma);

        ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
                            GFP_KERNEL);
        if (!ring->buf)
                goto no_tx_mem;

        ring->dma = dma_alloc_coherent(eth->dev,
                                       MTK_DMA_SIZE * sz,
                                       &ring->phys,
                                       GFP_ATOMIC | __GFP_ZERO);
        if (!ring->dma)
                goto no_tx_mem;

        memset(ring->dma, 0, MTK_DMA_SIZE * sz);
        for (i = 0; i < MTK_DMA_SIZE; i++) {
                int next = (i + 1) % MTK_DMA_SIZE;
                u32 next_ptr = ring->phys + next * sz;

                ring->dma[i].txd2 = next_ptr;
                ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
        }

        atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
        ring->next_free = &ring->dma[0];
        ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
        ring->thresh = MAX_SKB_FRAGS;

        /* make sure that all changes to the dma ring are flushed before we
         * continue
         */
        wmb();

        mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
        mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
        mtk_w32(eth,
                ring->phys + ((MTK_DMA_SIZE - 1) * sz),
                MTK_QTX_CRX_PTR);
        mtk_w32(eth,
                ring->phys + ((MTK_DMA_SIZE - 1) * sz),
                MTK_QTX_DRX_PTR);
        mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));

        return 0;

no_tx_mem:
        return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
        struct mtk_tx_ring *ring = &eth->tx_ring;
        int i;

        if (ring->buf) {
                for (i = 0; i < MTK_DMA_SIZE; i++)
                        mtk_tx_unmap(eth, &ring->buf[i]);
                kfree(ring->buf);
                ring->buf = NULL;
        }

        if (ring->dma) {
                dma_free_coherent(eth->dev,
                                  MTK_DMA_SIZE * sizeof(*ring->dma),
                                  ring->dma,
                                  ring->phys);
                ring->dma = NULL;
        }
}

static int mtk_rx_alloc(struct mtk_eth *eth)
{
        struct mtk_rx_ring *ring = &eth->rx_ring;
        int i;

        ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
        ring->buf_size = mtk_max_buf_size(ring->frag_size);
        ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data),
                             GFP_KERNEL);
        if (!ring->data)
                return -ENOMEM;

        for (i = 0; i < MTK_DMA_SIZE; i++) {
                ring->data[i] = netdev_alloc_frag(ring->frag_size);
                if (!ring->data[i])
                        return -ENOMEM;
        }

        ring->dma = dma_alloc_coherent(eth->dev,
                                       MTK_DMA_SIZE * sizeof(*ring->dma),
                                       &ring->phys,
                                       GFP_ATOMIC | __GFP_ZERO);
        if (!ring->dma)
                return -ENOMEM;

        for (i = 0; i < MTK_DMA_SIZE; i++) {
                dma_addr_t dma_addr = dma_map_single(eth->dev,
                                ring->data[i] + NET_SKB_PAD,
                                ring->buf_size,
                                DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
                        return -ENOMEM;
                ring->dma[i].rxd1 = (unsigned int)dma_addr;

                ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
        }
        ring->calc_idx = MTK_DMA_SIZE - 1;
        /* make sure that all changes to the dma ring are flushed before we
         * continue
         */
        wmb();

        mtk_w32(eth, eth->rx_ring.phys, MTK_PRX_BASE_PTR0);
        mtk_w32(eth, MTK_DMA_SIZE, MTK_PRX_MAX_CNT0);
        mtk_w32(eth, eth->rx_ring.calc_idx, MTK_PRX_CRX_IDX0);
        mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_PDMA_RST_IDX);

        return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth)
{
        struct mtk_rx_ring *ring = &eth->rx_ring;
        int i;

        if (ring->data && ring->dma) {
                for (i = 0; i < MTK_DMA_SIZE; i++) {
                        if (!ring->data[i])
                                continue;
                        if (!ring->dma[i].rxd1)
                                continue;
                        dma_unmap_single(eth->dev,
                                         ring->dma[i].rxd1,
                                         ring->buf_size,
                                         DMA_FROM_DEVICE);
                        skb_free_frag(ring->data[i]);
                }
                kfree(ring->data);
                ring->data = NULL;
        }

        if (ring->dma) {
                dma_free_coherent(eth->dev,
                                  MTK_DMA_SIZE * sizeof(*ring->dma),
                                  ring->dma,
                                  ring->phys);
                ring->dma = NULL;
        }
}

/* wait for DMA to finish whatever it is doing before we start using it again */
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
        unsigned long t_start = jiffies;

        while (1) {
                if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
                      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
                        return 0;
                if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
                        break;
        }

        dev_err(eth->dev, "DMA init timeout\n");
        return -1;
}

static int mtk_dma_init(struct mtk_eth *eth)
{
        int err;

        if (mtk_dma_busy_wait(eth))
                return -EBUSY;

        /* QDMA needs scratch memory for internal reordering of the
         * descriptors
         */
        err = mtk_init_fq_dma(eth);
        if (err)
                return err;

        err = mtk_tx_alloc(eth);
        if (err)
                return err;

        err = mtk_rx_alloc(eth);
        if (err)
                return err;

        /* Enable random early drop and set drop threshold automatically */
        mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
                MTK_QDMA_FC_THRES);
        mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

        return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
        int i;

        for (i = 0; i < MTK_MAC_COUNT; i++)
                if (eth->netdev[i])
                        netdev_reset_queue(eth->netdev[i]);
        if (eth->scratch_ring) {
                dma_free_coherent(eth->dev,
                                  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
                                  eth->scratch_ring,
                                  eth->phy_scratch_ring);
                eth->scratch_ring = NULL;
                eth->phy_scratch_ring = 0;
        }
        mtk_tx_clean(eth);
        mtk_rx_clean(eth);
        kfree(eth->scratch_head);
}

static void mtk_tx_timeout(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        eth->netdev[mac->id]->stats.tx_errors++;
        netif_err(eth, tx_err, dev,
                  "transmit timed out\n");
        schedule_work(&eth->pending_work);
}
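
/* TX and RX completions arrive on separate interrupt lines (QDMA and
 * PDMA respectively); each handler masks only its own source and defers
 * the real work to the matching NAPI context.
 */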
static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
        struct mtk_eth *eth = _eth;

        if (likely(napi_schedule_prep(&eth->rx_napi))) {
                __napi_schedule(&eth->rx_napi);
                mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
        }

        return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
        struct mtk_eth *eth = _eth;

        if (likely(napi_schedule_prep(&eth->tx_napi))) {
                __napi_schedule(&eth->tx_napi);
                mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
        }

        return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
        mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
        mtk_handle_irq_rx(eth->irq[2], dev);
        mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
        mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
}
#endif

static int mtk_start_dma(struct mtk_eth *eth)
{
        int err;

        err = mtk_dma_init(eth);
        if (err) {
                mtk_dma_free(eth);
                return err;
        }

        mtk_w32(eth,
                MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
                MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO,
                MTK_QDMA_GLO_CFG);

        mtk_w32(eth,
                MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
                MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
                MTK_PDMA_GLO_CFG);

        return 0;
}
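
/* Bring-up and teardown are refcounted via dma_refcnt because the two
 * netdevs share one DMA engine: the first open starts DMA, NAPI and
 * interrupts, and only the last close shuts them down again.
 */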
static int mtk_open(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        /* we run 2 netdevs on the same dma ring so we only bring it up once */
        if (!atomic_read(&eth->dma_refcnt)) {
                int err = mtk_start_dma(eth);

                if (err)
                        return err;

                napi_enable(&eth->tx_napi);
                napi_enable(&eth->rx_napi);
                mtk_irq_enable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
                mtk_irq_enable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
        }
        atomic_inc(&eth->dma_refcnt);

        phy_start(mac->phy_dev);
        netif_start_queue(dev);

        return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
        u32 val;
        int i;

        /* stop the dma engine */
        spin_lock_bh(&eth->page_lock);
        val = mtk_r32(eth, glo_cfg);
        mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
                glo_cfg);
        spin_unlock_bh(&eth->page_lock);

        /* wait for dma stop */
        for (i = 0; i < 10; i++) {
                val = mtk_r32(eth, glo_cfg);
                if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
                        msleep(20);
                        continue;
                }
                break;
        }
}

static int mtk_stop(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        netif_tx_disable(dev);
        phy_stop(mac->phy_dev);

        /* only shutdown DMA if this is the last user */
        if (!atomic_dec_and_test(&eth->dma_refcnt))
                return 0;

        mtk_irq_disable(eth, MTK_QDMA_INT_MASK, MTK_TX_DONE_INT);
        mtk_irq_disable(eth, MTK_PDMA_INT_MASK, MTK_RX_DONE_INT);
        napi_disable(&eth->tx_napi);
        napi_disable(&eth->rx_napi);

        mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);

        mtk_dma_free(eth);

        return 0;
}

static int __init mtk_hw_init(struct mtk_eth *eth)
{
        int i;

        pm_runtime_enable(eth->dev);
        pm_runtime_get_sync(eth->dev);

        clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
        clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
        clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
        clk_prepare_enable(eth->clks[MTK_CLK_GP2]);

        /* reset the frame engine */
        reset_control_assert(eth->rstc);
        usleep_range(10, 20);
        reset_control_deassert(eth->rstc);
        usleep_range(10, 20);

        /* Set GE2 driving and slew rate */
        regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

        /* set GE2 TDSEL */
        regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

        /* set GE2 TUNE */
        regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);

        /* GE1, Force 1000M/FD, FC ON */
        mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(0));

        /* GE2, Force 1000M/FD, FC ON */
        mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1));

        /* Enable RX VLan Offloading */
        mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

        /* disable delay and normal interrupt */
        mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
        mtk_w32(eth, 0, MTK_PDMA_DELAY_INT);
        mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
        mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
        mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
        mtk_w32(eth, 0, MTK_RST_GL);

        /* FE int grouping */
        mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
        mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
        mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
        mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
        mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

        for (i = 0; i < 2; i++) {
                u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

                /* setup the forward port to send frame to PDMA */
                val &= ~0xffff;

                /* Enable RX checksum */
                val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

                /* setup the mac dma */
                mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
        }

        return 0;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
        clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
        clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
        clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
        clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);

        pm_runtime_put_sync(eth->dev);
        pm_runtime_disable(eth->dev);

        return 0;
}

static int __init mtk_init(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;
        const char *mac_addr;

        mac_addr = of_get_mac_address(mac->of_node);
        if (mac_addr)
                ether_addr_copy(dev->dev_addr, mac_addr);

        /* If the mac address is invalid, use random mac address */
        if (!is_valid_ether_addr(dev->dev_addr)) {
                random_ether_addr(dev->dev_addr);
                dev_err(eth->dev, "generated random MAC address %pM\n",
                        dev->dev_addr);
                dev->addr_assign_type = NET_ADDR_RANDOM;
        }

        return mtk_phy_connect(mac);
}

static void mtk_uninit(struct net_device *dev)
{
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_eth *eth = mac->hw;

        phy_disconnect(mac->phy_dev);
        mtk_mdio_cleanup(eth);
        mtk_irq_disable(eth, MTK_QDMA_INT_MASK, ~0);
        mtk_irq_disable(eth, MTK_PDMA_INT_MASK, ~0);
        free_irq(eth->irq[1], dev);
        free_irq(eth->irq[2], dev);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mtk_mac *mac = netdev_priv(dev);

        switch (cmd) {
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
        default:
                break;
        }

        return -EOPNOTSUPP;
}
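
/* Reset worker scheduled from mtk_tx_timeout(): under rtnl_lock it
 * closes every running netdev, which tears down the shared DMA state,
 * and then brings the same set back up again.
 */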
1547static void mtk_pending_work(struct work_struct *work)
1548{
John Crispin7c78b4a2016-04-08 00:54:10 +02001549 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
John Crispine7d425d2016-04-08 00:54:09 +02001550 int err, i;
1551 unsigned long restart = 0;
John Crispin656e7052016-03-08 11:29:55 +01001552
1553 rtnl_lock();
John Crispin656e7052016-03-08 11:29:55 +01001554
John Crispine7d425d2016-04-08 00:54:09 +02001555 /* stop all devices to make sure that dma is properly shut down */
1556 for (i = 0; i < MTK_MAC_COUNT; i++) {
John Crispin7c78b4a2016-04-08 00:54:10 +02001557 if (!eth->netdev[i])
John Crispine7d425d2016-04-08 00:54:09 +02001558 continue;
1559 mtk_stop(eth->netdev[i]);
1560 __set_bit(i, &restart);
1561 }
1562
1563 /* restart DMA and enable IRQs */
1564 for (i = 0; i < MTK_MAC_COUNT; i++) {
1565 if (!test_bit(i, &restart))
1566 continue;
1567 err = mtk_open(eth->netdev[i]);
1568 if (err) {
1569 netif_alert(eth, ifup, eth->netdev[i],
1570 "Driver up/down cycle failed, closing device.\n");
1571 dev_close(eth->netdev[i]);
1572 }
John Crispin656e7052016-03-08 11:29:55 +01001573 }
1574 rtnl_unlock();
1575}
1576
Sean Wang8a8a9e82016-09-14 23:13:17 +08001577static int mtk_free_dev(struct mtk_eth *eth)
John Crispin656e7052016-03-08 11:29:55 +01001578{
1579 int i;
1580
1581 for (i = 0; i < MTK_MAC_COUNT; i++) {
John Crispin656e7052016-03-08 11:29:55 +01001582 if (!eth->netdev[i])
1583 continue;
John Crispin656e7052016-03-08 11:29:55 +01001584 free_netdev(eth->netdev[i]);
John Crispin656e7052016-03-08 11:29:55 +01001585 }
Sean Wang8a8a9e82016-09-14 23:13:17 +08001586
1587 return 0;
1588}
1589
1590static int mtk_unreg_dev(struct mtk_eth *eth)
1591{
1592 int i;
1593
1594 for (i = 0; i < MTK_MAC_COUNT; i++) {
1595 if (!eth->netdev[i])
1596 continue;
1597 unregister_netdev(eth->netdev[i]);
1598 }
1599
1600 return 0;
1601}
1602
1603static int mtk_cleanup(struct mtk_eth *eth)
1604{
1605 mtk_unreg_dev(eth);
1606 mtk_free_dev(eth);
John Crispin7c78b4a2016-04-08 00:54:10 +02001607 cancel_work_sync(&eth->pending_work);
John Crispin656e7052016-03-08 11:29:55 +01001608
1609 return 0;
1610}
1611
static int mtk_get_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	err = phy_read_status(mac->phy_dev);
	if (err)
		return -ENODEV;

	return phy_ethtool_gset(mac->phy_dev, cmd);
}

static int mtk_set_settings(struct net_device *dev,
			    struct ethtool_cmd *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (cmd->phy_address != mac->phy_dev->mdio.addr) {
		mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
					       cmd->phy_address);
		if (!mac->phy_dev)
			return -ENODEV;
	}

	return phy_ethtool_sset(mac->phy_dev, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return genphy_restart_aneg(mac->phy_dev);
}

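/* poll the PHY for the current link state; if that fails, fall back to
 * the carrier state cached in the net_device
 */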
static u32 mtk_get_link(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	err = genphy_update_link(mac->phy_dev);
	if (err)
		return ethtool_op_get_link(dev);

	return mac->phy_dev->link;
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hwstats->stats_lock);
		}
	}

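	/* the u64_stats sequence counter makes us retry the copy if a
	 * writer updated the counter block while we were reading it
	 */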
1723 do {
Nelson Changbacfd112016-08-26 01:09:42 +08001724 data_src = (u64 *)hwstats;
John Crispin656e7052016-03-08 11:29:55 +01001725 data_dst = data;
1726 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
1727
1728 for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
1729 *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
1730 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
1731}
1732
Julia Lawall6a38cb12016-09-01 00:21:19 +02001733static const struct ethtool_ops mtk_ethtool_ops = {
John Crispin656e7052016-03-08 11:29:55 +01001734 .get_settings = mtk_get_settings,
1735 .set_settings = mtk_set_settings,
1736 .get_drvinfo = mtk_get_drvinfo,
1737 .get_msglevel = mtk_get_msglevel,
1738 .set_msglevel = mtk_set_msglevel,
1739 .nway_reset = mtk_nway_reset,
1740 .get_link = mtk_get_link,
1741 .get_strings = mtk_get_strings,
1742 .get_sset_count = mtk_get_sset_count,
1743 .get_ethtool_stats = mtk_get_ethtool_stats,
1744};
1745
1746static const struct net_device_ops mtk_netdev_ops = {
1747 .ndo_init = mtk_init,
1748 .ndo_uninit = mtk_uninit,
1749 .ndo_open = mtk_open,
1750 .ndo_stop = mtk_stop,
1751 .ndo_start_xmit = mtk_start_xmit,
1752 .ndo_set_mac_address = mtk_set_mac_address,
1753 .ndo_validate_addr = eth_validate_addr,
1754 .ndo_do_ioctl = mtk_do_ioctl,
1755 .ndo_change_mtu = eth_change_mtu,
1756 .ndo_tx_timeout = mtk_tx_timeout,
1757 .ndo_get_stats64 = mtk_get_stats64,
1758#ifdef CONFIG_NET_POLL_CONTROLLER
1759 .ndo_poll_controller = mtk_poll_controller,
1760#endif
1761};
1762
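/* create one netdev for a "mediatek,eth-mac" child node: allocate the
 * per-MAC private data and hardware counters and hook up the ops
 */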
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	struct mtk_mac *mac;
	const __be32 *_id = of_get_property(np, "reg", NULL);
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;
	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= MTK_HW_FEATURES;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

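/* probe order: map the register resource, look up the ethsys/pctl syscon
 * regmaps and the reset line, fetch IRQs and clocks, bring up the
 * hardware, then create and register one netdev per enabled MAC node
 */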
static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct device_node *mac_np;
	const struct of_device_id *match;
	struct mtk_soc_data *soc;
	struct mtk_eth *eth;
	int err;
	int i;

	match = of_match_device(of_mtk_match, &pdev->dev);
	soc = (struct mtk_soc_data *)match->data;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->dev = &pdev->dev;
	eth->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->irq_lock);

	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "mediatek,ethsys");
	if (IS_ERR(eth->ethsys)) {
		dev_err(&pdev->dev, "no ethsys regmap found\n");
		return PTR_ERR(eth->ethsys);
	}

	eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						    "mediatek,pctl");
	if (IS_ERR(eth->pctl)) {
		dev_err(&pdev->dev, "no pctl regmap found\n");
		return PTR_ERR(eth->pctl);
	}

	eth->rstc = devm_reset_control_get(&pdev->dev, "eth");
	if (IS_ERR(eth->rstc)) {
		dev_err(&pdev->dev, "no eth reset found\n");
		return PTR_ERR(eth->rstc);
	}

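	/* the frame engine exposes three interrupt lines; line 0 is only
	 * reported through the netdevs, lines 1 and 2 are requested below
	 * for TX and RX completion
	 */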
	for (i = 0; i < 3; i++) {
		eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}
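
	/* every clock named in mtk_clks_source_name[] is mandatory; a
	 * missing clock is fatal unless its provider is not probed yet
	 */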
	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			return -ENODEV;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

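	/* walk the DT children and create one MAC per compatible node */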
	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err)
			goto err_deinit_hw;
	}

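	/* TX completion interrupts arrive on IRQ line 1, RX on line 2 */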
	err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = mtk_mdio_init(eth);
	if (err)
		goto err_free_dev;

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		}
		netif_info(eth, probe, eth->netdev[i],
			   "mediatek frame engine at 0x%08lx, irq %d\n",
			   eth->netdev[i]->base_addr, eth->irq[0]);
	}

	/* we run 2 devices on the same DMA ring so we need a dummy device
	 * for NAPI to work
	 */
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

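/* detach the driver: quiesce DMA by stopping all MACs, then undo the
 * hardware init, NAPI registration and netdev setup
 */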
static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	int i;

	/* stop all devices to make sure that DMA is properly shut down */
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);

	return 0;
}

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt7623-eth" },
	{},
};
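/* presumably intended here: exporting the match table lets userspace
 * autoload the module from the DT compatible, as is standard for OF
 * platform drivers
 */
MODULE_DEVICE_TABLE(of, of_mtk_match);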

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");