/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
#include <net/tso.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/cpu.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
#define MVNETA_PORT_RX_RESET 0x1cc0
#define MVNETA_PORT_RX_DMA_RESET BIT(0)
#define MVNETA_PHY_ADDR 0x2000
#define MVNETA_PHY_ADDR_MASK 0x1f
#define MVNETA_MBUS_RETRY 0x2010
#define MVNETA_UNIT_INTR_CAUSE 0x2080
#define MVNETA_UNIT_CONTROL 0x20B0
#define MVNETA_PHY_POLLING_ENABLE BIT(1)
#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
#define MVNETA_DEF_RXQ(q) ((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
					  MVNETA_DEF_RXQ_ARP(q) | \
					  MVNETA_DEF_RXQ_TCP(q) | \
					  MVNETA_DEF_RXQ_UDP(q) | \
					  MVNETA_DEF_RXQ_BPDU(q) | \
					  MVNETA_TX_UNSET_ERR_SUM | \
					  MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND 0x2404
#define MVNETA_MAC_ADDR_LOW 0x2414
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
#define MVNETA_OVERRUN_FRAME_COUNT 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE 0x25a0
#define MVNETA_INTR_NEW_MASK 0x25a4

/* bits 0..7 = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK BIT(31)
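/* Worked example (illustrative, not in the original source): with all
 * eight queues in use, MVNETA_TX_INTR_MASK(8) = ((1 << 8) - 1) << 0 =
 * 0xff and MVNETA_RX_INTR_MASK(8) = 0xff00, matching the _ALL masks
 * above.
 */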

#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
#define MVNETA_CAUSE_PTP BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
#define MVNETA_CAUSE_PRBS_ERR BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF

#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
#define MVNETA_GMAC_LINK_UP BIT(0)
#define MVNETA_GMAC_SPEED_1000 BIT(1)
#define MVNETA_GMAC_SPEED_100 BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
#define MVNETA_DA_FILT_UCAST_BASE 0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
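/* Example (illustrative): with a 128-entry ring, last_desc is 127, so
 * MVNETA_QUEUE_NEXT_DESC(q, 126) yields 127 and
 * MVNETA_QUEUE_NEXT_DESC(q, 127) wraps back to 0.
 */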

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS 1
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100

/* The two byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4 byte
 * boundary: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE 2
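/* Worked example of the alignment property described above: the 2 byte
 * Marvell header plus the 14 byte Ethernet header add up to 16 bytes,
 * so the IP header that follows lands on a 4 byte boundary.
 */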

#define MVNETA_VLAN_TAG_LEN 4

#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT 1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000

#define MVNETA_TX_MTU_MAX 0x3ffff

/* TSO header size */
#define TSO_HEADER_SIZE 128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100

#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)
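/* Illustrative arithmetic: for the standard 1500 byte MTU,
 * MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32)
 * = ALIGN(1524, 32) = 1536 bytes.
 */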

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32 32
#define T_REG_64 64

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port *pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct napi;

	/* Cause of the previous interrupt */
	u32 cause_rx_tx;
};

struct mvneta_port {
	struct mvneta_pcpu_port __percpu *ports;
	struct mvneta_pcpu_stats __percpu *stats;

	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct notifier_block cpu_notifier;

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
	unsigned int tx_csum_limit;
	int use_inband_status:1;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
#define MVNETA_TX_L3_IP6 BIT(17)
#define MVNETA_TXD_IP_CSUM BIT(18)
#define MVNETA_TXD_Z_PAD BIT(19)
#define MVNETA_TXD_L_DESC BIT(20)
#define MVNETA_TXD_F_DESC BIT(21)
#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
			     MVNETA_TXD_L_DESC | \
			     MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)

#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
#define MVNETA_RXD_ERR_LEN BIT(18)
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u16 reserverd1;		/* csum_l4 (for future use) */
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32 status;		/* Info about received packet */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u16 data_size;		/* Size of received packet in bytes */

	u32 buf_phys_addr;	/* Physical address of the buffer */
	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u16 reserved4;		/* csum_l4 - (for future use, PnC) */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u16 reserverd1;		/* csum_l4 (for future use) */
	u32 command;		/* Options used by HW for packet transmitting.*/
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u16 data_size;		/* Size of received packet in bytes */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u32 status;		/* Info about received packet */

	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32 buf_phys_addr;	/* Physical address of the buffer */

	u16 reserved4;		/* csum_l4 - (for future use, PnC) */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptors in the
	 * descriptor ring
	 */
	int count;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

/* The hardware supports eight (8) RX queues, but only the default one
 * (rxq_def) is enabled at mvneta_port_up(); all eight are still
 * allocated and initialized.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}
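/* Typical read-modify-write usage of these helpers (a minimal sketch;
 * mvneta_port_enable() below does exactly this):
 *
 *	u32 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 *	val |= MVNETA_GMAC0_PORT_ENABLE;
 *	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 */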

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;

	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}
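/* Illustrative: asking to add 300 descriptors results in two writes,
 * one of 255 and one of the remaining 45, since the hardware field is
 * limited to MVNETA_RXQ_ADD_NON_OCCUPIED_MAX per update.
 */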

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
			(rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}
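/* Illustrative: rx_done = rx_filled = 300 takes the slow path above and
 * is flushed as two register updates, 255/255 followed by 45/45, with
 * each 8-bit field saturating at 0xff per write.
 */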

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
724
725/* Set rx queue offset */
726static void mvneta_rxq_offset_set(struct mvneta_port *pp,
727 struct mvneta_rx_queue *rxq,
728 int offset)
729{
730 u32 val;
731
732 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
733 val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
734
735 /* Offset is in */
736 val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
737 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
738}
739
740
741/* Tx descriptors helper methods */
742
743/* Update HW with number of TX descriptors to be sent */
744static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
745 struct mvneta_tx_queue *txq,
746 int pend_desc)
747{
748 u32 val;
749
750 /* Only 255 descriptors can be added at once ; Assume caller
Thomas Petazzoni6a20c172012-11-19 11:41:25 +0100751 * process TX desriptors in quanta less than 256
752 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +0300753 val = pend_desc;
754 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
755}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

805static void mvneta_port_up(struct mvneta_port *pp)
806{
807 int queue;
808 u32 q_map;
809
810 /* Enable all initialized TXs. */
Thomas Petazzonic5aff182012-08-17 14:04:28 +0300811 q_map = 0;
812 for (queue = 0; queue < txq_number; queue++) {
813 struct mvneta_tx_queue *txq = &pp->txqs[queue];
814 if (txq->descs != NULL)
815 q_map |= (1 << queue);
816 }
817 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
818
819 /* Enable all initialized RXQs. */
Maxime Ripardd8936652015-09-25 18:09:37 +0200820 mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
Thomas Petazzonic5aff182012-08-17 14:04:28 +0300821}
822
823/* Stop the Ethernet port activity */
824static void mvneta_port_down(struct mvneta_port *pp)
825{
826 u32 val;
827 int count;
828
829 /* Stop Rx port activity. Check port Rx activity. */
830 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
831
832 /* Issue stop command for active channels only */
833 if (val != 0)
834 mvreg_write(pp, MVNETA_RXQ_CMD,
835 val << MVNETA_RXQ_DISABLE_SHIFT);
836
837 /* Wait for all Rx activity to terminate. */
838 count = 0;
839 do {
840 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
841 netdev_warn(pp->dev,
842 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
843 val);
844 break;
845 }
846 mdelay(1);
847
848 val = mvreg_read(pp, MVNETA_RXQ_CMD);
849 } while (val & 0xff);
850
851 /* Stop Tx port activity. Check port Tx activity. Issue stop
Thomas Petazzoni6a20c172012-11-19 11:41:25 +0100852 * command for active channels only
853 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +0300854 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
855
856 if (val != 0)
857 mvreg_write(pp, MVNETA_TXQ_CMD,
858 (val << MVNETA_TXQ_DISABLE_SHIFT));
859
860 /* Wait for all Tx activity to terminate. */
861 count = 0;
862 do {
863 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
864 netdev_warn(pp->dev,
865 "TIMEOUT for TX stopped status=0x%08x\n",
866 val);
867 break;
868 }
869 mdelay(1);
870
871 /* Check TX Command reg that all Txqs are stopped */
872 val = mvreg_read(pp, MVNETA_TXQ_CMD);
873
874 } while (val & 0xff);
875
876 /* Double check to verify that TX FIFO is empty */
877 count = 0;
878 do {
879 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
880 netdev_warn(pp->dev,
881 "TX FIFO empty timeout status=0x08%x\n",
882 val);
883 break;
884 }
885 mdelay(1);
886
887 val = mvreg_read(pp, MVNETA_PORT_STATUS);
888 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
889 (val & MVNETA_TX_IN_PRGRS));
890
891 udelay(200);
892}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}
918/* Multicast tables methods */
919
920/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
921static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
922{
923 int offset;
924 u32 val;
925
926 if (queue == -1) {
927 val = 0;
928 } else {
929 val = 0x1 | (queue << 1);
930 val |= (val << 24) | (val << 16) | (val << 8);
931 }
932
933 for (offset = 0; offset <= 0xc; offset += 4)
934 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
935}
936
937/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
938static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
939{
940 int offset;
941 u32 val;
942
943 if (queue == -1) {
944 val = 0;
945 } else {
946 val = 0x1 | (queue << 1);
947 val |= (val << 24) | (val << 16) | (val << 8);
948 }
949
950 for (offset = 0; offset <= 0xfc; offset += 4)
951 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
952
953}
954
955/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
956static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
957{
958 int offset;
959 u32 val;
960
961 if (queue == -1) {
962 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
963 val = 0;
964 } else {
965 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
966 val = 0x1 | (queue << 1);
967 val |= (val << 24) | (val << 16) | (val << 8);
968 }
969
970 for (offset = 0; offset <= 0xfc; offset += 4)
971 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
972}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for_each_present_cpu(cpu)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	if (pp->use_inband_status) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
			 MVNETA_GMAC_FORCE_LINK_DOWN |
			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
		       MVNETA_GMAC_AN_SPEED_EN |
		       MVNETA_GMAC_AN_DUPLEX_EN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	} else {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN);
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	}

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* The TX token size and all TXQ token sizes must be larger than MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}
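/* Illustrative arithmetic (not from the original source): the MTU
 * programmed above is max_tx_size * 8 (apparently bits, given the
 * multiplication by 8), capped at MVNETA_TX_MTU_MAX (0x3ffff); e.g.
 * max_tx_size = 1600 gives 12800, and both the global and per-queue
 * token sizes are then raised to at least that value.
 */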

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
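/* Worked example (illustrative): for a MAC address ending in 0x1b,
 * last_nibble is 0xb = 11, so tbl_offset = (11 / 4) * 4 = 8 and
 * reg_offset = 11 % 4 = 3: the entry occupies byte 3 of the third
 * 32-bit word of the unicast table.
 */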

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}
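/* Illustrative: with a 250 MHz core clock, value = 100 usec is written
 * as (250000000 / 1000000) * 100 = 25000 clock cycles.
 */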

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
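/* Illustrative: a TCP over IPv4 frame with the L3 header at offset 14
 * and a 5-word (20 byte) IP header yields command = (14 << 0) |
 * (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL.
 */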


/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
			   struct sk_buff *skb)
{
	if ((status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from the tx_done reg. <cause> must not be null. The return value is always
 * a valid queue for matching the first one found in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return &pp->txqs[queue];
}

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		mvneta_txq_inc_get(txq);

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
	}
}

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			    struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (!tx_done)
		return;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
	}
}

static void *mvneta_frag_alloc(const struct mvneta_port *pp)
{
	if (likely(pp->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pp->frag_size);
	else
		return kmalloc(pp->frag_size, GFP_ATOMIC);
}

static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
{
	if (likely(pp->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Refill processing */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	dma_addr_t phys_addr;
	void *data;

	data = mvneta_frag_alloc(pp);
	if (!data)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, data,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		mvneta_frag_free(pp, data);
		return -ENOMEM;
	}

	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		__be16 l3_proto = vlan_get_protocol(skb);
		u8 l4_proto;

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    l3_proto, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}
1497
/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		void *data = (void *)rx_desc->buf_cookie;

		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
		mvneta_frag_free(pp, data);
	}

	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

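/* Frames up to rx_copybreak bytes are copied into a fresh skb so the
 * DMA buffer stays mapped and is immediately reusable; larger frames
 * are wrapped with build_skb() and the descriptor is refilled instead.
 */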
/* Main rx processing */
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
		     struct mvneta_rx_queue *rxq)
{
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status;
		int rx_bytes, err;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (unsigned char *)rx_desc->buf_cookie;
		phys_addr = rx_desc->buf_phys_addr;

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
		err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			memcpy(skb_put(skb, rx_bytes),
			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
			       rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&port->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			goto err_drop_frame;
		}

		skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
		if (!skb)
			goto err_drop_frame;

		dma_unmap_single(dev->dev.parent, phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}

static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	txq->tx_skb[txq->txq_put_index] = NULL;
	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = hdr_len;
	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
	tx_desc->command |= MVNETA_TXD_F_DESC;
	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
				 txq->txq_put_index * TSO_HEADER_SIZE;
	mvneta_txq_inc_put(txq);
}

static inline int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{
	struct mvneta_tx_desc *tx_desc;

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = size;
	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
						size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		return -ENOMEM;
	}

	tx_desc->command = 0;
	txq->tx_skb[txq->txq_put_index] = NULL;

	if (last_tcp) {
		/* last descriptor in the TCP packet */
		tx_desc->command = MVNETA_TXD_L_DESC;

		/* last descriptor in SKB */
		if (is_last)
			txq->tx_skb[txq->txq_put_index] = skb;
	}
	mvneta_txq_inc_put(txq);
	return 0;
}

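/* Software TSO: the skb is cut into gso_size-sized segments; each
 * segment gets a header descriptor built in the pre-allocated tso_hdrs
 * buffer plus one or more data descriptors mapped straight from the
 * skb payload.
 */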
static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{
	int total_len, data_left;
	int desc_count = 0;
	struct mvneta_port *pp = netdev_priv(dev);
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i;

	/* Count needed descriptors */
	if ((txq->count + tso_count_descs(skb)) >= txq->size)
		return 0;

	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
		pr_info("*** Is this even possible???!?!?\n");
		return 0;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		mvneta_tso_put_hdr(skb, pp, txq);

		while (data_left > 0) {
			int size;
			desc_count++;

			size = min_t(int, tso.size, data_left);

			if (mvneta_tso_put_data(dev, txq, skb,
						tso.data, size,
						size == data_left,
						total_len == 0))
				goto err_release;
			data_left -= size;

			tso_build_data(skb, &tso, size);
		}
	}

	return desc_count;

err_release:
	/* Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	for (i = desc_count - 1; i >= 0; i--) {
		struct mvneta_tx_desc *tx_desc = txq->descs + i;
		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}
	return 0;
}

/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == nr_frags - 1) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
			txq->tx_skb[txq->txq_put_index] = skb;
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			txq->tx_skb[txq->txq_put_index] = NULL;
		}
		mvneta_txq_inc_put(txq);
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}

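/* Descriptor layout on transmit: a linear skb uses a single descriptor
 * flagged First+Last; otherwise the linear part gets a First descriptor
 * and the fragments get middle descriptors plus a Last descriptor that
 * also records the skb pointer for release at tx-done time.
 */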
/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	int len = skb->len;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	if (skb_is_gso(skb)) {
		frags = mvneta_tx_tso(skb, dev, txq);
		goto out;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq->count += frags;
		mvneta_txq_pend_desc_add(pp, txq, frags);

		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}

/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;

	while (cause_tx_done) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count)
			mvneta_txq_done(pp, txq);

		__netif_tx_unlock(nq);
		cause_tx_done &= ~(1 << txq->id);
	}
}

/* Compute the CRC-8 of the specified address, using an algorithm that is
 * specific to the hardware (per the HW spec) and differs from the generic
 * CRC-8 algorithm.
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

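/* Note: mvneta_addr_crc() reduces with polynomial 0x107
 * (x^8 + x^2 + x + 1); the 8-bit result indexes one of the 256 entries
 * of the Other Multicast (OMC) filter table.
 */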
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the appropriate
 * Special Multicast Table entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the appropriate Other Multicast Table entry according to the
 * specified CRC-8.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4;	     /* Entry offset within the above reg */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}

/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}

/* Configure the filtering mode of the Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}

/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, rxq_def);
		mvneta_set_special_mcast_table(pp, rxq_def);
		mvneta_set_other_mcast_table(pp, rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, rxq_def);
			mvneta_set_other_mcast_table(pp, rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      rxq_def);
				}
			}
		}
	}
}

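/* The RX/TX interrupt is per-CPU: the ISR only masks it and hands the
 * work to NAPI; mvneta_poll() re-enables it once the pending work fits
 * within the budget.
 */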
/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;

	disable_percpu_irq(port->pp->dev->irq);
	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}

static int mvneta_fixed_link_update(struct mvneta_port *pp,
				    struct phy_device *phy)
{
	struct fixed_phy_status status;
	struct fixed_phy_status changed = {};
	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
		status.speed = SPEED_1000;
	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
		status.speed = SPEED_100;
	else
		status.speed = SPEED_10;
	status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
	changed.link = 1;
	changed.speed = 1;
	changed.duplex = 1;
	fixed_phy_update_state(phy, &status, &changed);
	return 0;
}

/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (bit 0 is for TX queue 1).
 * Bits 8 - 15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	struct mvneta_port *pp = netdev_priv(napi->dev);
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

	if (!netif_running(pp->dev)) {
		napi_complete(&port->napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);

		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
		if (pp->use_inband_status && (cause_misc &
				(MVNETA_CAUSE_PHY_STATUS_CHANGE |
				 MVNETA_CAUSE_LINK_CHANGE |
				 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
			mvneta_fixed_link_update(pp, pp->phy_dev);
		}
	}

	/* Release Tx descriptors */
	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
	}

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	cause_rx_tx |= port->cause_rx_tx;
	rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
	budget -= rx_done;

	if (budget > 0) {
		cause_rx_tx = 0;
		napi_complete(&port->napi);
		enable_percpu_irq(pp->dev->irq, 0);
	}

	port->cause_rx_tx = cause_rx_tx;
	return rx_done;
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
		if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}
	}

	/* Add this number of RX descriptors as non-occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}

/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skb's in the tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}

/* Rx/Tx queue initialization/cleanup methods */

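/* Descriptor rings live in coherent DMA memory; the BUG_ON()s below
 * assert the cache-line alignment the hardware requires for the ring
 * base addresses.
 */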
/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (rxq->descs == NULL)
		return -ENOMEM;

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set Offset */
	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
	mvneta_rxq_bm_disable(pp, rxq);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_phys);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the free entries reaches
	 * the maximum number of descriptors per skb.
	 */
	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (txq->descs == NULL)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}

	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
					   txq->size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_phys, GFP_KERNEL);
	if (txq->tso_hdrs == NULL) {
		kfree(txq->tx_skb);
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}
	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

	return 0;
}

/* Free a TX queue's resources (also used when mvneta_txq_init() fails) */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	kfree(txq->tx_skb);

	if (txq->tso_hdrs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_phys);
	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);

	if (err) {
		netdev_err(pp->dev, "%s: can't create rxq=%d\n",
			   __func__, rxq_def);
		mvneta_cleanup_rxqs(pp);
		return err;
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	unsigned int cpu;

	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	/* Enable polling on the port */
	for_each_present_cpu(cpu) {
		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

		napi_enable(&port->napi);
	}

	/* Unmask interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK(rxq_number) |
		    MVNETA_TX_INTR_MASK(txq_number) |
		    MVNETA_MISCINTR_INTR_MASK);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE |
		    MVNETA_CAUSE_PSC_SYNC_CHANGE);

	phy_start(pp->phy_dev);
	netif_tx_start_all_queues(pp->dev);
}

static void mvneta_stop_dev(struct mvneta_port *pp)
{
	unsigned int cpu;

	phy_stop(pp->phy_dev);

	for_each_present_cpu(cpu) {
		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

		napi_disable(&port->napi);
	}

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);

	/* Mask all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);
}

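/* Note on the MTU bounds below: the frame size derived from the MTU by
 * MVNETA_RX_PKT_SIZE() must be a multiple of 8, and 9676 is the largest
 * MTU that survives this rounding under the 9700-byte hardware limit.
 */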
/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}

/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mtu = mvneta_check_mtu_valid(dev, mtu);
	if (mtu < 0)
		return -EINVAL;

	dev->mtu = mtu;

	if (!netif_running(dev)) {
		netdev_update_features(dev);
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvneta_stop_dev(pp);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup txqs after MTU change\n");
		return ret;
	}

	mvneta_start_dev(pp);
	mvneta_port_up(pp);

	netdev_update_features(dev);

	return 0;
}

static netdev_features_t mvneta_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
		netdev_info(dev,
			    "Disable IP checksum for MTU greater than %dB\n",
			    pp->tx_csum_limit);
	}

	return features;
}

/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}

/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct sockaddr *sockaddr = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret < 0)
		return ret;
	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);

	eth_commit_mac_addr_change(dev, addr);
	return 0;
}

static void mvneta_adjust_link(struct net_device *ndev)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	struct phy_device *phydev = pp->phy_dev;
	int status_change = 0;

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			if (!pp->use_inband_status) {
				u32 val = mvreg_read(pp,
						  MVNETA_GMAC_AUTONEG_CONFIG);
				val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
				val |= MVNETA_GMAC_FORCE_LINK_PASS;
				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
					    val);
			}
			mvneta_port_up(pp);
		} else {
			if (!pp->use_inband_status) {
				u32 val = mvreg_read(pp,
						  MVNETA_GMAC_AUTONEG_CONFIG);
				val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
				val |= MVNETA_GMAC_FORCE_LINK_DOWN;
				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
					    val);
			}
			mvneta_port_down(pp);
		}
		phy_print_status(phydev);
	}
}

static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
				 pp->phy_interface);
	if (!phy_dev) {
		netdev_err(pp->dev, "could not find the PHY\n");
		return -ENODEV;
	}

	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	pp->phy_dev = phy_dev;
	pp->link = 0;
	pp->duplex = 0;
	pp->speed = 0;

	return 0;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phy_disconnect(pp->phy_dev);
	pp->phy_dev = NULL;
}

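/* Only one CPU services the single RX queue: the election below leaves
 * the per-CPU IRQ enabled on CPU rxq_def % num_online_cpus() and
 * disabled everywhere else; the CPU notifier re-runs the election on
 * hotplug events.
 */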
static void mvneta_percpu_enable(void *arg)
{
	struct mvneta_port *pp = arg;

	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
}

static void mvneta_percpu_disable(void *arg)
{
	struct mvneta_port *pp = arg;

	disable_percpu_irq(pp->dev->irq);
}

static void mvneta_percpu_elect(struct mvneta_port *pp)
{
	int online_cpu_idx, cpu, i = 0;

	online_cpu_idx = rxq_def % num_online_cpus();

	for_each_online_cpu(cpu) {
		if (i == online_cpu_idx)
			/* Enable per-CPU interrupt on the one CPU we
			 * just elected
			 */
			smp_call_function_single(cpu, mvneta_percpu_enable,
						 pp, true);
		else
			/* Disable per-CPU interrupt on all the other CPUs */
			smp_call_function_single(cpu, mvneta_percpu_disable,
						 pp, true);
		i++;
	}
}

static int mvneta_percpu_notifier(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
					      cpu_notifier);
	int cpu = (unsigned long)hcpu, other_cpu;
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		netif_tx_stop_all_queues(pp->dev);

		/* We have to synchronise on the napi of each CPU
		 * except the one just being woken up
		 */
		for_each_online_cpu(other_cpu) {
			if (other_cpu != cpu) {
				struct mvneta_pcpu_port *other_port =
					per_cpu_ptr(pp->ports, other_cpu);

				napi_synchronize(&other_port->napi);
			}
		}

		/* Mask all ethernet port interrupts */
		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
		napi_enable(&port->napi);

		/* Enable per-CPU interrupt on the one CPU we care
		 * about.
		 */
		mvneta_percpu_elect(pp);

		/* Unmask all ethernet port interrupts */
		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
			    MVNETA_RX_INTR_MASK(rxq_number) |
			    MVNETA_TX_INTR_MASK(txq_number) |
			    MVNETA_MISCINTR_INTR_MASK);
		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
			    MVNETA_CAUSE_PHY_STATUS_CHANGE |
			    MVNETA_CAUSE_LINK_CHANGE |
			    MVNETA_CAUSE_PSC_SYNC_CHANGE);
		netif_tx_start_all_queues(pp->dev);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		netif_tx_stop_all_queues(pp->dev);
		/* Mask all ethernet port interrupts */
		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

		napi_synchronize(&port->napi);
		napi_disable(&port->napi);
		/* Disable per-CPU interrupts on the CPU that is
		 * brought down.
		 */
		smp_call_function_single(cpu, mvneta_percpu_disable,
					 pp, true);

		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* Check if a new CPU must be elected now that this one is down */
		mvneta_percpu_elect(pp);
		/* Unmask all ethernet port interrupts */
		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
			    MVNETA_RX_INTR_MASK(rxq_number) |
			    MVNETA_TX_INTR_MASK(txq_number) |
			    MVNETA_MISCINTR_INTR_MASK);
		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
			    MVNETA_CAUSE_PHY_STATUS_CHANGE |
			    MVNETA_CAUSE_LINK_CHANGE |
			    MVNETA_CAUSE_PSC_SYNC_CHANGE);
		netif_tx_start_all_queues(pp->dev);
		break;
	}

	return NOTIFY_OK;
}

static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
				 MVNETA_DRIVER_NAME, pp->ports);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	/* Even though the documentation says that request_percpu_irq
	 * doesn't enable the interrupts automatically, it actually
	 * does so on the local CPU.
	 *
	 * Make sure it's disabled.
	 */
	mvneta_percpu_disable(pp);

	/* Elect a CPU to handle our RX queue interrupt */
	mvneta_percpu_elect(pp);

	/* Register a CPU notifier to handle the case where our CPU
	 * might be taken offline.
	 */
	register_cpu_notifier(&pp->cpu_notifier);

	/* By default the link is down */
	netif_carrier_off(pp->dev);

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_irq;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_irq:
	free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}

/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int cpu;

	mvneta_stop_dev(pp);
	mvneta_mdio_remove(pp);
	unregister_cpu_notifier(&pp->cpu_notifier);
	for_each_present_cpu(cpu)
		smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
	free_percpu_irq(dev->irq, pp->ports);
	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}

static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
}

/* Ethtool methods */

/* Get settings (phy address, speed) for ethtools */
int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_gset(pp->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtools */
int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_sset(pp->phy_dev, cmd);
}

/* Set interrupt coalescing for ethtools */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}

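/* All queues are programmed with identical coalescing values, so
 * reporting RX queue 0 and TX queue 0 below is representative.
 */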
/* get coalescing for ethtools */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
	return 0;
}

static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}

static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;

	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
	if (pp->tx_ring_size != ring->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}

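/* The readings are accumulated into pp->ethtool_stats[], which assumes
 * the hardware MIB counters clear on read.
 */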
3061static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
3062{
3063 const struct mvneta_statistic *s;
3064 void __iomem *base = pp->base;
3065 u32 high, low, val;
3066 int i;
3067
3068 for (i = 0, s = mvneta_statistics;
3069 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
3070 s++, i++) {
3071 val = 0;
3072
3073 switch (s->type) {
3074 case T_REG_32:
3075 val = readl_relaxed(base + s->offset);
3076 break;
3077 case T_REG_64:
3078 /* Docs say to read low 32-bit then high */
3079 low = readl_relaxed(base + s->offset);
3080 high = readl_relaxed(base + s->offset + 4);
3081 val = (u64)high << 32 | low;
3082 break;
3083 }
3084
3085 pp->ethtool_stats[i] += val;
3086 }
3087}
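/*
 * A minimal sketch (not part of the driver) isolating the 64-bit MIB
 * counter read done above. The in-line comment only mandates the
 * low-then-high order; the usual rationale (the low-word read latches
 * the high word) is an assumption here, as is the helper name.
 */
static inline u64 example_read_mib64(void __iomem *base, u32 offset)
{
	u32 low = readl_relaxed(base + offset);
	u32 high = readl_relaxed(base + offset + 4);

	return (u64)high << 32 | low;
}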

static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}

static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}

static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_fix_features    = mvneta_fix_features,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
};

const struct ethtool_ops mvneta_eth_tool_ops = {
	.get_link          = ethtool_op_get_link,
	.get_settings      = mvneta_ethtool_get_settings,
	.set_settings      = mvneta_ethtool_set_settings,
	.set_coalesce      = mvneta_ethtool_set_coalesce,
	.get_coalesce      = mvneta_ethtool_get_coalesce,
	.get_drvinfo       = mvneta_ethtool_get_drvinfo,
	.get_ringparam     = mvneta_ethtool_get_ringparam,
	.set_ringparam     = mvneta_ethtool_set_ringparam,
	.get_strings       = mvneta_ethtool_get_strings,
	.get_ethtool_stats = mvneta_ethtool_get_stats,
	.get_sset_count    = mvneta_ethtool_get_sset_count,
};

/* Initialize hardware */
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
				GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
				GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create RX descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
	}

	return 0;
}

/* Platform glue: initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}
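/*
 * Worked example for the window programming above (base and size are
 * assumed for illustration): a DRAM chip-select at base 0x00000000
 * with a 256 MiB (0x10000000) size yields a SIZE register value of
 * (0x10000000 - 1) & 0xffff0000 = 0x0fff0000, i.e. sizes are encoded
 * as (size - 1) in 64 KiB granules. A sketch of that encoding:
 */
static inline u32 example_mbus_win_size(u32 size)
{
	/* Assumes a non-zero size that is a multiple of 64 KiB. */
	return (size - 1) & 0xffff0000;
}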

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 ctrl;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	switch (phy_mode) {
	case PHY_INTERFACE_MODE_QSGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		ctrl |= MVNETA_GMAC2_PORT_RGMII;
		break;
	default:
		return -EINVAL;
	}

	/* Cancel Port Reset */
	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;

	return 0;
}
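/*
 * The reset-release loop above spins with no bound. A hedged sketch
 * of a bounded variant (not the driver's actual behavior): it gives
 * up with -ETIMEDOUT instead of hanging if the bit never clears. The
 * timeout value is an assumption, and udelay() would additionally
 * require <linux/delay.h>; the block is compiled out for that reason.
 */
#if 0
static int example_wait_port_reset(struct mvneta_port *pp)
{
	int timeout = 10000;	/* arbitrary: roughly 10 ms in 1 us steps */

	while (mvreg_read(pp, MVNETA_GMAC_CTRL_2) & MVNETA_GMAC2_PORT_RESET) {
		if (--timeout == 0)
			return -ETIMEDOUT;
		udelay(1);
	}

	return 0;
}
#endif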

/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram_target_info;
	struct resource *res;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *phy_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	const char *managed;
	int phy_mode;
	int err;
	int cpu;

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_node = of_parse_phandle(dn, "phy", 0);
	if (!phy_node) {
		if (!of_phy_is_fixed_link(dn)) {
			dev_err(&pdev->dev, "no PHY specified\n");
			err = -ENODEV;
			goto err_free_irq;
		}

		err = of_phy_register_fixed_link(dn);
		if (err < 0) {
			dev_err(&pdev->dev, "cannot register fixed PHY\n");
			goto err_free_irq;
		}

		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		phy_node = of_node_get(dn);
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_put_phy_node;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp = netdev_priv(dev);
	pp->phy_node = phy_node;
	pp->phy_interface = phy_mode;

	err = of_property_read_string(dn, "managed", &managed);
	pp->use_inband_status = (err == 0 &&
				 strcmp(managed, "in-band-status") == 0);
	pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;

	pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_put_phy_node;
	}

	clk_prepare_enable(pp->clk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
		pp->tx_csum_limit = 1600;

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_free_stats;

	err = mvneta_port_power_up(pp, phy_mode);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_free_stats;
	}

	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvneta_conf_mbus_windows(pp, dram_target_info);

	for_each_present_cpu(cpu) {
		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

		netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
		port->pp = pp;
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_free_stats;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	if (pp->use_inband_status) {
		struct phy_device *phy = of_phy_find_device(dn);

		/* Guard against a missing PHY to avoid a NULL dereference */
		if (phy) {
			mvneta_fixed_link_update(pp, phy);

			put_device(&phy->dev);
		}
	}

	return 0;

err_free_stats:
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk);
err_put_phy_node:
	of_node_put(phy_node);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}
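/*
 * A hedged device-tree sketch of the properties mvneta_probe() looks
 * at; the node name, unit address, register range, interrupt, and
 * clock values are assumptions for illustration only:
 *
 *	ethernet@70000 {
 *		compatible = "marvell,armada-370-neta";
 *		reg = <0x70000 0x4000>;
 *		interrupts = <8>;
 *		clocks = <&gateclk 4>;
 *		phy = <&phy0>;
 *		phy-mode = "rgmii-id";
 *	};
 *
 * Replacing the "phy" phandle with a fixed-link subnode takes the
 * fixed-PHY branch above, and managed = "in-band-status" sets
 * pp->use_inband_status.
 */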

/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	of_node_put(pp->phy_node);
	free_netdev(dev);

	return 0;
}

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
	},
};

module_platform_driver(mvneta_driver);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
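/*
 * Illustrative use of the parameters above when the driver is built
 * as a module (values assumed):
 *
 *	modprobe mvneta rxq_number=2 txq_number=2 rxq_def=0
 *
 * Only rx_copybreak is marked S_IWUSR, so it alone can also be
 * changed at runtime through
 * /sys/module/mvneta/parameters/rx_copybreak.
 */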