/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
#include <net/tso.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/cpu.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)	(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC		BIT(1)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)	(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)	((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)	(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)		(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT	19
#define MVNETA_RXQ_BUF_SIZE_MASK	(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)	(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)	(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX		255
#define MVNETA_PORT_RX_RESET		0x1cc0
#define MVNETA_PORT_RX_DMA_RESET	BIT(0)
#define MVNETA_PHY_ADDR			0x2000
#define MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_MBUS_RETRY		0x2010
#define MVNETA_UNIT_INTR_CAUSE		0x2080
#define MVNETA_UNIT_CONTROL		0x20B0
#define MVNETA_PHY_POLLING_ENABLE	BIT(1)
#define MVNETA_WIN_BASE(w)		(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)		(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)		(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE		0x2290
#define MVNETA_PORT_CONFIG		0x2400
#define MVNETA_UNI_PROMISC_MODE		BIT(0)
#define MVNETA_DEF_RXQ(q)		((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND	0x2404
#define MVNETA_MAC_ADDR_LOW		0x2414
#define MVNETA_MAC_ADDR_HIGH		0x2418
#define MVNETA_SDMA_CONFIG		0x241c
#define MVNETA_SDMA_BRST_SIZE_16	4
#define MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define MVNETA_DESC_SWAP		BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS		0x2444
#define MVNETA_TX_IN_PRGRS		BIT(1)
#define MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE	0x247c
#define MVNETA_SERDES_CFG		0x24A0
#define MVNETA_SGMII_SERDES_PROTO	0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO	0x0667
#define MVNETA_TYPE_PRIO		0x24bc
#define MVNETA_FORCE_UNI		BIT(21)
#define MVNETA_TXQ_CMD_1		0x24e4
#define MVNETA_TXQ_CMD			0x2448
#define MVNETA_TXQ_DISABLE_SHIFT	8
#define MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT	0x2484
#define MVNETA_OVERRUN_FRAME_COUNT	0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER	0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE	BIT(31)
#define MVNETA_ACC_MODE			0x2500
#define MVNETA_CPU_MAP(cpu)		(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)	(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE		0x25a0
#define MVNETA_INTR_NEW_MASK		0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL		(0xff << 8)
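/* For example, with all eight queues in use, MVNETA_TX_INTR_MASK(8) works out
 * to 0x000000ff and MVNETA_RX_INTR_MASK(8) to 0x0000ff00, i.e. the _ALL
 * variants defined above.
 */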
#define MVNETA_MISCINTR_INTR_MASK	BIT(31)

#define MVNETA_INTR_OLD_CAUSE		0x25a8
#define MVNETA_INTR_OLD_MASK		0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE		0x25b0
#define MVNETA_INTR_MISC_MASK		0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE	BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE	BIT(1)
#define MVNETA_CAUSE_PTP		BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR	BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN		BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR	BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT	BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN		BIT(11)
#define MVNETA_CAUSE_PRBS_ERR		BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE	BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR	BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT	16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK	(0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool)	(1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT	24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK	(0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q)	(1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE		0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0xff000000  // note: neta says it's 0x000000FF

#define MVNETA_RXQ_CMD			0x2680
#define MVNETA_RXQ_DISABLE_SHIFT	8
#define MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)	(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)	(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0		0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE	BIT(0)
#define MVNETA_GMAC_CTRL_2		0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE	BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS		0x2c10
#define MVNETA_GMAC_LINK_UP		BIT(0)
#define MVNETA_GMAC_SPEED_1000		BIT(1)
#define MVNETA_GMAC_SPEED_100		BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG	0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE	BIT(2)
#define MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN	BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVNETA_MIB_COUNTERS_BASE	0x3000
#define MVNETA_MIB_LATE_COLLISION	0x7c
#define MVNETA_DA_FILT_SPEC_MCAST	0x3400
#define MVNETA_DA_FILT_OTH_MCAST	0x3500
#define MVNETA_DA_FILT_UCAST_BASE	0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)	(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)		(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)	(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT	16
#define MVNETA_TXQ_STATUS_REG(q)	(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT	16
#define MVNETA_TXQ_SENT_DESC_MASK	0x3fff0000
#define MVNETA_PORT_TX_RESET		0x3cf0
#define MVNETA_PORT_TX_DMA_RESET	BIT(0)
#define MVNETA_TX_MTU			0x3e0c
#define MVNETA_TX_TOKEN_SIZE		0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX	0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)	(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX	0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		1
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled with zeroes automatically on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, the IP header automatically ends up aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2
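/* A quick check of the alignment claim above: MVNETA_MH_SIZE (2) plus
 * ETH_HLEN (14) is 16 bytes, a multiple of 4, so the IP header following
 * the Ethernet header lands on a 4-byte boundary with no extra padding.
 */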

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* TSO header size */
#define TSO_HEADER_SIZE			128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
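/* The sizing above is a worst-case estimate: each software TSO segment is
 * assumed to take up to two descriptors (one for the locally built header,
 * one for payload data), with MAX_SKB_FRAGS descriptors of extra slack.
 */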

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			     \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)
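/* Worked example with the common 1500-byte MTU:
 * 1500 + 2 (MH) + 4 (VLAN tag) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1524,
 * which ALIGN() rounds up to the next 32-byte cache line, i.e. 1536 bytes.
 */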

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
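/* IS_TSO_HEADER() tells whether a DMA address falls inside the queue's
 * preallocated TSO header block (txq->tso_hdrs_phys, sized
 * txq->size * TSO_HEADER_SIZE). Such addresses do not come from
 * dma_map_single(), so mvneta_txq_bufs_free() must not unmap them
 * individually.
 */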

#define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port *pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct napi;

	/* Cause of the previous interrupt */
	u32 cause_rx_tx;
};

struct mvneta_port {
	struct mvneta_pcpu_port __percpu *ports;
	struct mvneta_pcpu_stats __percpu *stats;

	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct notifier_block cpu_notifier;

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
	unsigned int tx_csum_limit;
	unsigned int use_inband_status:1;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserverd1;	/* csum_l4 (for future use)		*/
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet		*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u16  data_size;		/* Size of received packet in bytes	*/

	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserverd1;	/* csum_l4 (for future use)		*/
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes	*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u32  status;		/* Info about received packet		*/

	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/

	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;

	stats->tx_dropped	= dev->stats.tx_dropped;

	return stats;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta of less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);

}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
{
	u32 val;

	if (enable) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
			 MVNETA_GMAC_FORCE_LINK_DOWN |
			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
		       MVNETA_GMAC_AN_SPEED_EN |
		       MVNETA_GMAC_AN_DUPLEX_EN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	} else {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
		       MVNETA_GMAC_AN_SPEED_EN |
		       MVNETA_GMAC_AN_DUPLEX_EN);
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	}
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for_each_present_cpu(cpu)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_autoneg(pp, pp->use_inband_status);
	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)

{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than the MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg  */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}
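/* The coalescing register above is programmed in core clock cycles, i.e.
 * (clock ticks per usec) * usec. As an illustration only, a 250 MHz core
 * clock and value = 100 usec would give 250 * 100 = 25000 cycles; the real
 * rate is whatever clk_get_rate() reports for this port.
 */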

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 *  The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |=  MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
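/* Example of the command word built above, for a plain untagged IPv4/TCP
 * frame as passed in by mvneta_skb_tx_csum() below (l3_offs = 14,
 * ip_hdr_len = 5 since ihl counts 32-bit words):
 * (14 << MVNETA_TX_L3_OFF_SHIFT) | (5 << MVNETA_TX_IP_HLEN_SHIFT) |
 * MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL
 */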
1328
1329
1330/* Display more error info */
1331static void mvneta_rx_error(struct mvneta_port *pp,
1332 struct mvneta_rx_desc *rx_desc)
1333{
1334 u32 status = rx_desc->status;
1335
willy tarreau54282132014-01-16 08:20:14 +01001336 if (!mvneta_rxq_desc_is_first_last(status)) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001337 netdev_err(pp->dev,
1338 "bad rx status %08x (buffer oversize), size=%d\n",
willy tarreau54282132014-01-16 08:20:14 +01001339 status, rx_desc->data_size);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001340 return;
1341 }
1342
1343 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1344 case MVNETA_RXD_ERR_CRC:
1345 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1346 status, rx_desc->data_size);
1347 break;
1348 case MVNETA_RXD_ERR_OVERRUN:
1349 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1350 status, rx_desc->data_size);
1351 break;
1352 case MVNETA_RXD_ERR_LEN:
1353 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1354 status, rx_desc->data_size);
1355 break;
1356 case MVNETA_RXD_ERR_RESOURCE:
1357 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1358 status, rx_desc->data_size);
1359 break;
1360 }
1361}
1362
willy tarreau54282132014-01-16 08:20:14 +01001363/* Handle RX checksum offload based on the descriptor's status */
1364static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001365 struct sk_buff *skb)
1366{
willy tarreau54282132014-01-16 08:20:14 +01001367 if ((status & MVNETA_RXD_L3_IP4) &&
1368 (status & MVNETA_RXD_L4_CSUM_OK)) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001369 skb->csum = 0;
1370 skb->ip_summed = CHECKSUM_UNNECESSARY;
1371 return;
1372 }
1373
1374 skb->ip_summed = CHECKSUM_NONE;
1375}
1376
willy tarreau6c498972014-01-16 08:20:12 +01001377/* Return tx queue pointer (find last set bit) according to <cause> returned
1378 * form tx_done reg. <cause> must not be null. The return value is always a
1379 * valid queue for matching the first one found in <cause>.
1380 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001381static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1382 u32 cause)
1383{
1384 int queue = fls(cause) - 1;
1385
willy tarreau6c498972014-01-16 08:20:12 +01001386 return &pp->txqs[queue];
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001387}
1388
1389/* Free tx queue skbuffs */
1390static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1391 struct mvneta_tx_queue *txq, int num)
1392{
1393 int i;
1394
1395 for (i = 0; i < num; i++) {
1396 struct mvneta_tx_desc *tx_desc = txq->descs +
1397 txq->txq_get_index;
1398 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1399
1400 mvneta_txq_inc_get(txq);
1401
Ezequiel Garcia2e3173a2014-05-30 13:40:07 -03001402 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1403 dma_unmap_single(pp->dev->dev.parent,
1404 tx_desc->buf_phys_addr,
1405 tx_desc->data_size, DMA_TO_DEVICE);
Ezequiel Garciaba7e46e2014-05-30 13:40:06 -03001406 if (!skb)
1407 continue;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001408 dev_kfree_skb_any(skb);
1409 }
1410}
1411
1412/* Handle end of transmission */
Arnaud Ebalardcd713192014-01-16 08:20:19 +01001413static void mvneta_txq_done(struct mvneta_port *pp,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001414 struct mvneta_tx_queue *txq)
1415{
1416 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1417 int tx_done;
1418
1419 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
Arnaud Ebalardcd713192014-01-16 08:20:19 +01001420 if (!tx_done)
1421 return;
1422
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001423 mvneta_txq_bufs_free(pp, txq, tx_done);
1424
1425 txq->count -= tx_done;
1426
1427 if (netif_tx_queue_stopped(nq)) {
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03001428 if (txq->count <= txq->tx_wake_threshold)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001429 netif_tx_wake_queue(nq);
1430 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001431}
1432
willy tarreau8ec2cd42014-01-16 08:20:16 +01001433static void *mvneta_frag_alloc(const struct mvneta_port *pp)
1434{
1435 if (likely(pp->frag_size <= PAGE_SIZE))
1436 return netdev_alloc_frag(pp->frag_size);
1437 else
1438 return kmalloc(pp->frag_size, GFP_ATOMIC);
1439}
1440
1441static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
1442{
1443 if (likely(pp->frag_size <= PAGE_SIZE))
Alexander Duyck13dc0d22015-05-06 21:12:14 -07001444 skb_free_frag(data);
willy tarreau8ec2cd42014-01-16 08:20:16 +01001445 else
1446 kfree(data);
1447}
1448
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001449/* Refill processing */
1450static int mvneta_rx_refill(struct mvneta_port *pp,
1451 struct mvneta_rx_desc *rx_desc)
1452
1453{
1454 dma_addr_t phys_addr;
willy tarreau8ec2cd42014-01-16 08:20:16 +01001455 void *data;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001456
willy tarreau8ec2cd42014-01-16 08:20:16 +01001457 data = mvneta_frag_alloc(pp);
1458 if (!data)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001459 return -ENOMEM;
1460
willy tarreau8ec2cd42014-01-16 08:20:16 +01001461 phys_addr = dma_map_single(pp->dev->dev.parent, data,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001462 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1463 DMA_FROM_DEVICE);
1464 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
willy tarreau8ec2cd42014-01-16 08:20:16 +01001465 mvneta_frag_free(pp, data);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001466 return -ENOMEM;
1467 }
1468
willy tarreau8ec2cd42014-01-16 08:20:16 +01001469 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001470 return 0;
1471}
1472
1473/* Handle tx checksum */
1474static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1475{
1476 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1477 int ip_hdr_len = 0;
Vlad Yasevich817dbfa2014-08-25 10:34:54 -04001478 __be16 l3_proto = vlan_get_protocol(skb);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001479 u8 l4_proto;
1480
Vlad Yasevich817dbfa2014-08-25 10:34:54 -04001481 if (l3_proto == htons(ETH_P_IP)) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001482 struct iphdr *ip4h = ip_hdr(skb);
1483
1484 /* Calculate IPv4 checksum and L4 checksum */
1485 ip_hdr_len = ip4h->ihl;
1486 l4_proto = ip4h->protocol;
Vlad Yasevich817dbfa2014-08-25 10:34:54 -04001487 } else if (l3_proto == htons(ETH_P_IPV6)) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001488 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1489
1490 /* Read l4_protocol from one of IPv6 extra headers */
1491 if (skb_network_header_len(skb) > 0)
1492 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1493 l4_proto = ip6h->nexthdr;
1494 } else
1495 return MVNETA_TX_L4_CSUM_NOT;
1496
1497 return mvneta_txq_desc_csum(skb_network_offset(skb),
Vlad Yasevich817dbfa2014-08-25 10:34:54 -04001498 l3_proto, ip_hdr_len, l4_proto);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001499 }
1500
1501 return MVNETA_TX_L4_CSUM_NOT;
1502}
1503
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001504/* Drop packets received by the RXQ and free buffers */
1505static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1506 struct mvneta_rx_queue *rxq)
1507{
1508 int rx_done, i;
1509
1510 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1511 for (i = 0; i < rxq->size; i++) {
1512 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
willy tarreau8ec2cd42014-01-16 08:20:16 +01001513 void *data = (void *)rx_desc->buf_cookie;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001514
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001515 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
Ezequiel Garciaa328f3a2013-12-05 13:35:37 -03001516 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
Justin Maggard8c94ddb2015-11-09 17:21:05 -08001517 mvneta_frag_free(pp, data);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001518 }
1519
1520 if (rx_done)
1521 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1522}
1523
1524/* Main rx processing */
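/* Two receive paths are used: frames up to rx_copybreak bytes are
 * copied into a freshly allocated skb, leaving the DMA buffer mapped
 * and in the ring, while larger frames are only accepted once the
 * descriptor has been refilled, after which the original buffer is
 * unmapped and handed to the stack via build_skb().
 */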
1525static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1526 struct mvneta_rx_queue *rxq)
1527{
Maxime Ripard12bb03b2015-09-25 18:09:36 +02001528 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001529 struct net_device *dev = pp->dev;
Simon Guinota84e3282015-07-19 13:00:53 +02001530 int rx_done;
willy tarreaudc4277d2014-01-16 08:20:07 +01001531 u32 rcvd_pkts = 0;
1532 u32 rcvd_bytes = 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001533
1534 /* Get number of received packets */
1535 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1536
1537 if (rx_todo > rx_done)
1538 rx_todo = rx_done;
1539
1540 rx_done = 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001541
1542 /* Fairness NAPI loop */
1543 while (rx_done < rx_todo) {
1544 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1545 struct sk_buff *skb;
willy tarreau8ec2cd42014-01-16 08:20:16 +01001546 unsigned char *data;
Simon Guinotdaf158d2015-09-15 22:41:21 +02001547 dma_addr_t phys_addr;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001548 u32 rx_status;
1549 int rx_bytes, err;
1550
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001551 rx_done++;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001552 rx_status = rx_desc->status;
willy tarreauf19fadf2014-01-16 08:20:17 +01001553 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
willy tarreau8ec2cd42014-01-16 08:20:16 +01001554 data = (unsigned char *)rx_desc->buf_cookie;
Simon Guinotdaf158d2015-09-15 22:41:21 +02001555 phys_addr = rx_desc->buf_phys_addr;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001556
willy tarreau54282132014-01-16 08:20:14 +01001557 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
willy tarreauf19fadf2014-01-16 08:20:17 +01001558 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1559 err_drop_frame:
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001560 dev->stats.rx_errors++;
1561 mvneta_rx_error(pp, rx_desc);
willy tarreau8ec2cd42014-01-16 08:20:16 +01001562 /* leave the descriptor untouched */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001563 continue;
1564 }
1565
willy tarreauf19fadf2014-01-16 08:20:17 +01001566 if (rx_bytes <= rx_copybreak) {
1567 /* better copy a small frame and not unmap the DMA region */
1568 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
1569 if (unlikely(!skb))
1570 goto err_drop_frame;
1571
1572 dma_sync_single_range_for_cpu(dev->dev.parent,
1573 rx_desc->buf_phys_addr,
1574 MVNETA_MH_SIZE + NET_SKB_PAD,
1575 rx_bytes,
1576 DMA_FROM_DEVICE);
1577 memcpy(skb_put(skb, rx_bytes),
1578 data + MVNETA_MH_SIZE + NET_SKB_PAD,
1579 rx_bytes);
1580
1581 skb->protocol = eth_type_trans(skb, dev);
1582 mvneta_rx_csum(pp, rx_status, skb);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02001583 napi_gro_receive(&port->napi, skb);
willy tarreauf19fadf2014-01-16 08:20:17 +01001584
1585 rcvd_pkts++;
1586 rcvd_bytes += rx_bytes;
1587
1588 /* leave the descriptor and buffer untouched */
1589 continue;
1590 }
1591
Simon Guinota84e3282015-07-19 13:00:53 +02001592 /* Refill processing */
1593 err = mvneta_rx_refill(pp, rx_desc);
1594 if (err) {
1595 netdev_err(dev, "Linux processing - Can't refill\n");
1596 rxq->missed++;
1597 goto err_drop_frame;
1598 }
1599
willy tarreauf19fadf2014-01-16 08:20:17 +01001600 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1601 if (!skb)
1602 goto err_drop_frame;
1603
Simon Guinotdaf158d2015-09-15 22:41:21 +02001604 dma_unmap_single(dev->dev.parent, phys_addr,
Ezequiel Garciaa328f3a2013-12-05 13:35:37 -03001605 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001606
willy tarreaudc4277d2014-01-16 08:20:07 +01001607 rcvd_pkts++;
1608 rcvd_bytes += rx_bytes;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001609
1610 /* Linux processing */
willy tarreau8ec2cd42014-01-16 08:20:16 +01001611 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001612 skb_put(skb, rx_bytes);
1613
1614 skb->protocol = eth_type_trans(skb, dev);
1615
willy tarreau54282132014-01-16 08:20:14 +01001616 mvneta_rx_csum(pp, rx_status, skb);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001617
Maxime Ripard12bb03b2015-09-25 18:09:36 +02001618 napi_gro_receive(&port->napi, skb);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001619 }
1620
willy tarreaudc4277d2014-01-16 08:20:07 +01001621 if (rcvd_pkts) {
willy tarreau74c41b02014-01-16 08:20:08 +01001622 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1623
1624 u64_stats_update_begin(&stats->syncp);
1625 stats->rx_packets += rcvd_pkts;
1626 stats->rx_bytes += rcvd_bytes;
1627 u64_stats_update_end(&stats->syncp);
willy tarreaudc4277d2014-01-16 08:20:07 +01001628 }
1629
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001630 /* Update rxq management counters */
Simon Guinota84e3282015-07-19 13:00:53 +02001631 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001632
1633 return rx_done;
1634}
1635
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03001636static inline void
1637mvneta_tso_put_hdr(struct sk_buff *skb,
1638 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
1639{
1640 struct mvneta_tx_desc *tx_desc;
1641 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1642
1643 txq->tx_skb[txq->txq_put_index] = NULL;
1644 tx_desc = mvneta_txq_next_desc_get(txq);
1645 tx_desc->data_size = hdr_len;
1646 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
1647 tx_desc->command |= MVNETA_TXD_F_DESC;
1648 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
1649 txq->txq_put_index * TSO_HEADER_SIZE;
1650 mvneta_txq_inc_put(txq);
1651}
1652
1653static inline int
1654mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
1655 struct sk_buff *skb, char *data, int size,
1656 bool last_tcp, bool is_last)
1657{
1658 struct mvneta_tx_desc *tx_desc;
1659
1660 tx_desc = mvneta_txq_next_desc_get(txq);
1661 tx_desc->data_size = size;
1662 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
1663 size, DMA_TO_DEVICE);
1664 if (unlikely(dma_mapping_error(dev->dev.parent,
1665 tx_desc->buf_phys_addr))) {
1666 mvneta_txq_desc_put(txq);
1667 return -ENOMEM;
1668 }
1669
1670 tx_desc->command = 0;
1671 txq->tx_skb[txq->txq_put_index] = NULL;
1672
1673 if (last_tcp) {
1674 /* last descriptor in the TCP packet */
1675 tx_desc->command = MVNETA_TXD_L_DESC;
1676
1677 /* last descriptor in SKB */
1678 if (is_last)
1679 txq->tx_skb[txq->txq_put_index] = skb;
1680 }
1681 mvneta_txq_inc_put(txq);
1682 return 0;
1683}
1684
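/* Transmit a TSO skb.  Each TCP segment gets one "first" descriptor
 * pointing at a header rebuilt in the per-queue tso_hdrs DMA area,
 * followed by one or more data descriptors mapped straight from the
 * payload.  The last data descriptor of a segment carries
 * MVNETA_TXD_L_DESC, and only the final descriptor of the whole skb
 * records the skb pointer so it is freed exactly once at tx-done time.
 */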
1685static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
1686 struct mvneta_tx_queue *txq)
1687{
1688 int total_len, data_left;
1689 int desc_count = 0;
1690 struct mvneta_port *pp = netdev_priv(dev);
1691 struct tso_t tso;
1692 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1693 int i;
1694
1695 /* Count needed descriptors */
1696 if ((txq->count + tso_count_descs(skb)) >= txq->size)
1697 return 0;
1698
1699 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
1700 pr_info("*** Is this even possible???!?!?\n");
1701 return 0;
1702 }
1703
1704 /* Initialize the TSO handler, and prepare the first payload */
1705 tso_start(skb, &tso);
1706
1707 total_len = skb->len - hdr_len;
1708 while (total_len > 0) {
1709 char *hdr;
1710
1711 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1712 total_len -= data_left;
1713 desc_count++;
1714
1715 /* prepare packet headers: MAC + IP + TCP */
1716 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
1717 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1718
1719 mvneta_tso_put_hdr(skb, pp, txq);
1720
1721 while (data_left > 0) {
1722 int size;
1723 desc_count++;
1724
1725 size = min_t(int, tso.size, data_left);
1726
1727 if (mvneta_tso_put_data(dev, txq, skb,
1728 tso.data, size,
1729 size == data_left,
1730 total_len == 0))
1731 goto err_release;
1732 data_left -= size;
1733
1734 tso_build_data(skb, &tso, size);
1735 }
1736 }
1737
1738 return desc_count;
1739
1740err_release:
1741 /* Release all used data descriptors; header descriptors must not
1742 * be DMA-unmapped.
1743 */
1744 for (i = desc_count - 1; i >= 0; i--) {
1745 struct mvneta_tx_desc *tx_desc = txq->descs + i;
Ezequiel Garcia2e3173a2014-05-30 13:40:07 -03001746 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03001747 dma_unmap_single(pp->dev->dev.parent,
1748 tx_desc->buf_phys_addr,
1749 tx_desc->data_size,
1750 DMA_TO_DEVICE);
1751 mvneta_txq_desc_put(txq);
1752 }
1753 return 0;
1754}
1755
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001756/* Handle tx fragmentation processing */
1757static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1758 struct mvneta_tx_queue *txq)
1759{
1760 struct mvneta_tx_desc *tx_desc;
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001761 int i, nr_frags = skb_shinfo(skb)->nr_frags;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001762
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001763 for (i = 0; i < nr_frags; i++) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001764 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1765 void *addr = page_address(frag->page.p) + frag->page_offset;
1766
1767 tx_desc = mvneta_txq_next_desc_get(txq);
1768 tx_desc->data_size = frag->size;
1769
1770 tx_desc->buf_phys_addr =
1771 dma_map_single(pp->dev->dev.parent, addr,
1772 tx_desc->data_size, DMA_TO_DEVICE);
1773
1774 if (dma_mapping_error(pp->dev->dev.parent,
1775 tx_desc->buf_phys_addr)) {
1776 mvneta_txq_desc_put(txq);
1777 goto error;
1778 }
1779
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001780 if (i == nr_frags - 1) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001781 /* Last descriptor */
1782 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001783 txq->tx_skb[txq->txq_put_index] = skb;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001784 } else {
1785 /* Descriptor in the middle: Not First, Not Last */
1786 tx_desc->command = 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001787 txq->tx_skb[txq->txq_put_index] = NULL;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001788 }
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001789 mvneta_txq_inc_put(txq);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001790 }
1791
1792 return 0;
1793
1794error:
1795 /* Release all descriptors that were used to map fragments of
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01001796 * this packet, as well as the corresponding DMA mappings
1797 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001798 for (i = i - 1; i >= 0; i--) {
1799 tx_desc = txq->descs + i;
1800 dma_unmap_single(pp->dev->dev.parent,
1801 tx_desc->buf_phys_addr,
1802 tx_desc->data_size,
1803 DMA_TO_DEVICE);
1804 mvneta_txq_desc_put(txq);
1805 }
1806
1807 return -ENOMEM;
1808}
1809
1810/* Main tx processing */
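/* A linear skb uses a single descriptor flagged First+Last (plus zero
 * padding); a fragmented skb gets a First descriptor for the linear
 * part and the remaining fragments are chained by
 * mvneta_tx_frag_process(), with the Last flag and the skb pointer
 * stored on the final descriptor so the skb can be freed when
 * transmission completes.
 */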
1811static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1812{
1813 struct mvneta_port *pp = netdev_priv(dev);
Willy Tarreauee40a112013-04-11 23:00:37 +02001814 u16 txq_id = skb_get_queue_mapping(skb);
1815 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001816 struct mvneta_tx_desc *tx_desc;
Eric Dumazet5f478b42014-12-02 04:30:59 -08001817 int len = skb->len;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001818 int frags = 0;
1819 u32 tx_cmd;
1820
1821 if (!netif_running(dev))
1822 goto out;
1823
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03001824 if (skb_is_gso(skb)) {
1825 frags = mvneta_tx_tso(skb, dev, txq);
1826 goto out;
1827 }
1828
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001829 frags = skb_shinfo(skb)->nr_frags + 1;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001830
1831 /* Get a descriptor for the first part of the packet */
1832 tx_desc = mvneta_txq_next_desc_get(txq);
1833
1834 tx_cmd = mvneta_skb_tx_csum(pp, skb);
1835
1836 tx_desc->data_size = skb_headlen(skb);
1837
1838 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1839 tx_desc->data_size,
1840 DMA_TO_DEVICE);
1841 if (unlikely(dma_mapping_error(dev->dev.parent,
1842 tx_desc->buf_phys_addr))) {
1843 mvneta_txq_desc_put(txq);
1844 frags = 0;
1845 goto out;
1846 }
1847
1848 if (frags == 1) {
1849 /* First and Last descriptor */
1850 tx_cmd |= MVNETA_TXD_FLZ_DESC;
1851 tx_desc->command = tx_cmd;
1852 txq->tx_skb[txq->txq_put_index] = skb;
1853 mvneta_txq_inc_put(txq);
1854 } else {
1855 /* First but not Last */
1856 tx_cmd |= MVNETA_TXD_F_DESC;
1857 txq->tx_skb[txq->txq_put_index] = NULL;
1858 mvneta_txq_inc_put(txq);
1859 tx_desc->command = tx_cmd;
1860 /* Continue with other skb fragments */
1861 if (mvneta_tx_frag_process(pp, skb, txq)) {
1862 dma_unmap_single(dev->dev.parent,
1863 tx_desc->buf_phys_addr,
1864 tx_desc->data_size,
1865 DMA_TO_DEVICE);
1866 mvneta_txq_desc_put(txq);
1867 frags = 0;
1868 goto out;
1869 }
1870 }
1871
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001872out:
1873 if (frags > 0) {
willy tarreau74c41b02014-01-16 08:20:08 +01001874 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
Ezequiel Garciae19d2dd2014-05-19 13:59:54 -03001875 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
1876
1877 txq->count += frags;
1878 mvneta_txq_pend_desc_add(pp, txq, frags);
1879
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03001880 if (txq->count >= txq->tx_stop_threshold)
Ezequiel Garciae19d2dd2014-05-19 13:59:54 -03001881 netif_tx_stop_queue(nq);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001882
willy tarreau74c41b02014-01-16 08:20:08 +01001883 u64_stats_update_begin(&stats->syncp);
1884 stats->tx_packets++;
Eric Dumazet5f478b42014-12-02 04:30:59 -08001885 stats->tx_bytes += len;
willy tarreau74c41b02014-01-16 08:20:08 +01001886 u64_stats_update_end(&stats->syncp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001887 } else {
1888 dev->stats.tx_dropped++;
1889 dev_kfree_skb_any(skb);
1890 }
1891
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001892 return NETDEV_TX_OK;
1893}
1894
1895
1896/* Free tx resources, when resetting a port */
1897static void mvneta_txq_done_force(struct mvneta_port *pp,
1898 struct mvneta_tx_queue *txq)
1899
1900{
1901 int tx_done = txq->count;
1902
1903 mvneta_txq_bufs_free(pp, txq, tx_done);
1904
1905 /* reset txq */
1906 txq->count = 0;
1907 txq->txq_put_index = 0;
1908 txq->txq_get_index = 0;
1909}
1910
willy tarreau6c498972014-01-16 08:20:12 +01001911/* Handle tx done - called in softirq context. The <cause_tx_done> argument
1912 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
1913 */
Arnaud Ebalard0713a862014-01-16 08:20:18 +01001914static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001915{
1916 struct mvneta_tx_queue *txq;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001917 struct netdev_queue *nq;
1918
willy tarreau6c498972014-01-16 08:20:12 +01001919 while (cause_tx_done) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001920 txq = mvneta_tx_done_policy(pp, cause_tx_done);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001921
1922 nq = netdev_get_tx_queue(pp->dev, txq->id);
1923 __netif_tx_lock(nq, smp_processor_id());
1924
Arnaud Ebalard0713a862014-01-16 08:20:18 +01001925 if (txq->count)
1926 mvneta_txq_done(pp, txq);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001927
1928 __netif_tx_unlock(nq);
1929 cause_tx_done &= ~((1 << txq->id));
1930 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001931}
1932
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01001933/* Compute the crc8 of the specified address, using a unique algorithm
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001934 * according to the hw spec, different from the generic crc8 algorithm
1935 */
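/* This is an MSB-first CRC-8 over the six address bytes with the
 * polynomial 0x107 (x^8 + x^2 + x + 1); the 8-bit result is used as
 * the index into the Other Multicast entries of the DA-Filter table.
 */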
1936static int mvneta_addr_crc(unsigned char *addr)
1937{
1938 int crc = 0;
1939 int i;
1940
1941 for (i = 0; i < ETH_ALEN; i++) {
1942 int j;
1943
1944 crc = (crc ^ addr[i]) << 8;
1945 for (j = 7; j >= 0; j--) {
1946 if (crc & (0x100 << j))
1947 crc ^= 0x107 << j;
1948 }
1949 }
1950
1951 return crc;
1952}
1953
1954/* This method controls the net device special MAC multicast support.
1955 * The Special Multicast Table for MAC addresses supports MAC of the form
1956 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1957 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 1958 * Table entries in the DA-Filter table. This method sets the
 1959 * appropriate Special Multicast Table entry.
1960 */
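/* Each 32-bit table register packs four one-byte entries: tbl_offset
 * selects the register (last_byte / 4) and reg_offset the byte inside
 * it (last_byte % 4).  An entry of 0x01 | (queue << 1) accepts the
 * frame and steers it to the given RX queue, while queue == -1 clears
 * the entry.  For example, 01:00:5e:00:00:25 (last_byte 0x25 == 37)
 * lands in register 9, byte 1.
 */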
1961static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
1962 unsigned char last_byte,
1963 int queue)
1964{
1965 unsigned int smc_table_reg;
1966 unsigned int tbl_offset;
1967 unsigned int reg_offset;
1968
1969 /* Register offset from SMC table base */
1970 tbl_offset = (last_byte / 4);
1971 /* Entry offset within the above reg */
1972 reg_offset = last_byte % 4;
1973
1974 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
1975 + tbl_offset * 4));
1976
1977 if (queue == -1)
1978 smc_table_reg &= ~(0xff << (8 * reg_offset));
1979 else {
1980 smc_table_reg &= ~(0xff << (8 * reg_offset));
1981 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1982 }
1983
1984 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
1985 smc_table_reg);
1986}
1987
1988/* This method controls the network device Other MAC multicast support.
1989 * The Other Multicast Table is used for multicast of another type.
1990 * A CRC-8 is used as an index to the Other Multicast Table entries
1991 * in the DA-Filter table.
1992 * The method gets the CRC-8 value from the calling routine and
 1993 * sets the appropriate Other Multicast Table entry according to the
 1994 * specified CRC-8.
1995 */
1996static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
1997 unsigned char crc8,
1998 int queue)
1999{
2000 unsigned int omc_table_reg;
2001 unsigned int tbl_offset;
2002 unsigned int reg_offset;
2003
2004 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2005 reg_offset = crc8 % 4; /* Entry offset within the above reg */
2006
2007 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2008
2009 if (queue == -1) {
2010 /* Clear accepts frame bit at specified Other DA table entry */
2011 omc_table_reg &= ~(0xff << (8 * reg_offset));
2012 } else {
2013 omc_table_reg &= ~(0xff << (8 * reg_offset));
2014 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2015 }
2016
2017 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2018}
2019
2020/* The network device supports multicast using two tables:
2021 * 1) Special Multicast Table for MAC addresses of the form
2022 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2023 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2024 * Table entries in the DA-Filter table.
2025 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
2026 * is used as an index to the Other Multicast Table entries in the
2027 * DA-Filter table.
2028 */
2029static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2030 int queue)
2031{
2032 unsigned char crc_result = 0;
2033
2034 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2035 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2036 return 0;
2037 }
2038
2039 crc_result = mvneta_addr_crc(p_addr);
2040 if (queue == -1) {
2041 if (pp->mcast_count[crc_result] == 0) {
2042 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2043 crc_result);
2044 return -EINVAL;
2045 }
2046
2047 pp->mcast_count[crc_result]--;
2048 if (pp->mcast_count[crc_result] != 0) {
2049 netdev_info(pp->dev,
2050 "After delete there are %d valid Mcast for crc8=0x%02x\n",
2051 pp->mcast_count[crc_result], crc_result);
2052 return -EINVAL;
2053 }
2054 } else
2055 pp->mcast_count[crc_result]++;
2056
2057 mvneta_set_other_mcast_addr(pp, crc_result, queue);
2058
2059 return 0;
2060}
2061
2062/* Configure Fitering mode of Ethernet port */
2063static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2064 int is_promisc)
2065{
2066 u32 port_cfg_reg, val;
2067
2068 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2069
2070 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2071
2072 /* Set / Clear UPM bit in port configuration register */
2073 if (is_promisc) {
2074 /* Accept all Unicast addresses */
2075 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2076 val |= MVNETA_FORCE_UNI;
2077 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2078 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2079 } else {
2080 /* Reject all Unicast addresses */
2081 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2082 val &= ~MVNETA_FORCE_UNI;
2083 }
2084
2085 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2086 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2087}
2088
2089/* register unicast and multicast addresses */
2090static void mvneta_set_rx_mode(struct net_device *dev)
2091{
2092 struct mvneta_port *pp = netdev_priv(dev);
2093 struct netdev_hw_addr *ha;
2094
2095 if (dev->flags & IFF_PROMISC) {
2096 /* Accept all: Multicast + Unicast */
2097 mvneta_rx_unicast_promisc_set(pp, 1);
2098 mvneta_set_ucast_table(pp, rxq_def);
2099 mvneta_set_special_mcast_table(pp, rxq_def);
2100 mvneta_set_other_mcast_table(pp, rxq_def);
2101 } else {
2102 /* Accept single Unicast */
2103 mvneta_rx_unicast_promisc_set(pp, 0);
2104 mvneta_set_ucast_table(pp, -1);
2105 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2106
2107 if (dev->flags & IFF_ALLMULTI) {
2108 /* Accept all multicast */
2109 mvneta_set_special_mcast_table(pp, rxq_def);
2110 mvneta_set_other_mcast_table(pp, rxq_def);
2111 } else {
2112 /* Accept only initialized multicast */
2113 mvneta_set_special_mcast_table(pp, -1);
2114 mvneta_set_other_mcast_table(pp, -1);
2115
2116 if (!netdev_mc_empty(dev)) {
2117 netdev_for_each_mc_addr(ha, dev) {
2118 mvneta_mcast_addr_set(pp, ha->addr,
2119 rxq_def);
2120 }
2121 }
2122 }
2123 }
2124}
2125
2126/* Interrupt handling - the callback for request_irq() */
2127static irqreturn_t mvneta_isr(int irq, void *dev_id)
2128{
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002129 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002130
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002131 disable_percpu_irq(port->pp->dev->irq);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002132 napi_schedule(&port->napi);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002133
2134 return IRQ_HANDLED;
2135}
2136
Stas Sergeev898b2972015-04-01 20:32:49 +03002137static int mvneta_fixed_link_update(struct mvneta_port *pp,
2138 struct phy_device *phy)
2139{
2140 struct fixed_phy_status status;
2141 struct fixed_phy_status changed = {};
2142 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2143
2144 status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
2145 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
2146 status.speed = SPEED_1000;
2147 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
2148 status.speed = SPEED_100;
2149 else
2150 status.speed = SPEED_10;
2151 status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
2152 changed.link = 1;
2153 changed.speed = 1;
2154 changed.duplex = 1;
2155 fixed_phy_update_state(phy, &status, &changed);
2156 return 0;
2157}
2158
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002159/* NAPI handler
 2160 * Bits 0-7 of the causeRxTx register indicate that packets were
 2161 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
 2162 * Bits 8-15 of the causeRxTx register indicate that packets were
 2163 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
2164 * Each CPU has its own causeRxTx register
2165 */
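/* The flow is: read MVNETA_INTR_NEW_CAUSE, handle PHY/link/PSC change
 * causes through the fixed-link helper, release completed TX
 * descriptors, then receive up to the NAPI budget from the default RX
 * queue (rxq_def).  If budget is left over, polling is finished: NAPI
 * is completed and the per-CPU interrupt is re-enabled; otherwise the
 * remaining cause bits are stashed in port->cause_rx_tx for the next
 * poll.
 */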
2166static int mvneta_poll(struct napi_struct *napi, int budget)
2167{
2168 int rx_done = 0;
2169 u32 cause_rx_tx;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002170 struct mvneta_port *pp = netdev_priv(napi->dev);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002171 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002172
2173 if (!netif_running(pp->dev)) {
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002174 napi_complete(&port->napi);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002175 return rx_done;
2176 }
2177
2178 /* Read cause register */
Stas Sergeev898b2972015-04-01 20:32:49 +03002179 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2180 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2181 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2182
2183 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2184 if (pp->use_inband_status && (cause_misc &
2185 (MVNETA_CAUSE_PHY_STATUS_CHANGE |
2186 MVNETA_CAUSE_LINK_CHANGE |
2187 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
2188 mvneta_fixed_link_update(pp, pp->phy_dev);
2189 }
2190 }
willy tarreau71f6d1b2014-01-16 08:20:11 +01002191
2192 /* Release Tx descriptors */
2193 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
Arnaud Ebalard0713a862014-01-16 08:20:18 +01002194 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
willy tarreau71f6d1b2014-01-16 08:20:11 +01002195 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2196 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002197
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002198 /* For the case where the last mvneta_poll did not process all
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002199 * RX packets
2200 */
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002201 cause_rx_tx |= port->cause_rx_tx;
Maxime Ripardd8936652015-09-25 18:09:37 +02002202 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
2203 budget -= rx_done;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002204
2205 if (budget > 0) {
2206 cause_rx_tx = 0;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002207 napi_complete(&port->napi);
2208 enable_percpu_irq(pp->dev->irq, 0);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002209 }
2210
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002211 port->cause_rx_tx = cause_rx_tx;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002212 return rx_done;
2213}
2214
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002215/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2216static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2217 int num)
2218{
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002219 int i;
2220
2221 for (i = 0; i < num; i++) {
willy tarreaua1a65ab2014-01-16 08:20:13 +01002222 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2223 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
2224 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002225 __func__, rxq->id, i, num);
2226 break;
2227 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002228 }
2229
 2230 /* Add this number of RX descriptors as non-occupied (ready to
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002231 * get packets)
2232 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002233 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2234
2235 return i;
2236}
2237
2238/* Free all packets pending transmit from all TXQs and reset TX port */
2239static void mvneta_tx_reset(struct mvneta_port *pp)
2240{
2241 int queue;
2242
Ezequiel Garcia96728502014-05-22 20:06:59 -03002243 /* free the skb's in the tx ring */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002244 for (queue = 0; queue < txq_number; queue++)
2245 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2246
2247 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2248 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2249}
2250
2251static void mvneta_rx_reset(struct mvneta_port *pp)
2252{
2253 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2254 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2255}
2256
2257/* Rx/Tx queue initialization/cleanup methods */
2258
2259/* Create a specified RX queue */
2260static int mvneta_rxq_init(struct mvneta_port *pp,
2261 struct mvneta_rx_queue *rxq)
2262
2263{
2264 rxq->size = pp->rx_ring_size;
2265
2266 /* Allocate memory for RX descriptors */
2267 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2268 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2269 &rxq->descs_phys, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002270 if (rxq->descs == NULL)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002271 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002272
2273 BUG_ON(rxq->descs !=
2274 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2275
2276 rxq->last_desc = rxq->size - 1;
2277
2278 /* Set Rx descriptors queue starting address */
2279 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2280 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2281
2282 /* Set Offset */
2283 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
2284
2285 /* Set coalescing pkts and time */
2286 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2287 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2288
2289 /* Fill RXQ with buffers from RX pool */
2290 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
2291 mvneta_rxq_bm_disable(pp, rxq);
2292 mvneta_rxq_fill(pp, rxq, rxq->size);
2293
2294 return 0;
2295}
2296
2297/* Cleanup Rx queue */
2298static void mvneta_rxq_deinit(struct mvneta_port *pp,
2299 struct mvneta_rx_queue *rxq)
2300{
2301 mvneta_rxq_drop_pkts(pp, rxq);
2302
2303 if (rxq->descs)
2304 dma_free_coherent(pp->dev->dev.parent,
2305 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2306 rxq->descs,
2307 rxq->descs_phys);
2308
2309 rxq->descs = NULL;
2310 rxq->last_desc = 0;
2311 rxq->next_desc_to_proc = 0;
2312 rxq->descs_phys = 0;
2313}
2314
2315/* Create and initialize a tx queue */
2316static int mvneta_txq_init(struct mvneta_port *pp,
2317 struct mvneta_tx_queue *txq)
2318{
2319 txq->size = pp->tx_ring_size;
2320
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03002321 /* A queue must always have room for at least one skb.
 2322 * Therefore, stop the queue when the number of free entries reaches
2323 * the maximum number of descriptors per skb.
2324 */
2325 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2326 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2327
2328
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002329 /* Allocate memory for TX descriptors */
2330 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2331 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2332 &txq->descs_phys, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002333 if (txq->descs == NULL)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002334 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002335
2336 /* Make sure descriptor address is cache line size aligned */
2337 BUG_ON(txq->descs !=
2338 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2339
2340 txq->last_desc = txq->size - 1;
2341
2342 /* Set maximum bandwidth for enabled TXQs */
2343 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2344 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2345
2346 /* Set Tx descriptors queue starting address */
2347 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2348 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2349
2350 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2351 if (txq->tx_skb == NULL) {
2352 dma_free_coherent(pp->dev->dev.parent,
2353 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2354 txq->descs, txq->descs_phys);
2355 return -ENOMEM;
2356 }
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03002357
2358 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2359 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2360 txq->size * TSO_HEADER_SIZE,
2361 &txq->tso_hdrs_phys, GFP_KERNEL);
2362 if (txq->tso_hdrs == NULL) {
2363 kfree(txq->tx_skb);
2364 dma_free_coherent(pp->dev->dev.parent,
2365 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2366 txq->descs, txq->descs_phys);
2367 return -ENOMEM;
2368 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002369 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2370
2371 return 0;
2372}
2373
 2374/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
2375static void mvneta_txq_deinit(struct mvneta_port *pp,
2376 struct mvneta_tx_queue *txq)
2377{
2378 kfree(txq->tx_skb);
2379
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03002380 if (txq->tso_hdrs)
2381 dma_free_coherent(pp->dev->dev.parent,
2382 txq->size * TSO_HEADER_SIZE,
2383 txq->tso_hdrs, txq->tso_hdrs_phys);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002384 if (txq->descs)
2385 dma_free_coherent(pp->dev->dev.parent,
2386 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2387 txq->descs, txq->descs_phys);
2388
2389 txq->descs = NULL;
2390 txq->last_desc = 0;
2391 txq->next_desc_to_proc = 0;
2392 txq->descs_phys = 0;
2393
2394 /* Set minimum bandwidth for disabled TXQs */
2395 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2396 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2397
2398 /* Set Tx descriptors queue starting address and size */
2399 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2400 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2401}
2402
2403/* Cleanup all Tx queues */
2404static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2405{
2406 int queue;
2407
2408 for (queue = 0; queue < txq_number; queue++)
2409 mvneta_txq_deinit(pp, &pp->txqs[queue]);
2410}
2411
2412/* Cleanup all Rx queues */
2413static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2414{
Maxime Ripardd8936652015-09-25 18:09:37 +02002415 mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002416}
2417
2418
2419/* Init all Rx queues */
2420static int mvneta_setup_rxqs(struct mvneta_port *pp)
2421{
Maxime Ripardd8936652015-09-25 18:09:37 +02002422 int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);
2423 if (err) {
2424 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2425 __func__, rxq_def);
2426 mvneta_cleanup_rxqs(pp);
2427 return err;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002428 }
2429
2430 return 0;
2431}
2432
2433/* Init all tx queues */
2434static int mvneta_setup_txqs(struct mvneta_port *pp)
2435{
2436 int queue;
2437
2438 for (queue = 0; queue < txq_number; queue++) {
2439 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2440 if (err) {
2441 netdev_err(pp->dev, "%s: can't create txq=%d\n",
2442 __func__, queue);
2443 mvneta_cleanup_txqs(pp);
2444 return err;
2445 }
2446 }
2447
2448 return 0;
2449}
2450
2451static void mvneta_start_dev(struct mvneta_port *pp)
2452{
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002453 unsigned int cpu;
2454
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002455 mvneta_max_rx_size_set(pp, pp->pkt_size);
2456 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2457
2458 /* start the Rx/Tx activity */
2459 mvneta_port_enable(pp);
2460
2461 /* Enable polling on the port */
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002462 for_each_present_cpu(cpu) {
2463 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2464
2465 napi_enable(&port->napi);
2466 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002467
2468 /* Unmask interrupts */
2469 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
Stas Sergeev898b2972015-04-01 20:32:49 +03002470 MVNETA_RX_INTR_MASK(rxq_number) |
2471 MVNETA_TX_INTR_MASK(txq_number) |
2472 MVNETA_MISCINTR_INTR_MASK);
2473 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2474 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2475 MVNETA_CAUSE_LINK_CHANGE |
2476 MVNETA_CAUSE_PSC_SYNC_CHANGE);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002477
2478 phy_start(pp->phy_dev);
2479 netif_tx_start_all_queues(pp->dev);
2480}
2481
2482static void mvneta_stop_dev(struct mvneta_port *pp)
2483{
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002484 unsigned int cpu;
2485
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002486 phy_stop(pp->phy_dev);
2487
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002488 for_each_present_cpu(cpu) {
2489 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2490
2491 napi_disable(&port->napi);
2492 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002493
2494 netif_carrier_off(pp->dev);
2495
2496 mvneta_port_down(pp);
2497 netif_tx_stop_all_queues(pp->dev);
2498
2499 /* Stop the port activity */
2500 mvneta_port_disable(pp);
2501
2502 /* Clear all ethernet port interrupts */
2503 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2504 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2505
2506 /* Mask all ethernet port interrupts */
2507 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2508 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2509 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2510
2511 mvneta_tx_reset(pp);
2512 mvneta_rx_reset(pp);
2513}
2514
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002515/* Return positive if MTU is valid */
2516static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2517{
2518 if (mtu < 68) {
2519 netdev_err(dev, "cannot change mtu to less than 68\n");
2520 return -EINVAL;
2521 }
2522
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002523 /* 9676 == 9700 - 20 and rounding to 8 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002524 if (mtu > 9676) {
2525 netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
2526 mtu = 9676;
2527 }
2528
2529 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2530 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2531 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2532 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2533 }
2534
2535 return mtu;
2536}
2537
2538/* Change the device mtu */
2539static int mvneta_change_mtu(struct net_device *dev, int mtu)
2540{
2541 struct mvneta_port *pp = netdev_priv(dev);
2542 int ret;
2543
2544 mtu = mvneta_check_mtu_valid(dev, mtu);
2545 if (mtu < 0)
2546 return -EINVAL;
2547
2548 dev->mtu = mtu;
2549
Simon Guinotb65657f2015-06-30 16:20:22 +02002550 if (!netif_running(dev)) {
2551 netdev_update_features(dev);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002552 return 0;
Simon Guinotb65657f2015-06-30 16:20:22 +02002553 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002554
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002555 /* The interface is running, so we have to force a
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002556 * reallocation of the queues
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002557 */
2558 mvneta_stop_dev(pp);
2559
2560 mvneta_cleanup_txqs(pp);
2561 mvneta_cleanup_rxqs(pp);
2562
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002563 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
willy tarreau8ec2cd42014-01-16 08:20:16 +01002564 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2565 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002566
2567 ret = mvneta_setup_rxqs(pp);
2568 if (ret) {
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002569 netdev_err(dev, "unable to setup rxqs after MTU change\n");
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002570 return ret;
2571 }
2572
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002573 ret = mvneta_setup_txqs(pp);
2574 if (ret) {
2575 netdev_err(dev, "unable to setup txqs after MTU change\n");
2576 return ret;
2577 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002578
2579 mvneta_start_dev(pp);
2580 mvneta_port_up(pp);
2581
Simon Guinotb65657f2015-06-30 16:20:22 +02002582 netdev_update_features(dev);
2583
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002584 return 0;
2585}
2586
Simon Guinotb65657f2015-06-30 16:20:22 +02002587static netdev_features_t mvneta_fix_features(struct net_device *dev,
2588 netdev_features_t features)
2589{
2590 struct mvneta_port *pp = netdev_priv(dev);
2591
2592 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
2593 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
2594 netdev_info(dev,
2595 "Disable IP checksum for MTU greater than %dB\n",
2596 pp->tx_csum_limit);
2597 }
2598
2599 return features;
2600}
2601
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00002602/* Get mac address */
2603static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2604{
2605 u32 mac_addr_l, mac_addr_h;
2606
2607 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
2608 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
2609 addr[0] = (mac_addr_h >> 24) & 0xFF;
2610 addr[1] = (mac_addr_h >> 16) & 0xFF;
2611 addr[2] = (mac_addr_h >> 8) & 0xFF;
2612 addr[3] = mac_addr_h & 0xFF;
2613 addr[4] = (mac_addr_l >> 8) & 0xFF;
2614 addr[5] = mac_addr_l & 0xFF;
2615}
2616
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002617/* Handle setting mac address */
2618static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2619{
2620 struct mvneta_port *pp = netdev_priv(dev);
Ezequiel Garciae68de362014-05-22 20:07:00 -03002621 struct sockaddr *sockaddr = addr;
2622 int ret;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002623
Ezequiel Garciae68de362014-05-22 20:07:00 -03002624 ret = eth_prepare_mac_addr_change(dev, addr);
2625 if (ret < 0)
2626 return ret;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002627 /* Remove previous address table entry */
2628 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2629
2630 /* Set new addr in hw */
Ezequiel Garciae68de362014-05-22 20:07:00 -03002631 mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002632
Ezequiel Garciae68de362014-05-22 20:07:00 -03002633 eth_commit_mac_addr_change(dev, addr);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002634 return 0;
2635}
2636
2637static void mvneta_adjust_link(struct net_device *ndev)
2638{
2639 struct mvneta_port *pp = netdev_priv(ndev);
2640 struct phy_device *phydev = pp->phy_dev;
2641 int status_change = 0;
2642
2643 if (phydev->link) {
2644 if ((pp->speed != phydev->speed) ||
2645 (pp->duplex != phydev->duplex)) {
2646 u32 val;
2647
2648 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2649 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2650 MVNETA_GMAC_CONFIG_GMII_SPEED |
Stas Sergeev898b2972015-04-01 20:32:49 +03002651 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002652
2653 if (phydev->duplex)
2654 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2655
2656 if (phydev->speed == SPEED_1000)
2657 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
Thomas Petazzoni4d12bc62014-07-08 10:49:43 +02002658 else if (phydev->speed == SPEED_100)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002659 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2660
2661 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2662
2663 pp->duplex = phydev->duplex;
2664 pp->speed = phydev->speed;
2665 }
2666 }
2667
2668 if (phydev->link != pp->link) {
2669 if (!phydev->link) {
2670 pp->duplex = -1;
2671 pp->speed = 0;
2672 }
2673
2674 pp->link = phydev->link;
2675 status_change = 1;
2676 }
2677
2678 if (status_change) {
2679 if (phydev->link) {
Stas Sergeev898b2972015-04-01 20:32:49 +03002680 if (!pp->use_inband_status) {
2681 u32 val = mvreg_read(pp,
2682 MVNETA_GMAC_AUTONEG_CONFIG);
2683 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
2684 val |= MVNETA_GMAC_FORCE_LINK_PASS;
2685 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2686 val);
2687 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002688 mvneta_port_up(pp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002689 } else {
Stas Sergeev898b2972015-04-01 20:32:49 +03002690 if (!pp->use_inband_status) {
2691 u32 val = mvreg_read(pp,
2692 MVNETA_GMAC_AUTONEG_CONFIG);
2693 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
2694 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
2695 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2696 val);
2697 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002698 mvneta_port_down(pp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002699 }
Ezequiel Garcia0089b742014-10-31 12:57:20 -03002700 phy_print_status(phydev);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002701 }
2702}
2703
2704static int mvneta_mdio_probe(struct mvneta_port *pp)
2705{
2706 struct phy_device *phy_dev;
2707
2708 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2709 pp->phy_interface);
2710 if (!phy_dev) {
2711 netdev_err(pp->dev, "could not find the PHY\n");
2712 return -ENODEV;
2713 }
2714
2715 phy_dev->supported &= PHY_GBIT_FEATURES;
2716 phy_dev->advertising = phy_dev->supported;
2717
2718 pp->phy_dev = phy_dev;
2719 pp->link = 0;
2720 pp->duplex = 0;
2721 pp->speed = 0;
2722
2723 return 0;
2724}
2725
2726static void mvneta_mdio_remove(struct mvneta_port *pp)
2727{
2728 phy_disconnect(pp->phy_dev);
2729 pp->phy_dev = NULL;
2730}
2731
Maxime Ripardf8642882015-09-25 18:09:38 +02002732static void mvneta_percpu_enable(void *arg)
2733{
2734 struct mvneta_port *pp = arg;
2735
2736 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
2737}
2738
2739static void mvneta_percpu_disable(void *arg)
2740{
2741 struct mvneta_port *pp = arg;
2742
2743 disable_percpu_irq(pp->dev->irq);
2744}
2745
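/* With a single RX queue in use (rxq_def), only one CPU should service
 * its interrupt.  The CPU at index rxq_def % num_online_cpus() in the
 * online mask is elected and has its per-CPU port interrupt enabled;
 * the interrupt is disabled on every other online CPU.
 */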
2746static void mvneta_percpu_elect(struct mvneta_port *pp)
2747{
2748 int online_cpu_idx, cpu, i = 0;
2749
2750 online_cpu_idx = rxq_def % num_online_cpus();
2751
2752 for_each_online_cpu(cpu) {
2753 if (i == online_cpu_idx)
2754 /* Enable per-CPU interrupt on the one CPU we
2755 * just elected
2756 */
2757 smp_call_function_single(cpu, mvneta_percpu_enable,
2758 pp, true);
2759 else
 2760 /* Disable the per-CPU interrupt on all the other CPUs */
2761 smp_call_function_single(cpu, mvneta_percpu_disable,
2762 pp, true);
2763 i++;
2764 }
 2765}
2766
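/* CPU hotplug callback.  When a CPU comes online: TX queues are
 * stopped, the NAPI contexts of the other CPUs are synchronised, all
 * port interrupts are masked, this CPU's NAPI is enabled, a serving
 * CPU is (re-)elected and the interrupts are unmasked again.  On
 * CPU_DOWN_PREPARE the interrupts are masked and this CPU's NAPI and
 * per-CPU interrupt are shut down; once the CPU is dead a new serving
 * CPU is elected.
 */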
2767static int mvneta_percpu_notifier(struct notifier_block *nfb,
2768 unsigned long action, void *hcpu)
2769{
2770 struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
2771 cpu_notifier);
2772 int cpu = (unsigned long)hcpu, other_cpu;
2773 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2774
2775 switch (action) {
2776 case CPU_ONLINE:
2777 case CPU_ONLINE_FROZEN:
2778 netif_tx_stop_all_queues(pp->dev);
2779
 2780 /* We have to synchronise on the napi of each CPU
 2781 * except the one just being woken up
2782 */
2783 for_each_online_cpu(other_cpu) {
2784 if (other_cpu != cpu) {
2785 struct mvneta_pcpu_port *other_port =
2786 per_cpu_ptr(pp->ports, other_cpu);
2787
2788 napi_synchronize(&other_port->napi);
2789 }
2790 }
2791
2792 /* Mask all ethernet port interrupts */
2793 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2794 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2795 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2796 napi_enable(&port->napi);
2797
2798 /* Enable per-CPU interrupt on the one CPU we care
2799 * about.
2800 */
2801 mvneta_percpu_elect(pp);
2802
2803 /* Unmask all ethernet port interrupts */
2804 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2805 MVNETA_RX_INTR_MASK(rxq_number) |
2806 MVNETA_TX_INTR_MASK(txq_number) |
2807 MVNETA_MISCINTR_INTR_MASK);
2808 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2809 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2810 MVNETA_CAUSE_LINK_CHANGE |
2811 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2812 netif_tx_start_all_queues(pp->dev);
2813 break;
2814 case CPU_DOWN_PREPARE:
2815 case CPU_DOWN_PREPARE_FROZEN:
2816 netif_tx_stop_all_queues(pp->dev);
2817 /* Mask all ethernet port interrupts */
2818 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2819 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2820 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2821
2822 napi_synchronize(&port->napi);
2823 napi_disable(&port->napi);
2824 /* Disable per-CPU interrupts on the CPU that is
2825 * brought down.
2826 */
2827 smp_call_function_single(cpu, mvneta_percpu_disable,
2828 pp, true);
2829
2830 break;
2831 case CPU_DEAD:
2832 case CPU_DEAD_FROZEN:
 2833 /* Check if a new CPU must be elected now that this one is down */
2834 mvneta_percpu_elect(pp);
2835 /* Unmask all ethernet port interrupts */
2836 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2837 MVNETA_RX_INTR_MASK(rxq_number) |
2838 MVNETA_TX_INTR_MASK(txq_number) |
2839 MVNETA_MISCINTR_INTR_MASK);
2840 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2841 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2842 MVNETA_CAUSE_LINK_CHANGE |
2843 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2844 netif_tx_start_all_queues(pp->dev);
2845 break;
2846 }
2847
2848 return NOTIFY_OK;
2849}
2850
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002851static int mvneta_open(struct net_device *dev)
2852{
2853 struct mvneta_port *pp = netdev_priv(dev);
2854 int ret;
2855
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002856 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
willy tarreau8ec2cd42014-01-16 08:20:16 +01002857 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2858 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002859
2860 ret = mvneta_setup_rxqs(pp);
2861 if (ret)
2862 return ret;
2863
2864 ret = mvneta_setup_txqs(pp);
2865 if (ret)
2866 goto err_cleanup_rxqs;
2867
2868 /* Connect to port interrupt line */
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002869 ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
2870 MVNETA_DRIVER_NAME, pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002871 if (ret) {
2872 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
2873 goto err_cleanup_txqs;
2874 }
2875
Maxime Ripardf8642882015-09-25 18:09:38 +02002876 /* Even though the documentation says that request_percpu_irq
2877 * doesn't enable the interrupts automatically, it actually
2878 * does so on the local CPU.
2879 *
2880 * Make sure it's disabled.
2881 */
2882 mvneta_percpu_disable(pp);
2883
2884 /* Elect a CPU to handle our RX queue interrupt */
2885 mvneta_percpu_elect(pp);
2886
2887 /* Register a CPU notifier to handle the case where our CPU
2888 * might be taken offline.
2889 */
2890 register_cpu_notifier(&pp->cpu_notifier);
2891
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002892 /* By default, the link is down */
2893 netif_carrier_off(pp->dev);
2894
2895 ret = mvneta_mdio_probe(pp);
2896 if (ret < 0) {
2897 netdev_err(dev, "cannot probe MDIO bus\n");
2898 goto err_free_irq;
2899 }
2900
2901 mvneta_start_dev(pp);
2902
2903 return 0;
2904
2905err_free_irq:
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002906 free_percpu_irq(pp->dev->irq, pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002907err_cleanup_txqs:
2908 mvneta_cleanup_txqs(pp);
2909err_cleanup_rxqs:
2910 mvneta_cleanup_rxqs(pp);
2911 return ret;
2912}
2913
2914/* Stop the port, free port interrupt line */
2915static int mvneta_stop(struct net_device *dev)
2916{
2917 struct mvneta_port *pp = netdev_priv(dev);
Maxime Ripardf8642882015-09-25 18:09:38 +02002918 int cpu;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002919
2920 mvneta_stop_dev(pp);
2921 mvneta_mdio_remove(pp);
Maxime Ripardf8642882015-09-25 18:09:38 +02002922 unregister_cpu_notifier(&pp->cpu_notifier);
2923 for_each_present_cpu(cpu)
2924 smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002925 free_percpu_irq(dev->irq, pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002926 mvneta_cleanup_rxqs(pp);
2927 mvneta_cleanup_txqs(pp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002928
2929 return 0;
2930}
2931
Thomas Petazzoni15f59452013-09-04 16:26:52 +02002932static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2933{
2934 struct mvneta_port *pp = netdev_priv(dev);
Thomas Petazzoni15f59452013-09-04 16:26:52 +02002935
2936 if (!pp->phy_dev)
2937 return -ENOTSUPP;
2938
Stas Sergeevecf7b362015-04-01 19:23:29 +03002939 return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
Thomas Petazzoni15f59452013-09-04 16:26:52 +02002940}
2941
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002942/* Ethtool methods */
2943
 2944/* Get settings (phy address, speed) for ethtool */
2945int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2946{
2947 struct mvneta_port *pp = netdev_priv(dev);
2948
2949 if (!pp->phy_dev)
2950 return -ENODEV;
2951
2952 return phy_ethtool_gset(pp->phy_dev, cmd);
2953}
2954
 2955/* Set settings (phy address, speed) for ethtool */
2956int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2957{
2958 struct mvneta_port *pp = netdev_priv(dev);
Stas Sergeev0c0744f2015-12-02 20:35:11 +03002959 struct phy_device *phydev = pp->phy_dev;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002960
Stas Sergeev0c0744f2015-12-02 20:35:11 +03002961 if (!phydev)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002962 return -ENODEV;
2963
Stas Sergeev0c0744f2015-12-02 20:35:11 +03002964 if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
2965 u32 val;
2966
2967 mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE);
2968
2969 if (cmd->autoneg == AUTONEG_DISABLE) {
2970 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2971 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2972 MVNETA_GMAC_CONFIG_GMII_SPEED |
2973 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
2974
2975 if (phydev->duplex)
2976 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2977
2978 if (phydev->speed == SPEED_1000)
2979 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2980 else if (phydev->speed == SPEED_100)
2981 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2982
2983 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2984 }
2985
2986 pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE);
2987 netdev_info(pp->dev, "autoneg status set to %i\n",
2988 pp->use_inband_status);
2989
2990 if (netif_running(dev)) {
2991 mvneta_port_down(pp);
2992 mvneta_port_up(pp);
2993 }
2994 }
2995
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002996 return phy_ethtool_sset(pp->phy_dev, cmd);
2997}
2998
 2999/* Set interrupt coalescing for ethtool */
3000static int mvneta_ethtool_set_coalesce(struct net_device *dev,
3001 struct ethtool_coalesce *c)
3002{
3003 struct mvneta_port *pp = netdev_priv(dev);
3004 int queue;
3005
3006 for (queue = 0; queue < rxq_number; queue++) {
3007 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3008 rxq->time_coal = c->rx_coalesce_usecs;
3009 rxq->pkts_coal = c->rx_max_coalesced_frames;
3010 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3011 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3012 }
3013
3014 for (queue = 0; queue < txq_number; queue++) {
3015 struct mvneta_tx_queue *txq = &pp->txqs[queue];
3016 txq->done_pkts_coal = c->tx_max_coalesced_frames;
3017 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3018 }
3019
3020 return 0;
3021}
3022
 3023/* Get interrupt coalescing for ethtool */
3024static int mvneta_ethtool_get_coalesce(struct net_device *dev,
3025 struct ethtool_coalesce *c)
3026{
3027 struct mvneta_port *pp = netdev_priv(dev);
3028
3029 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
3030 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
3031
3032 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
3033 return 0;
3034}
3035
3036
3037static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
3038 struct ethtool_drvinfo *drvinfo)
3039{
3040 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
3041 sizeof(drvinfo->driver));
3042 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
3043 sizeof(drvinfo->version));
3044 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
3045 sizeof(drvinfo->bus_info));
3046}
3047
3048
3049static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
3050 struct ethtool_ringparam *ring)
3051{
3052 struct mvneta_port *pp = netdev_priv(netdev);
3053
3054 ring->rx_max_pending = MVNETA_MAX_RXD;
3055 ring->tx_max_pending = MVNETA_MAX_TXD;
3056 ring->rx_pending = pp->rx_ring_size;
3057 ring->tx_pending = pp->tx_ring_size;
3058}
3059
static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;

	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
	if (pp->tx_ring_size != ring->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

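/* Export the names of the hardware statistics counters */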
static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}

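/* Read the hardware statistics counters and accumulate them into the
 * driver's 64-bit software copies.
 */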
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
	const struct mvneta_statistic *s;
	void __iomem *base = pp->base;
	u32 high, low;
	u64 val;	/* 64-bit so that T_REG_64 counters are not truncated */
	int i;

	for (i = 0, s = mvneta_statistics;
	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
	     s++, i++) {
		val = 0;

		switch (s->type) {
		case T_REG_32:
			val = readl_relaxed(base + s->offset);
			break;
		case T_REG_64:
			/* Docs say to read low 32-bit then high */
			low = readl_relaxed(base + s->offset);
			high = readl_relaxed(base + s->offset + 4);
			val = (u64)high << 32 | low;
			break;
		}

		pp->ethtool_stats[i] += val;
	}
}

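/* ethtool -S: refresh the accumulated counters and copy them out */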
static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}

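/* Report how many statistics values are exposed */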
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}

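/* Standard net_device callbacks */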
static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_fix_features    = mvneta_fix_features,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
};

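/* ethtool operations supported by the driver */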
const struct ethtool_ops mvneta_eth_tool_ops = {
	.get_link          = ethtool_op_get_link,
	.get_settings      = mvneta_ethtool_get_settings,
	.set_settings      = mvneta_ethtool_set_settings,
	.set_coalesce      = mvneta_ethtool_set_coalesce,
	.get_coalesce      = mvneta_ethtool_get_coalesce,
	.get_drvinfo       = mvneta_ethtool_get_drvinfo,
	.get_ringparam     = mvneta_ethtool_get_ringparam,
	.set_ringparam     = mvneta_ethtool_set_ringparam,
	.get_strings       = mvneta_ethtool_get_strings,
	.get_ethtool_stats = mvneta_ethtool_get_stats,
	.get_sset_count    = mvneta_ethtool_get_sset_count,
};

/* Initialize hw */
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
				GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
				GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
	}

	return 0;
}

/* Platform glue: initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 ctrl;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	switch (phy_mode) {
	case PHY_INTERFACE_MODE_QSGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		ctrl |= MVNETA_GMAC2_PORT_RGMII;
		break;
	default:
		return -EINVAL;
	}

	/* Cancel Port Reset */
	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;

	return 0;
}

/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram_target_info;
	struct resource *res;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *phy_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	const char *managed;
	int phy_mode;
	int err;
	int cpu;

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_node = of_parse_phandle(dn, "phy", 0);
	if (!phy_node) {
		if (!of_phy_is_fixed_link(dn)) {
			dev_err(&pdev->dev, "no PHY specified\n");
			err = -ENODEV;
			goto err_free_irq;
		}

		err = of_phy_register_fixed_link(dn);
		if (err < 0) {
			dev_err(&pdev->dev, "cannot register fixed PHY\n");
			goto err_free_irq;
		}

		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		phy_node = of_node_get(dn);
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_put_phy_node;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp = netdev_priv(dev);
	pp->phy_node = phy_node;
	pp->phy_interface = phy_mode;

	err = of_property_read_string(dn, "managed", &managed);
	pp->use_inband_status = (err == 0 &&
				 strcmp(managed, "in-band-status") == 0);
	pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;

	pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_put_phy_node;
	}

	clk_prepare_enable(pp->clk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
		pp->tx_csum_limit = 1600;

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_free_stats;

	err = mvneta_port_power_up(pp, phy_mode);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_free_stats;
	}

	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvneta_conf_mbus_windows(pp, dram_target_info);

	for_each_present_cpu(cpu) {
		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

		netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
		port->pp = pp;
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_free_stats;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	if (pp->use_inband_status) {
		struct phy_device *phy = of_phy_find_device(dn);

		mvneta_fixed_link_update(pp, phy);

		put_device(&phy->dev);
	}

	return 0;

err_free_stats:
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk);
err_put_phy_node:
	of_node_put(phy_node);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	of_node_put(pp->phy_node);
	free_netdev(dev);

	return 0;
}

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
	},
};

module_platform_driver(mvneta_driver);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

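/* Tunables exposed as module parameters (rx_copybreak is also writable
 * at runtime through sysfs).
 */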
module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);