/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
#define MVNETA_PORT_RX_RESET 0x1cc0
#define MVNETA_PORT_RX_DMA_RESET BIT(0)
#define MVNETA_PHY_ADDR 0x2000
#define MVNETA_PHY_ADDR_MASK 0x1f
#define MVNETA_MBUS_RETRY 0x2010
#define MVNETA_UNIT_INTR_CAUSE 0x2080
#define MVNETA_UNIT_CONTROL 0x20B0
#define MVNETA_PHY_POLLING_ENABLE BIT(1)
#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
#define MVNETA_DEF_RXQ(q) ((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
                                          MVNETA_DEF_RXQ_ARP(q) | \
                                          MVNETA_DEF_RXQ_TCP(q) | \
                                          MVNETA_DEF_RXQ_UDP(q) | \
                                          MVNETA_DEF_RXQ_BPDU(q) | \
                                          MVNETA_TX_UNSET_ERR_SUM | \
                                          MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND 0x2404
#define MVNETA_MAC_ADDR_LOW 0x2414
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
#define MVNETA_OVERRUN_FRAME_COUNT 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is
 * not set, then a read of the register from this CPU will always
 * return 0 and a write won't do anything.
 */

#define MVNETA_INTR_NEW_CAUSE 0x25a0
#define MVNETA_INTR_NEW_MASK 0x25a4

/* bits 0..7  = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK BIT(31)

#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
#define MVNETA_CAUSE_PTP BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
#define MVNETA_CAUSE_PRBS_ERR BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff

#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
#define MVNETA_GMAC_LINK_UP BIT(0)
#define MVNETA_GMAC_SPEED_1000 BIT(1)
#define MVNETA_GMAC_SPEED_100 BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
#define MVNETA_DA_FILT_UCAST_BASE 0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index) \
        (((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS 1
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100

/* The two-byte Marvell header. It either contains a special value
 * used by Marvell switches when a specific hardware mode is enabled
 * (not supported by this driver), or is filled automatically with
 * zeroes on the RX side. Since those two bytes sit at the front of
 * the Ethernet header, they automatically align the IP header on a
 * 4-byte boundary: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE 2

#define MVNETA_VLAN_TAG_LEN 4

#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
#define MVNETA_TX_CSUM_DEF_SIZE 1600
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT 1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000

#define MVNETA_TX_MTU_MAX 0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE 1

/* TSO header size */
#define TSO_HEADER_SIZE 128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100

#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
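/* Each software-TSO segment needs one descriptor for its rebuilt header in
 * addition to at least one data descriptor, which is presumably why the
 * limit above doubles MVNETA_MAX_TSO_SEGS and keeps MAX_SKB_FRAGS as slack.
 */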

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32

#define MVNETA_RX_PKT_SIZE(mtu) \
        ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
              ETH_HLEN + ETH_FCS_LEN, \
              MVNETA_CPU_D_CACHE_LINE_SIZE)
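/* For example, with an MTU of 1500 this gives
 * ALIGN(1500 + 2 + 4 + 14 + 4, 32) = 1536 bytes per received frame.
 */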

#define IS_TSO_HEADER(txq, addr) \
        ((addr >= txq->tso_hdrs_phys) && \
         (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
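/* IS_TSO_HEADER() checks whether a DMA address falls inside the queue's
 * preallocated TSO header area, so that mvneta_txq_bufs_free() can skip
 * dma_unmap_single() for descriptors pointing at those headers.
 */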

#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)

struct mvneta_statistic {
        unsigned short offset;
        unsigned short type;
        const char name[ETH_GSTRING_LEN];
};

#define T_REG_32 32
#define T_REG_64 64

static const struct mvneta_statistic mvneta_statistics[] = {
        { 0x3000, T_REG_64, "good_octets_received", },
        { 0x3010, T_REG_32, "good_frames_received", },
        { 0x3008, T_REG_32, "bad_octets_received", },
        { 0x3014, T_REG_32, "bad_frames_received", },
        { 0x3018, T_REG_32, "broadcast_frames_received", },
        { 0x301c, T_REG_32, "multicast_frames_received", },
        { 0x3050, T_REG_32, "unrec_mac_control_received", },
        { 0x3058, T_REG_32, "good_fc_received", },
        { 0x305c, T_REG_32, "bad_fc_received", },
        { 0x3060, T_REG_32, "undersize_received", },
        { 0x3064, T_REG_32, "fragments_received", },
        { 0x3068, T_REG_32, "oversize_received", },
        { 0x306c, T_REG_32, "jabber_received", },
        { 0x3070, T_REG_32, "mac_receive_error", },
        { 0x3074, T_REG_32, "bad_crc_event", },
        { 0x3078, T_REG_32, "collision", },
        { 0x307c, T_REG_32, "late_collision", },
        { 0x2484, T_REG_32, "rx_discard", },
        { 0x2488, T_REG_32, "rx_overrun", },
        { 0x3020, T_REG_32, "frames_64_octets", },
        { 0x3024, T_REG_32, "frames_65_to_127_octets", },
        { 0x3028, T_REG_32, "frames_128_to_255_octets", },
        { 0x302c, T_REG_32, "frames_256_to_511_octets", },
        { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
        { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
        { 0x3038, T_REG_64, "good_octets_sent", },
        { 0x3040, T_REG_32, "good_frames_sent", },
        { 0x3044, T_REG_32, "excessive_collision", },
        { 0x3048, T_REG_32, "multicast_frames_sent", },
        { 0x304c, T_REG_32, "broadcast_frames_sent", },
        { 0x3054, T_REG_32, "fc_sent", },
        { 0x300c, T_REG_32, "internal_mac_transmit_err", },
};

struct mvneta_pcpu_stats {
        struct u64_stats_sync syncp;
        u64 rx_packets;
        u64 rx_bytes;
        u64 tx_packets;
        u64 tx_bytes;
};

struct mvneta_pcpu_port {
        /* Pointer to the shared port */
        struct mvneta_port *pp;

        /* Pointer to the CPU-local NAPI struct */
        struct napi_struct napi;

        /* Cause of the previous interrupt */
        u32 cause_rx_tx;
};

struct mvneta_port {
        struct mvneta_pcpu_port __percpu *ports;
        struct mvneta_pcpu_stats __percpu *stats;

        int pkt_size;
        unsigned int frag_size;
        void __iomem *base;
        struct mvneta_rx_queue *rxqs;
        struct mvneta_tx_queue *txqs;
        struct net_device *dev;
        struct notifier_block cpu_notifier;
        int rxq_def;

        /* Core clock */
        struct clk *clk;
        /* AXI clock */
        struct clk *clk_bus;
        u8 mcast_count[256];
        u16 tx_ring_size;
        u16 rx_ring_size;

        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
        phy_interface_t phy_interface;
        struct device_node *phy_node;
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;
        unsigned int tx_csum_limit;
        unsigned int use_inband_status:1;

        u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

        u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
#define MVNETA_TX_L3_IP6 BIT(17)
#define MVNETA_TXD_IP_CSUM BIT(18)
#define MVNETA_TXD_Z_PAD BIT(19)
#define MVNETA_TXD_L_DESC BIT(20)
#define MVNETA_TXD_F_DESC BIT(21)
#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
                             MVNETA_TXD_L_DESC | \
                             MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)

#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
#define MVNETA_RXD_ERR_LEN BIT(18)
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
        u32 command;            /* Options used by HW for packet transmitting.*/
        u16 reserverd1;         /* csum_l4 (for future use) */
        u16 data_size;          /* Data size of transmitted packet in bytes */
        u32 buf_phys_addr;      /* Physical addr of transmitted buffer */
        u32 reserved2;          /* hw_cmd - (for future use, PMT) */
        u32 reserved3[4];       /* Reserved - (for future use) */
};

struct mvneta_rx_desc {
        u32 status;             /* Info about received packet */
        u16 reserved1;          /* pnc_info - (for future use, PnC) */
        u16 data_size;          /* Size of received packet in bytes */

        u32 buf_phys_addr;      /* Physical address of the buffer */
        u32 reserved2;          /* pnc_flow_id (for future use, PnC) */

        u32 buf_cookie;         /* cookie for access to RX buffer in rx path */
        u16 reserved3;          /* prefetch_cmd, for future use */
        u16 reserved4;          /* csum_l4 - (for future use, PnC) */

        u32 reserved5;          /* pnc_extra PnC (for future use, PnC) */
        u32 reserved6;          /* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
        u16 data_size;          /* Data size of transmitted packet in bytes */
        u16 reserverd1;         /* csum_l4 (for future use) */
        u32 command;            /* Options used by HW for packet transmitting.*/
        u32 reserved2;          /* hw_cmd - (for future use, PMT) */
        u32 buf_phys_addr;      /* Physical addr of transmitted buffer */
        u32 reserved3[4];       /* Reserved - (for future use) */
};

struct mvneta_rx_desc {
        u16 data_size;          /* Size of received packet in bytes */
        u16 reserved1;          /* pnc_info - (for future use, PnC) */
        u32 status;             /* Info about received packet */

        u32 reserved2;          /* pnc_flow_id (for future use, PnC) */
        u32 buf_phys_addr;      /* Physical address of the buffer */

        u16 reserved4;          /* csum_l4 - (for future use, PnC) */
        u16 reserved3;          /* prefetch_cmd, for future use */
        u32 buf_cookie;         /* cookie for access to RX buffer in rx path */

        u32 reserved5;          /* pnc_extra PnC (for future use, PnC) */
        u32 reserved6;          /* hw_cmd (for future use, PnC and HWF) */
};
#endif

struct mvneta_tx_queue {
        /* Number of this TX queue, in the range 0-7 */
        u8 id;

        /* Number of TX DMA descriptors in the descriptor ring */
        int size;

        /* Number of currently used TX DMA descriptor in the
         * descriptor ring
         */
        int count;
        int tx_stop_threshold;
        int tx_wake_threshold;

        /* Array of transmitted skb */
        struct sk_buff **tx_skb;

        /* Index of last TX DMA descriptor that was inserted */
        int txq_put_index;

        /* Index of the TX DMA descriptor to be cleaned up */
        int txq_get_index;

        u32 done_pkts_coal;

        /* Virtual address of the TX DMA descriptors array */
        struct mvneta_tx_desc *descs;

        /* DMA address of the TX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last TX DMA descriptor */
        int last_desc;

        /* Index of the next TX DMA descriptor to process */
        int next_desc_to_proc;

        /* DMA buffers for TSO headers */
        char *tso_hdrs;

        /* DMA address of TSO headers */
        dma_addr_t tso_hdrs_phys;

        /* Affinity mask for CPUs */
        cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
        /* rx queue number, in the range 0-7 */
        u8 id;

        /* num of rx descriptors in the rx descriptor ring */
        int size;

        /* counter of times when mvneta_refill() failed */
        int missed;

        u32 pkts_coal;
        u32 time_coal;

        /* Virtual address of the RX DMA descriptors array */
        struct mvneta_rx_desc *descs;

        /* DMA address of the RX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last RX DMA descriptor */
        int last_desc;

        /* Index of the next RX DMA descriptor to process */
        int next_desc_to_proc;
};

/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
        writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
        return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
        txq->txq_get_index++;
        if (txq->txq_get_index == txq->size)
                txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
        txq->txq_put_index++;
        if (txq->txq_put_index == txq->size)
                txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
        int i;
        u32 dummy;

        /* Perform dummy reads from MIB counters */
        for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
                dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
        dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
        dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
                                             struct rtnl_link_stats64 *stats)
{
        struct mvneta_port *pp = netdev_priv(dev);
        unsigned int start;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct mvneta_pcpu_stats *cpu_stats;
                u64 rx_packets;
                u64 rx_bytes;
                u64 tx_packets;
                u64 tx_bytes;

                cpu_stats = per_cpu_ptr(pp->stats, cpu);
                do {
                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        rx_packets = cpu_stats->rx_packets;
                        rx_bytes = cpu_stats->rx_bytes;
                        tx_packets = cpu_stats->tx_packets;
                        tx_bytes = cpu_stats->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

                stats->rx_packets += rx_packets;
                stats->rx_bytes += rx_bytes;
                stats->tx_packets += tx_packets;
                stats->tx_bytes += tx_bytes;
        }

        stats->rx_errors = dev->stats.rx_errors;
        stats->rx_dropped = dev->stats.rx_dropped;

        stats->tx_dropped = dev->stats.tx_dropped;

        return stats;
}

/* Rx descriptors helper methods */

/* Checks whether the given RX descriptor status marks it as both the
 * first and the last descriptor of the RX packet. Each RX packet is
 * currently received through a single RX descriptor, so an RX
 * descriptor without both its first and last bits set is an error.
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
        return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
                MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
                                          struct mvneta_rx_queue *rxq,
                                          int ndescs)
{
        /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
         * be added at once
         */
        while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                            (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
                             MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
                ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
        }

        mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
                                        struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
        return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update the number of RX descriptors; called upon return from the RX
 * path or from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
                                       struct mvneta_rx_queue *rxq,
                                       int rx_done, int rx_filled)
{
        u32 val;

        if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
                val = rx_done |
                        (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
                return;
        }

        /* Only 255 descriptors can be added at once */
        while ((rx_done > 0) || (rx_filled > 0)) {
                if (rx_done <= 0xff) {
                        val = rx_done;
                        rx_done = 0;
                } else {
                        val = 0xff;
                        rx_done -= 0xff;
                }
                if (rx_filled <= 0xff) {
                        val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled = 0;
                } else {
                        val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled -= 0xff;
                }
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
        }
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
        int rx_desc = rxq->next_desc_to_proc;

        rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
        prefetch(rxq->descs + rxq->next_desc_to_proc);
        return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
        val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
                MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq,
                                  int offset)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

        /* Offset is in units of 8 bytes */
        val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq,
                                     int pend_desc)
{
        u32 val;

        /* Only 255 descriptors can be added at once; assume the
         * caller processes TX descriptors in quanta of less than 256.
         */
        val = pend_desc;
        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
        int tx_desc = txq->next_desc_to_proc;

        txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
        return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
        if (txq->next_desc_to_proc == 0)
                txq->next_desc_to_proc = txq->last_desc - 1;
        else
                txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq,
                                    int buf_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

        val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
        val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
        int queue;
        u32 q_map;

        /* Enable all initialized TXs. */
        q_map = 0;
        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];
                if (txq->descs != NULL)
                        q_map |= (1 << queue);
        }
        mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

        /* Enable all initialized RXQs. */
        for (queue = 0; queue < rxq_number; queue++) {
                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

                if (rxq->descs != NULL)
                        q_map |= (1 << queue);
        }
        mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
        u32 val;
        int count;

        /* Stop Rx port activity. Check port Rx activity. */
        val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

        /* Issue stop command for active channels only */
        if (val != 0)
                mvreg_write(pp, MVNETA_RXQ_CMD,
                            val << MVNETA_RXQ_DISABLE_SHIFT);

        /* Wait for all Rx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for RX stopped! rx_queue_cmd: 0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_RXQ_CMD);
        } while (val & 0xff);

        /* Stop Tx port activity. Check port Tx activity. Issue stop
         * command for active channels only
         */
        val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

        if (val != 0)
                mvreg_write(pp, MVNETA_TXQ_CMD,
                            (val << MVNETA_TXQ_DISABLE_SHIFT));

        /* Wait for all Tx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for TX stopped status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                /* Check TX Command reg that all Txqs are stopped */
                val = mvreg_read(pp, MVNETA_TXQ_CMD);

        } while (val & 0xff);

        /* Double check to verify that TX FIFO is empty */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
                        netdev_warn(pp->dev,
                                    "TX FIFO empty timeout status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_PORT_STATUS);
        } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
                 (val & MVNETA_TX_IN_PRGRS));

        udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
        u32 val;

        /* Enable port */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val |= MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
        u32 val;

        /* Reset the Enable bit in the Serial Control Register */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

        udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);

}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
                val = 0;
        } else {
                memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

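/* Enable or disable in-band link auto-negotiation in the GMAC (used for
 * in-band/SGMII status); this is presumably what pp->use_inband_status
 * selects when mvneta_defaults_set() calls mvneta_set_autoneg() below.
 */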
static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
{
        u32 val;

        if (enable) {
                val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
                val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
                         MVNETA_GMAC_FORCE_LINK_DOWN |
                         MVNETA_GMAC_AN_FLOW_CTRL_EN);
                val |= MVNETA_GMAC_INBAND_AN_ENABLE |
                       MVNETA_GMAC_AN_SPEED_EN |
                       MVNETA_GMAC_AN_DUPLEX_EN;
                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

                val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
                val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
                mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

                val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
                val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
                mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
        } else {
                val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
                val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
                         MVNETA_GMAC_AN_SPEED_EN |
                         MVNETA_GMAC_AN_DUPLEX_EN);
                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

                val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
                val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
                mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

                val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
                val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
                mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
        }
}

static void mvneta_percpu_unmask_interrupt(void *arg)
{
        struct mvneta_port *pp = arg;

        /* All the queues are unmasked, but actually only the ones
         * mapped to this CPU will be unmasked
         */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                    MVNETA_RX_INTR_MASK_ALL |
                    MVNETA_TX_INTR_MASK_ALL |
                    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
        struct mvneta_port *pp = arg;

        /* All the queues are masked, but actually only the ones
         * mapped to this CPU will be masked
         */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
        struct mvneta_port *pp = arg;

        /* All the queues are cleared, but actually only the ones
         * mapped to this CPU will be cleared
         */
        mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
        int cpu;
        int queue;
        u32 val;
        int max_cpu = num_present_cpus();

        /* Clear all Cause registers */
        on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

        /* Mask all interrupts */
        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
        mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

        /* Enable MBUS Retry bit16 */
        mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

        /* Set the CPU queue access map. CPUs are assigned to the RX
         * and TX queues modulo their number. If there is only one TX
         * queue then it is assigned to the CPU associated with the
         * default RX queue.
         */
        for_each_present_cpu(cpu) {
                int rxq_map = 0, txq_map = 0;
                int rxq, txq;

                for (rxq = 0; rxq < rxq_number; rxq++)
                        if ((rxq % max_cpu) == cpu)
                                rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

                for (txq = 0; txq < txq_number; txq++)
                        if ((txq % max_cpu) == cpu)
                                txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

                /* With only one TX queue we configure a special case
                 * which allows all the IRQs to be handled on a single
                 * CPU.
                 */
                if (txq_number == 1)
                        txq_map = (cpu == pp->rxq_def) ?
                                MVNETA_CPU_TXQ_ACCESS(1) : 0;

                mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
        }

        /* Reset RX and TX DMAs */
        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
        mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

        /* Disable Legacy WRR, Disable EJP, Release from reset */
        mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
        for (queue = 0; queue < txq_number; queue++) {
                mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
                mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
        }

        mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
        mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

        /* Set Port Acceleration Mode */
        val = MVNETA_ACC_MODE_EXT;
        mvreg_write(pp, MVNETA_ACC_MODE, val);

        /* Update val of portCfg register according to all RxQueue types */
        val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
        mvreg_write(pp, MVNETA_PORT_CONFIG, val);

        val = 0;
        mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
        mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

        /* Build PORT_SDMA_CONFIG_REG */
        val = 0;

        /* Default burst size */
        val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
        val |= MVNETA_DESC_SWAP;
#endif

        /* Assign port SDMA configuration */
        mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

        /* Disable PHY polling in hardware, since we're using the
         * kernel phylib to do this.
         */
        val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
        val &= ~MVNETA_PHY_POLLING_ENABLE;
        mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

        mvneta_set_autoneg(pp, pp->use_inband_status);
        mvneta_set_ucast_table(pp, -1);
        mvneta_set_special_mcast_table(pp, -1);
        mvneta_set_other_mcast_table(pp, -1);

        /* Set port interrupt enable register - default enable all */
        mvreg_write(pp, MVNETA_INTR_ENABLE,
                    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
                     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

        mvneta_mib_counters_clear(pp);
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)

{
        u32 val, size, mtu;
        int queue;

        mtu = max_tx_size * 8;
        if (mtu > MVNETA_TX_MTU_MAX)
                mtu = MVNETA_TX_MTU_MAX;

        /* Set MTU */
        val = mvreg_read(pp, MVNETA_TX_MTU);
        val &= ~MVNETA_TX_MTU_MAX;
        val |= mtu;
        mvreg_write(pp, MVNETA_TX_MTU, val);

        /* TX token size and all TXQs token size must be larger than the MTU */
        val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

        size = val & MVNETA_TX_TOKEN_SIZE_MAX;
        if (size < mtu) {
                size = mtu;
                val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
                val |= size;
                mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
        }
        for (queue = 0; queue < txq_number; queue++) {
                val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

                size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
                if (size < mtu) {
                        size = mtu;
                        val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
                        val |= size;
                        mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
                }
        }
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
                                  int queue)
{
        unsigned int unicast_reg;
        unsigned int tbl_offset;
        unsigned int reg_offset;

        /* Locate the Unicast table entry */
        last_nibble = (0xf & last_nibble);

        /* offset from unicast tbl base */
        tbl_offset = (last_nibble / 4) * 4;

        /* offset within the above reg */
        reg_offset = last_nibble % 4;

        unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

        if (queue == -1) {
                /* Clear accepts frame bit at specified unicast DA tbl entry */
                unicast_reg &= ~(0xff << (8 * reg_offset));
        } else {
                unicast_reg &= ~(0xff << (8 * reg_offset));
                unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
        }

        mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
                                int queue)
{
        unsigned int mac_h;
        unsigned int mac_l;

        if (queue != -1) {
                mac_l = (addr[4] << 8) | (addr[5]);
                mac_h = (addr[0] << 24) | (addr[1] << 16) |
                        (addr[2] << 8) | (addr[3] << 0);

                mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
                mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
        }

        /* Accept frames of this address */
        mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq, u32 value)
{
        mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
                    value | MVNETA_RXQ_NON_OCCUPIED(0));
        rxq->pkts_coal = value;
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq, u32 value)
{
        u32 val;
        unsigned long clk_rate;

        clk_rate = clk_get_rate(pp->clk);
        val = (clk_rate / 1000000) * value;

        mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
        rxq->time_coal = value;
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
                                         struct mvneta_tx_queue *txq, u32 value)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

        val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
        val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

        txq->done_pkts_coal = value;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
                                u32 phys_addr, u32 cookie)
{
        rx_desc->buf_cookie = cookie;
        rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq,
                                     int sent_desc)
{
        u32 val;

        /* Only 255 TX descriptors can be updated at once */
        while (sent_desc > 0xff) {
                val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
                mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
                sent_desc = sent_desc - 0xff;
        }

        val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
                                        struct mvneta_tx_queue *txq)
{
        u32 val;
        int sent_desc;

        val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
        sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
                MVNETA_TXQ_SENT_DESC_SHIFT;

        return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq)
{
        int sent_desc;

        /* Get number of sent descriptors */
        sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

        /* Decrement sent descriptors counter */
        if (sent_desc)
                mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

        return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
                                int ip_hdr_len, int l4_proto)
{
        u32 command;

        /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
         * G_L4_chk, L4_type; required only for checksum
         * calculation
         */
        command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
        command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

        if (l3_proto == htons(ETH_P_IP))
                command |= MVNETA_TXD_IP_CSUM;
        else
                command |= MVNETA_TX_L3_IP6;

        if (l4_proto == IPPROTO_TCP)
                command |= MVNETA_TX_L4_CSUM_FULL;
        else if (l4_proto == IPPROTO_UDP)
                command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
        else
                command |= MVNETA_TX_L4_CSUM_NOT;

        return command;
}


/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
                            struct mvneta_rx_desc *rx_desc)
{
        u32 status = rx_desc->status;

        if (!mvneta_rxq_desc_is_first_last(status)) {
                netdev_err(pp->dev,
                           "bad rx status %08x (buffer oversize), size=%d\n",
                           status, rx_desc->data_size);
                return;
        }

        switch (status & MVNETA_RXD_ERR_CODE_MASK) {
        case MVNETA_RXD_ERR_CRC:
                netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_OVERRUN:
                netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_LEN:
                netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_RESOURCE:
                netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        }
}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
                           struct sk_buff *skb)
{
        if ((status & MVNETA_RXD_L3_IP4) &&
            (status & MVNETA_RXD_L4_CSUM_OK)) {
                skb->csum = 0;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                return;
        }

        skb->ip_summed = CHECKSUM_NONE;
}

/* Return the tx queue pointer (found via the last set bit) according to
 * <cause> returned from the tx_done reg. <cause> must not be null. The
 * return value is always a valid queue, matching the first one found in
 * <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
                                                     u32 cause)
{
        int queue = fls(cause) - 1;

        return &pp->txqs[queue];
}

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
                                 struct mvneta_tx_queue *txq, int num)
{
        int i;

        for (i = 0; i < num; i++) {
                struct mvneta_tx_desc *tx_desc = txq->descs +
                        txq->txq_get_index;
                struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

                mvneta_txq_inc_get(txq);

                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
                        dma_unmap_single(pp->dev->dev.parent,
                                         tx_desc->buf_phys_addr,
                                         tx_desc->data_size, DMA_TO_DEVICE);
                if (!skb)
                        continue;
                dev_kfree_skb_any(skb);
        }
}

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
                            struct mvneta_tx_queue *txq)
{
        struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
        int tx_done;

        tx_done = mvneta_txq_sent_desc_proc(pp, txq);
        if (!tx_done)
                return;

        mvneta_txq_bufs_free(pp, txq, tx_done);

        txq->count -= tx_done;

        if (netif_tx_queue_stopped(nq)) {
                if (txq->count <= txq->tx_wake_threshold)
                        netif_tx_wake_queue(nq);
        }
}

static void *mvneta_frag_alloc(const struct mvneta_port *pp)
{
        if (likely(pp->frag_size <= PAGE_SIZE))
                return netdev_alloc_frag(pp->frag_size);
        else
                return kmalloc(pp->frag_size, GFP_ATOMIC);
}

static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
{
        if (likely(pp->frag_size <= PAGE_SIZE))
                skb_free_frag(data);
        else
                kfree(data);
}

/* Refill processing */
static int mvneta_rx_refill(struct mvneta_port *pp,
                            struct mvneta_rx_desc *rx_desc)

{
        dma_addr_t phys_addr;
        void *data;

        data = mvneta_frag_alloc(pp);
        if (!data)
                return -ENOMEM;

        phys_addr = dma_map_single(pp->dev->dev.parent, data,
                                   MVNETA_RX_BUF_SIZE(pp->pkt_size),
                                   DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
                mvneta_frag_free(pp, data);
                return -ENOMEM;
        }

        mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
        return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int ip_hdr_len = 0;
                __be16 l3_proto = vlan_get_protocol(skb);
                u8 l4_proto;

                if (l3_proto == htons(ETH_P_IP)) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001565 struct iphdr *ip4h = ip_hdr(skb);
1566
1567 /* Calculate IPv4 checksum and L4 checksum */
1568 ip_hdr_len = ip4h->ihl;
1569 l4_proto = ip4h->protocol;
Vlad Yasevich817dbfa2014-08-25 10:34:54 -04001570 } else if (l3_proto == htons(ETH_P_IPV6)) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001571 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1572
1573 /* Read l4_protocol from one of IPv6 extra headers */
1574 if (skb_network_header_len(skb) > 0)
1575 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1576 l4_proto = ip6h->nexthdr;
1577 } else
1578 return MVNETA_TX_L4_CSUM_NOT;
1579
1580 return mvneta_txq_desc_csum(skb_network_offset(skb),
Vlad Yasevich817dbfa2014-08-25 10:34:54 -04001581 l3_proto, ip_hdr_len, l4_proto);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001582 }
1583
1584 return MVNETA_TX_L4_CSUM_NOT;
1585}
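/* Note that ip_hdr_len is expressed in 32-bit words, as expected by
 * mvneta_txq_desc_csum(): for IPv4 it is taken directly from ihl
 * (5 for a header without options), and for IPv6 it is the network
 * header length divided by four.
 */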
1586
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001587/* Drop packets received by the RXQ and free buffers */
1588static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1589 struct mvneta_rx_queue *rxq)
1590{
1591 int rx_done, i;
1592
1593 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1594 for (i = 0; i < rxq->size; i++) {
1595 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
willy tarreau8ec2cd42014-01-16 08:20:16 +01001596 void *data = (void *)rx_desc->buf_cookie;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001597
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001598 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
Ezequiel Garciaa328f3a2013-12-05 13:35:37 -03001599 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
Justin Maggard8c94ddb2015-11-09 17:21:05 -08001600 mvneta_frag_free(pp, data);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001601 }
1602
1603 if (rx_done)
1604 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1605}
1606
1607/* Main rx processing */
1608static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1609 struct mvneta_rx_queue *rxq)
1610{
Maxime Ripard12bb03b2015-09-25 18:09:36 +02001611 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001612 struct net_device *dev = pp->dev;
Simon Guinota84e3282015-07-19 13:00:53 +02001613 int rx_done;
willy tarreaudc4277d2014-01-16 08:20:07 +01001614 u32 rcvd_pkts = 0;
1615 u32 rcvd_bytes = 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001616
1617 /* Get number of received packets */
1618 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1619
1620 if (rx_todo > rx_done)
1621 rx_todo = rx_done;
1622
1623 rx_done = 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001624
1625 /* Fairness NAPI loop */
1626 while (rx_done < rx_todo) {
1627 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1628 struct sk_buff *skb;
willy tarreau8ec2cd42014-01-16 08:20:16 +01001629 unsigned char *data;
Simon Guinotdaf158d2015-09-15 22:41:21 +02001630 dma_addr_t phys_addr;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001631 u32 rx_status;
1632 int rx_bytes, err;
1633
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001634 rx_done++;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001635 rx_status = rx_desc->status;
willy tarreauf19fadf2014-01-16 08:20:17 +01001636 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
willy tarreau8ec2cd42014-01-16 08:20:16 +01001637 data = (unsigned char *)rx_desc->buf_cookie;
Simon Guinotdaf158d2015-09-15 22:41:21 +02001638 phys_addr = rx_desc->buf_phys_addr;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001639
willy tarreau54282132014-01-16 08:20:14 +01001640 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
willy tarreauf19fadf2014-01-16 08:20:17 +01001641 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1642 err_drop_frame:
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001643 dev->stats.rx_errors++;
1644 mvneta_rx_error(pp, rx_desc);
willy tarreau8ec2cd42014-01-16 08:20:16 +01001645 /* leave the descriptor untouched */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001646 continue;
1647 }
1648
willy tarreauf19fadf2014-01-16 08:20:17 +01001649 if (rx_bytes <= rx_copybreak) {
1650 /* better copy a small frame and not unmap the DMA region */
1651 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
1652 if (unlikely(!skb))
1653 goto err_drop_frame;
1654
1655 dma_sync_single_range_for_cpu(dev->dev.parent,
1656 rx_desc->buf_phys_addr,
1657 MVNETA_MH_SIZE + NET_SKB_PAD,
1658 rx_bytes,
1659 DMA_FROM_DEVICE);
1660 memcpy(skb_put(skb, rx_bytes),
1661 data + MVNETA_MH_SIZE + NET_SKB_PAD,
1662 rx_bytes);
1663
1664 skb->protocol = eth_type_trans(skb, dev);
1665 mvneta_rx_csum(pp, rx_status, skb);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02001666 napi_gro_receive(&port->napi, skb);
willy tarreauf19fadf2014-01-16 08:20:17 +01001667
1668 rcvd_pkts++;
1669 rcvd_bytes += rx_bytes;
1670
1671 /* leave the descriptor and buffer untouched */
1672 continue;
1673 }
1674
Simon Guinota84e3282015-07-19 13:00:53 +02001675 /* Refill processing */
1676 err = mvneta_rx_refill(pp, rx_desc);
1677 if (err) {
1678 netdev_err(dev, "Linux processing - Can't refill\n");
1679 rxq->missed++;
1680 goto err_drop_frame;
1681 }
1682
willy tarreauf19fadf2014-01-16 08:20:17 +01001683 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
willy tarreauf19fadf2014-01-16 08:20:17 +01001684
Marcin Wojtas26c17a172015-11-30 13:27:44 +01001685		/* After refill, the old buffer has to be unmapped regardless
1686		 * of whether the skb was successfully built or not.
1687 */
Simon Guinotdaf158d2015-09-15 22:41:21 +02001688 dma_unmap_single(dev->dev.parent, phys_addr,
Ezequiel Garciaa328f3a2013-12-05 13:35:37 -03001689 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001690
Marcin Wojtas26c17a172015-11-30 13:27:44 +01001691 if (!skb)
1692 goto err_drop_frame;
1693
willy tarreaudc4277d2014-01-16 08:20:07 +01001694 rcvd_pkts++;
1695 rcvd_bytes += rx_bytes;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001696
1697 /* Linux processing */
willy tarreau8ec2cd42014-01-16 08:20:16 +01001698 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001699 skb_put(skb, rx_bytes);
1700
1701 skb->protocol = eth_type_trans(skb, dev);
1702
willy tarreau54282132014-01-16 08:20:14 +01001703 mvneta_rx_csum(pp, rx_status, skb);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001704
Maxime Ripard12bb03b2015-09-25 18:09:36 +02001705 napi_gro_receive(&port->napi, skb);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001706 }
1707
willy tarreaudc4277d2014-01-16 08:20:07 +01001708 if (rcvd_pkts) {
willy tarreau74c41b02014-01-16 08:20:08 +01001709 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1710
1711 u64_stats_update_begin(&stats->syncp);
1712 stats->rx_packets += rcvd_pkts;
1713 stats->rx_bytes += rcvd_bytes;
1714 u64_stats_update_end(&stats->syncp);
willy tarreaudc4277d2014-01-16 08:20:07 +01001715 }
1716
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001717 /* Update rxq management counters */
Simon Guinota84e3282015-07-19 13:00:53 +02001718 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001719
1720 return rx_done;
1721}
1722
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03001723static inline void
1724mvneta_tso_put_hdr(struct sk_buff *skb,
1725 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
1726{
1727 struct mvneta_tx_desc *tx_desc;
1728 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1729
1730 txq->tx_skb[txq->txq_put_index] = NULL;
1731 tx_desc = mvneta_txq_next_desc_get(txq);
1732 tx_desc->data_size = hdr_len;
1733 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
1734 tx_desc->command |= MVNETA_TXD_F_DESC;
1735 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
1736 txq->txq_put_index * TSO_HEADER_SIZE;
1737 mvneta_txq_inc_put(txq);
1738}
1739
1740static inline int
1741mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
1742 struct sk_buff *skb, char *data, int size,
1743 bool last_tcp, bool is_last)
1744{
1745 struct mvneta_tx_desc *tx_desc;
1746
1747 tx_desc = mvneta_txq_next_desc_get(txq);
1748 tx_desc->data_size = size;
1749 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
1750 size, DMA_TO_DEVICE);
1751 if (unlikely(dma_mapping_error(dev->dev.parent,
1752 tx_desc->buf_phys_addr))) {
1753 mvneta_txq_desc_put(txq);
1754 return -ENOMEM;
1755 }
1756
1757 tx_desc->command = 0;
1758 txq->tx_skb[txq->txq_put_index] = NULL;
1759
1760 if (last_tcp) {
1761 /* last descriptor in the TCP packet */
1762 tx_desc->command = MVNETA_TXD_L_DESC;
1763
1764 /* last descriptor in SKB */
1765 if (is_last)
1766 txq->tx_skb[txq->txq_put_index] = skb;
1767 }
1768 mvneta_txq_inc_put(txq);
1769 return 0;
1770}
1771
1772static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
1773 struct mvneta_tx_queue *txq)
1774{
1775 int total_len, data_left;
1776 int desc_count = 0;
1777 struct mvneta_port *pp = netdev_priv(dev);
1778 struct tso_t tso;
1779 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1780 int i;
1781
1782 /* Count needed descriptors */
1783 if ((txq->count + tso_count_descs(skb)) >= txq->size)
1784 return 0;
1785
1786 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
1787 pr_info("*** Is this even possible???!?!?\n");
1788 return 0;
1789 }
1790
1791 /* Initialize the TSO handler, and prepare the first payload */
1792 tso_start(skb, &tso);
1793
1794 total_len = skb->len - hdr_len;
1795 while (total_len > 0) {
1796 char *hdr;
1797
1798 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1799 total_len -= data_left;
1800 desc_count++;
1801
1802 /* prepare packet headers: MAC + IP + TCP */
1803 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
1804 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1805
1806 mvneta_tso_put_hdr(skb, pp, txq);
1807
1808 while (data_left > 0) {
1809 int size;
1810 desc_count++;
1811
1812 size = min_t(int, tso.size, data_left);
1813
1814 if (mvneta_tso_put_data(dev, txq, skb,
1815 tso.data, size,
1816 size == data_left,
1817 total_len == 0))
1818 goto err_release;
1819 data_left -= size;
1820
1821 tso_build_data(skb, &tso, size);
1822 }
1823 }
1824
1825 return desc_count;
1826
1827err_release:
1828 /* Release all used data descriptors; header descriptors must not
1829 * be DMA-unmapped.
1830 */
1831 for (i = desc_count - 1; i >= 0; i--) {
1832 struct mvneta_tx_desc *tx_desc = txq->descs + i;
Ezequiel Garcia2e3173a2014-05-30 13:40:07 -03001833 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03001834 dma_unmap_single(pp->dev->dev.parent,
1835 tx_desc->buf_phys_addr,
1836 tx_desc->data_size,
1837 DMA_TO_DEVICE);
1838 mvneta_txq_desc_put(txq);
1839 }
1840 return 0;
1841}
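/* Rough sketch of the descriptor chain built above, assuming an skb
 * split into two TSO segments:
 *
 *   [F_DESC: hdr 0][data][L_DESC: data]      <- segment 0
 *   [F_DESC: hdr 1][data][L_DESC: data, skb] <- segment 1
 *
 * Header descriptors point into the per-queue txq->tso_hdrs DMA area,
 * which is why IS_TSO_HEADER() skips them when unmapping, and the skb
 * pointer is stored only in the very last data descriptor.
 */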
1842
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001843/* Handle tx fragmentation processing */
1844static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1845 struct mvneta_tx_queue *txq)
1846{
1847 struct mvneta_tx_desc *tx_desc;
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001848 int i, nr_frags = skb_shinfo(skb)->nr_frags;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001849
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001850 for (i = 0; i < nr_frags; i++) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001851 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1852 void *addr = page_address(frag->page.p) + frag->page_offset;
1853
1854 tx_desc = mvneta_txq_next_desc_get(txq);
1855 tx_desc->data_size = frag->size;
1856
1857 tx_desc->buf_phys_addr =
1858 dma_map_single(pp->dev->dev.parent, addr,
1859 tx_desc->data_size, DMA_TO_DEVICE);
1860
1861 if (dma_mapping_error(pp->dev->dev.parent,
1862 tx_desc->buf_phys_addr)) {
1863 mvneta_txq_desc_put(txq);
1864 goto error;
1865 }
1866
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001867 if (i == nr_frags - 1) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001868 /* Last descriptor */
1869 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001870 txq->tx_skb[txq->txq_put_index] = skb;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001871 } else {
1872 /* Descriptor in the middle: Not First, Not Last */
1873 tx_desc->command = 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001874 txq->tx_skb[txq->txq_put_index] = NULL;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001875 }
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001876 mvneta_txq_inc_put(txq);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001877 }
1878
1879 return 0;
1880
1881error:
1882 /* Release all descriptors that were used to map fragments of
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01001883 * this packet, as well as the corresponding DMA mappings
1884 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001885 for (i = i - 1; i >= 0; i--) {
1886 tx_desc = txq->descs + i;
1887 dma_unmap_single(pp->dev->dev.parent,
1888 tx_desc->buf_phys_addr,
1889 tx_desc->data_size,
1890 DMA_TO_DEVICE);
1891 mvneta_txq_desc_put(txq);
1892 }
1893
1894 return -ENOMEM;
1895}
1896
1897/* Main tx processing */
1898static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1899{
1900 struct mvneta_port *pp = netdev_priv(dev);
Willy Tarreauee40a112013-04-11 23:00:37 +02001901 u16 txq_id = skb_get_queue_mapping(skb);
1902 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001903 struct mvneta_tx_desc *tx_desc;
Eric Dumazet5f478b42014-12-02 04:30:59 -08001904 int len = skb->len;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001905 int frags = 0;
1906 u32 tx_cmd;
1907
1908 if (!netif_running(dev))
1909 goto out;
1910
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03001911 if (skb_is_gso(skb)) {
1912 frags = mvneta_tx_tso(skb, dev, txq);
1913 goto out;
1914 }
1915
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001916 frags = skb_shinfo(skb)->nr_frags + 1;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001917
1918 /* Get a descriptor for the first part of the packet */
1919 tx_desc = mvneta_txq_next_desc_get(txq);
1920
1921 tx_cmd = mvneta_skb_tx_csum(pp, skb);
1922
1923 tx_desc->data_size = skb_headlen(skb);
1924
1925 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1926 tx_desc->data_size,
1927 DMA_TO_DEVICE);
1928 if (unlikely(dma_mapping_error(dev->dev.parent,
1929 tx_desc->buf_phys_addr))) {
1930 mvneta_txq_desc_put(txq);
1931 frags = 0;
1932 goto out;
1933 }
1934
1935 if (frags == 1) {
1936 /* First and Last descriptor */
1937 tx_cmd |= MVNETA_TXD_FLZ_DESC;
1938 tx_desc->command = tx_cmd;
1939 txq->tx_skb[txq->txq_put_index] = skb;
1940 mvneta_txq_inc_put(txq);
1941 } else {
1942 /* First but not Last */
1943 tx_cmd |= MVNETA_TXD_F_DESC;
1944 txq->tx_skb[txq->txq_put_index] = NULL;
1945 mvneta_txq_inc_put(txq);
1946 tx_desc->command = tx_cmd;
1947 /* Continue with other skb fragments */
1948 if (mvneta_tx_frag_process(pp, skb, txq)) {
1949 dma_unmap_single(dev->dev.parent,
1950 tx_desc->buf_phys_addr,
1951 tx_desc->data_size,
1952 DMA_TO_DEVICE);
1953 mvneta_txq_desc_put(txq);
1954 frags = 0;
1955 goto out;
1956 }
1957 }
1958
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001959out:
1960 if (frags > 0) {
willy tarreau74c41b02014-01-16 08:20:08 +01001961 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
Ezequiel Garciae19d2dd2014-05-19 13:59:54 -03001962 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
1963
1964 txq->count += frags;
1965 mvneta_txq_pend_desc_add(pp, txq, frags);
1966
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03001967 if (txq->count >= txq->tx_stop_threshold)
Ezequiel Garciae19d2dd2014-05-19 13:59:54 -03001968 netif_tx_stop_queue(nq);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001969
willy tarreau74c41b02014-01-16 08:20:08 +01001970 u64_stats_update_begin(&stats->syncp);
1971 stats->tx_packets++;
Eric Dumazet5f478b42014-12-02 04:30:59 -08001972 stats->tx_bytes += len;
willy tarreau74c41b02014-01-16 08:20:08 +01001973 u64_stats_update_end(&stats->syncp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001974 } else {
1975 dev->stats.tx_dropped++;
1976 dev_kfree_skb_any(skb);
1977 }
1978
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001979 return NETDEV_TX_OK;
1980}
1981
1982
1983/* Free tx resources, when resetting a port */
1984static void mvneta_txq_done_force(struct mvneta_port *pp,
1985 struct mvneta_tx_queue *txq)
1986
1987{
1988 int tx_done = txq->count;
1989
1990 mvneta_txq_bufs_free(pp, txq, tx_done);
1991
1992 /* reset txq */
1993 txq->count = 0;
1994 txq->txq_put_index = 0;
1995 txq->txq_get_index = 0;
1996}
1997
willy tarreau6c498972014-01-16 08:20:12 +01001998/* Handle tx done - called in softirq context. The <cause_tx_done> argument
1999 * must be a valid cause according to MVNETA_TX_INTR_MASK_ALL.
2000 */
Arnaud Ebalard0713a862014-01-16 08:20:18 +01002001static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002002{
2003 struct mvneta_tx_queue *txq;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002004 struct netdev_queue *nq;
2005
willy tarreau6c498972014-01-16 08:20:12 +01002006 while (cause_tx_done) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002007 txq = mvneta_tx_done_policy(pp, cause_tx_done);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002008
2009 nq = netdev_get_tx_queue(pp->dev, txq->id);
2010 __netif_tx_lock(nq, smp_processor_id());
2011
Arnaud Ebalard0713a862014-01-16 08:20:18 +01002012 if (txq->count)
2013 mvneta_txq_done(pp, txq);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002014
2015 __netif_tx_unlock(nq);
2016 cause_tx_done &= ~((1 << txq->id));
2017 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002018}
2019
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002020/* Compute crc8 of the specified address, using a hardware-specific
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002021 * algorithm (per the hw spec) that differs from the generic crc8 algorithm
2022 */
2023static int mvneta_addr_crc(unsigned char *addr)
2024{
2025 int crc = 0;
2026 int i;
2027
2028 for (i = 0; i < ETH_ALEN; i++) {
2029 int j;
2030
2031 crc = (crc ^ addr[i]) << 8;
2032 for (j = 7; j >= 0; j--) {
2033 if (crc & (0x100 << j))
2034 crc ^= 0x107 << j;
2035 }
2036 }
2037
2038 return crc;
2039}
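/* The loop above is a CRC-8 with polynomial 0x107 (x^8 + x^2 + x + 1),
 * fed with the six address bytes MSB first, matching the index the
 * hardware expects for the Other Multicast table.
 */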
2040
2041/* This method controls the net device special MAC multicast support.
2042 * The Special Multicast Table for MAC addresses supports MAC of the form
2043 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2044 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2045 * Table entries in the DA-Filter table. This method sets the
2046 * appropriate Special Multicast Table entry.
2047 */
2048static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2049 unsigned char last_byte,
2050 int queue)
2051{
2052 unsigned int smc_table_reg;
2053 unsigned int tbl_offset;
2054 unsigned int reg_offset;
2055
2056 /* Register offset from SMC table base */
2057 tbl_offset = (last_byte / 4);
2058 /* Entry offset within the above reg */
2059 reg_offset = last_byte % 4;
2060
2061 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2062 + tbl_offset * 4));
2063
2064 if (queue == -1)
2065 smc_table_reg &= ~(0xff << (8 * reg_offset));
2066 else {
2067 smc_table_reg &= ~(0xff << (8 * reg_offset));
2068 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2069 }
2070
2071 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2072 smc_table_reg);
2073}
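/* Worked example (illustrative): for destination 01:00:5e:00:00:2a,
 * last_byte = 0x2a gives tbl_offset = 10 and reg_offset = 2, so byte 2
 * of the 11th SMC table register is updated; writing 0x01 | (queue << 1)
 * there steers such frames to <queue>, while queue == -1 clears the
 * entry again.
 */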
2074
2075/* This method controls the network device Other MAC multicast support.
2076 * The Other Multicast Table is used for multicast of another type.
2077 * A CRC-8 is used as an index to the Other Multicast Table entries
2078 * in the DA-Filter table.
2079 * The method gets the CRC-8 value from the calling routine and
2080 * sets the appropriate Other Multicast Table entry according to the
2081 * specified CRC-8.
2082 */
2083static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2084 unsigned char crc8,
2085 int queue)
2086{
2087 unsigned int omc_table_reg;
2088 unsigned int tbl_offset;
2089 unsigned int reg_offset;
2090
2091 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2092 reg_offset = crc8 % 4; /* Entry offset within the above reg */
2093
2094 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2095
2096 if (queue == -1) {
2097 /* Clear accepts frame bit at specified Other DA table entry */
2098 omc_table_reg &= ~(0xff << (8 * reg_offset));
2099 } else {
2100 omc_table_reg &= ~(0xff << (8 * reg_offset));
2101 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2102 }
2103
2104 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2105}
2106
2107/* The network device supports multicast using two tables:
2108 * 1) Special Multicast Table for MAC addresses of the form
2109 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2110 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2111 * Table entries in the DA-Filter table.
2112 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
2113 * is used as an index to the Other Multicast Table entries in the
2114 * DA-Filter table.
2115 */
2116static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2117 int queue)
2118{
2119 unsigned char crc_result = 0;
2120
2121 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2122 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2123 return 0;
2124 }
2125
2126 crc_result = mvneta_addr_crc(p_addr);
2127 if (queue == -1) {
2128 if (pp->mcast_count[crc_result] == 0) {
2129 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2130 crc_result);
2131 return -EINVAL;
2132 }
2133
2134 pp->mcast_count[crc_result]--;
2135 if (pp->mcast_count[crc_result] != 0) {
2136 netdev_info(pp->dev,
2137 "After delete there are %d valid Mcast for crc8=0x%02x\n",
2138 pp->mcast_count[crc_result], crc_result);
2139 return -EINVAL;
2140 }
2141 } else
2142 pp->mcast_count[crc_result]++;
2143
2144 mvneta_set_other_mcast_addr(pp, crc_result, queue);
2145
2146 return 0;
2147}
2148
2149/* Configure the filtering mode of the Ethernet port */
2150static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2151 int is_promisc)
2152{
2153 u32 port_cfg_reg, val;
2154
2155 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2156
2157 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2158
2159 /* Set / Clear UPM bit in port configuration register */
2160 if (is_promisc) {
2161 /* Accept all Unicast addresses */
2162 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2163 val |= MVNETA_FORCE_UNI;
2164 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2165 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2166 } else {
2167 /* Reject all Unicast addresses */
2168 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2169 val &= ~MVNETA_FORCE_UNI;
2170 }
2171
2172 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2173 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2174}
2175
2176/* register unicast and multicast addresses */
2177static void mvneta_set_rx_mode(struct net_device *dev)
2178{
2179 struct mvneta_port *pp = netdev_priv(dev);
2180 struct netdev_hw_addr *ha;
2181
2182 if (dev->flags & IFF_PROMISC) {
2183 /* Accept all: Multicast + Unicast */
2184 mvneta_rx_unicast_promisc_set(pp, 1);
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01002185 mvneta_set_ucast_table(pp, pp->rxq_def);
2186 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2187 mvneta_set_other_mcast_table(pp, pp->rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002188 } else {
2189 /* Accept single Unicast */
2190 mvneta_rx_unicast_promisc_set(pp, 0);
2191 mvneta_set_ucast_table(pp, -1);
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01002192 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002193
2194 if (dev->flags & IFF_ALLMULTI) {
2195 /* Accept all multicast */
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01002196 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2197 mvneta_set_other_mcast_table(pp, pp->rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002198 } else {
2199 /* Accept only initialized multicast */
2200 mvneta_set_special_mcast_table(pp, -1);
2201 mvneta_set_other_mcast_table(pp, -1);
2202
2203 if (!netdev_mc_empty(dev)) {
2204 netdev_for_each_mc_addr(ha, dev) {
2205 mvneta_mcast_addr_set(pp, ha->addr,
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01002206 pp->rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002207 }
2208 }
2209 }
2210 }
2211}
2212
2213/* Interrupt handling - the callback for request_irq() */
2214static irqreturn_t mvneta_isr(int irq, void *dev_id)
2215{
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002216 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002217
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002218 disable_percpu_irq(port->pp->dev->irq);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002219 napi_schedule(&port->napi);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002220
2221 return IRQ_HANDLED;
2222}
2223
Stas Sergeev898b2972015-04-01 20:32:49 +03002224static int mvneta_fixed_link_update(struct mvneta_port *pp,
2225 struct phy_device *phy)
2226{
2227 struct fixed_phy_status status;
2228 struct fixed_phy_status changed = {};
2229 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2230
2231 status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
2232 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
2233 status.speed = SPEED_1000;
2234 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
2235 status.speed = SPEED_100;
2236 else
2237 status.speed = SPEED_10;
2238 status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
2239 changed.link = 1;
2240 changed.speed = 1;
2241 changed.duplex = 1;
2242 fixed_phy_update_state(phy, &status, &changed);
2243 return 0;
2244}
2245
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002246/* NAPI handler
2247 * Bits 0 - 7 of the causeRxTx register indicate that packets were
2248 * transmitted on the corresponding TXQ (bit 0 is for TX queue 0).
2249 * Bits 8 - 15 of the causeRxTx register indicate that packets were
2250 * received on the corresponding RXQ (bit 8 is for RX queue 0).
2251 * Each CPU has its own causeRxTx register.
2252 */
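/* Example: cause_rx_tx = 0x0101 means "TXQ 0 has sent descriptors to
 * reclaim" (bit 0) and "RXQ 0 has received packets" (bit 8); the TX
 * part is handled first below, then the highest-numbered pending RX
 * queue is served within this NAPI budget.
 */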
2253static int mvneta_poll(struct napi_struct *napi, int budget)
2254{
2255 int rx_done = 0;
2256 u32 cause_rx_tx;
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002257 int rx_queue;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002258 struct mvneta_port *pp = netdev_priv(napi->dev);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002259 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002260
2261 if (!netif_running(pp->dev)) {
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002262 napi_complete(&port->napi);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002263 return rx_done;
2264 }
2265
2266 /* Read cause register */
Stas Sergeev898b2972015-04-01 20:32:49 +03002267 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2268 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2269 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2270
2271 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2272 if (pp->use_inband_status && (cause_misc &
2273 (MVNETA_CAUSE_PHY_STATUS_CHANGE |
2274 MVNETA_CAUSE_LINK_CHANGE |
2275 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
2276 mvneta_fixed_link_update(pp, pp->phy_dev);
2277 }
2278 }
willy tarreau71f6d1b2014-01-16 08:20:11 +01002279
2280 /* Release Tx descriptors */
2281 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
Arnaud Ebalard0713a862014-01-16 08:20:18 +01002282 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
willy tarreau71f6d1b2014-01-16 08:20:11 +01002283 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2284 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002285
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002286 /* For the case where the last mvneta_poll did not process all
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002287 * RX packets
2288 */
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002289 rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2290
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002291 cause_rx_tx |= port->cause_rx_tx;
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002292
2293 if (rx_queue) {
2294 rx_queue = rx_queue - 1;
2295 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]);
2296 }
2297
Maxime Ripardd8936652015-09-25 18:09:37 +02002298 budget -= rx_done;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002299
2300 if (budget > 0) {
2301 cause_rx_tx = 0;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002302 napi_complete(&port->napi);
2303 enable_percpu_irq(pp->dev->irq, 0);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002304 }
2305
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002306 port->cause_rx_tx = cause_rx_tx;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002307 return rx_done;
2308}
2309
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002310/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2311static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2312 int num)
2313{
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002314 int i;
2315
2316 for (i = 0; i < num; i++) {
willy tarreaua1a65ab2014-01-16 08:20:13 +01002317 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2318 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
2319 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002320 __func__, rxq->id, i, num);
2321 break;
2322 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002323 }
2324
2325 /* Add this number of RX descriptors as non occupied (ready to
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002326 * get packets)
2327 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002328 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2329
2330 return i;
2331}
2332
2333/* Free all packets pending transmit from all TXQs and reset TX port */
2334static void mvneta_tx_reset(struct mvneta_port *pp)
2335{
2336 int queue;
2337
Ezequiel Garcia96728502014-05-22 20:06:59 -03002338 /* free the skb's in the tx ring */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002339 for (queue = 0; queue < txq_number; queue++)
2340 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2341
2342 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2343 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2344}
2345
2346static void mvneta_rx_reset(struct mvneta_port *pp)
2347{
2348 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2349 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2350}
2351
2352/* Rx/Tx queue initialization/cleanup methods */
2353
2354/* Create a specified RX queue */
2355static int mvneta_rxq_init(struct mvneta_port *pp,
2356 struct mvneta_rx_queue *rxq)
2357
2358{
2359 rxq->size = pp->rx_ring_size;
2360
2361 /* Allocate memory for RX descriptors */
2362 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2363 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2364 &rxq->descs_phys, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002365 if (rxq->descs == NULL)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002366 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002367
2368 BUG_ON(rxq->descs !=
2369 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2370
2371 rxq->last_desc = rxq->size - 1;
2372
2373 /* Set Rx descriptors queue starting address */
2374 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2375 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2376
2377 /* Set Offset */
2378 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
2379
2380 /* Set coalescing pkts and time */
2381 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2382 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2383
2384 /* Fill RXQ with buffers from RX pool */
2385 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
2386 mvneta_rxq_bm_disable(pp, rxq);
2387 mvneta_rxq_fill(pp, rxq, rxq->size);
2388
2389 return 0;
2390}
2391
2392/* Cleanup Rx queue */
2393static void mvneta_rxq_deinit(struct mvneta_port *pp,
2394 struct mvneta_rx_queue *rxq)
2395{
2396 mvneta_rxq_drop_pkts(pp, rxq);
2397
2398 if (rxq->descs)
2399 dma_free_coherent(pp->dev->dev.parent,
2400 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2401 rxq->descs,
2402 rxq->descs_phys);
2403
2404 rxq->descs = NULL;
2405 rxq->last_desc = 0;
2406 rxq->next_desc_to_proc = 0;
2407 rxq->descs_phys = 0;
2408}
2409
2410/* Create and initialize a tx queue */
2411static int mvneta_txq_init(struct mvneta_port *pp,
2412 struct mvneta_tx_queue *txq)
2413{
Gregory CLEMENT50bf8cb2015-12-09 18:23:51 +01002414 int cpu;
2415
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002416 txq->size = pp->tx_ring_size;
2417
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03002418 /* A queue must always have room for at least one skb.
2419	 * Therefore, stop the queue when the number of free entries drops
2420	 * to the maximum number of descriptors needed by a single skb.
2421 */
2422 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2423 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
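	/* Illustration: mvneta_tx() stops the queue once txq->count
	 * reaches tx_stop_threshold, i.e. when fewer than
	 * MVNETA_MAX_SKB_DESCS entries are left, and mvneta_txq_done()
	 * only wakes it again after the ring has drained to half of
	 * that, providing hysteresis against stop/wake ping-pong.
	 */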
2424
2425
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002426 /* Allocate memory for TX descriptors */
2427 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2428 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2429 &txq->descs_phys, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002430 if (txq->descs == NULL)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002431 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002432
2433 /* Make sure descriptor address is cache line size aligned */
2434 BUG_ON(txq->descs !=
2435 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2436
2437 txq->last_desc = txq->size - 1;
2438
2439 /* Set maximum bandwidth for enabled TXQs */
2440 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2441 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2442
2443 /* Set Tx descriptors queue starting address */
2444 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2445 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2446
2447 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2448 if (txq->tx_skb == NULL) {
2449 dma_free_coherent(pp->dev->dev.parent,
2450 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2451 txq->descs, txq->descs_phys);
2452 return -ENOMEM;
2453 }
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03002454
2455 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2456 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2457 txq->size * TSO_HEADER_SIZE,
2458 &txq->tso_hdrs_phys, GFP_KERNEL);
2459 if (txq->tso_hdrs == NULL) {
2460 kfree(txq->tx_skb);
2461 dma_free_coherent(pp->dev->dev.parent,
2462 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2463 txq->descs, txq->descs_phys);
2464 return -ENOMEM;
2465 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002466 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2467
Gregory CLEMENT50bf8cb2015-12-09 18:23:51 +01002468 /* Setup XPS mapping */
2469 if (txq_number > 1)
2470 cpu = txq->id % num_present_cpus();
2471 else
2472 cpu = pp->rxq_def % num_present_cpus();
2473 cpumask_set_cpu(cpu, &txq->affinity_mask);
2474 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
2475
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002476 return 0;
2477}
2478
2479/* Free the resources allocated for a TX queue by mvneta_txq_init() */
2480static void mvneta_txq_deinit(struct mvneta_port *pp,
2481 struct mvneta_tx_queue *txq)
2482{
2483 kfree(txq->tx_skb);
2484
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03002485 if (txq->tso_hdrs)
2486 dma_free_coherent(pp->dev->dev.parent,
2487 txq->size * TSO_HEADER_SIZE,
2488 txq->tso_hdrs, txq->tso_hdrs_phys);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002489 if (txq->descs)
2490 dma_free_coherent(pp->dev->dev.parent,
2491 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2492 txq->descs, txq->descs_phys);
2493
2494 txq->descs = NULL;
2495 txq->last_desc = 0;
2496 txq->next_desc_to_proc = 0;
2497 txq->descs_phys = 0;
2498
2499 /* Set minimum bandwidth for disabled TXQs */
2500 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2501 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2502
2503 /* Set Tx descriptors queue starting address and size */
2504 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2505 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2506}
2507
2508/* Cleanup all Tx queues */
2509static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2510{
2511 int queue;
2512
2513 for (queue = 0; queue < txq_number; queue++)
2514 mvneta_txq_deinit(pp, &pp->txqs[queue]);
2515}
2516
2517/* Cleanup all Rx queues */
2518static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2519{
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002520 int queue;
2521
2522	for (queue = 0; queue < rxq_number; queue++)
2523 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002524}
2525
2526
2527/* Init all Rx queues */
2528static int mvneta_setup_rxqs(struct mvneta_port *pp)
2529{
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002530 int queue;
2531
2532 for (queue = 0; queue < rxq_number; queue++) {
2533 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
2534
2535 if (err) {
2536 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2537 __func__, queue);
2538 mvneta_cleanup_rxqs(pp);
2539 return err;
2540 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002541 }
2542
2543 return 0;
2544}
2545
2546/* Init all tx queues */
2547static int mvneta_setup_txqs(struct mvneta_port *pp)
2548{
2549 int queue;
2550
2551 for (queue = 0; queue < txq_number; queue++) {
2552 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2553 if (err) {
2554 netdev_err(pp->dev, "%s: can't create txq=%d\n",
2555 __func__, queue);
2556 mvneta_cleanup_txqs(pp);
2557 return err;
2558 }
2559 }
2560
2561 return 0;
2562}
2563
2564static void mvneta_start_dev(struct mvneta_port *pp)
2565{
Gregory CLEMENT6b125d62016-02-04 22:09:25 +01002566 int cpu;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002567
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002568 mvneta_max_rx_size_set(pp, pp->pkt_size);
2569 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2570
2571 /* start the Rx/Tx activity */
2572 mvneta_port_enable(pp);
2573
2574 /* Enable polling on the port */
Gregory CLEMENT129219e2016-02-04 22:09:23 +01002575 for_each_online_cpu(cpu) {
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002576 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2577
2578 napi_enable(&port->napi);
2579 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002580
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002581 /* Unmask interrupts. It has to be done from each CPU */
Gregory CLEMENT6b125d62016-02-04 22:09:25 +01002582 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2583
Stas Sergeev898b2972015-04-01 20:32:49 +03002584 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2585 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2586 MVNETA_CAUSE_LINK_CHANGE |
2587 MVNETA_CAUSE_PSC_SYNC_CHANGE);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002588
2589 phy_start(pp->phy_dev);
2590 netif_tx_start_all_queues(pp->dev);
2591}
2592
2593static void mvneta_stop_dev(struct mvneta_port *pp)
2594{
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002595 unsigned int cpu;
2596
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002597 phy_stop(pp->phy_dev);
2598
Gregory CLEMENT129219e2016-02-04 22:09:23 +01002599 for_each_online_cpu(cpu) {
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002600 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2601
2602 napi_disable(&port->napi);
2603 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002604
2605 netif_carrier_off(pp->dev);
2606
2607 mvneta_port_down(pp);
2608 netif_tx_stop_all_queues(pp->dev);
2609
2610 /* Stop the port activity */
2611 mvneta_port_disable(pp);
2612
2613 /* Clear all ethernet port interrupts */
Gregory CLEMENTdb488c12016-02-04 22:09:27 +01002614 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002615
2616 /* Mask all ethernet port interrupts */
Gregory CLEMENTdb488c12016-02-04 22:09:27 +01002617 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002618
2619 mvneta_tx_reset(pp);
2620 mvneta_rx_reset(pp);
2621}
2622
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002623/* Return positive if MTU is valid */
2624static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2625{
2626 if (mtu < 68) {
2627 netdev_err(dev, "cannot change mtu to less than 68\n");
2628 return -EINVAL;
2629 }
2630
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002631 /* 9676 == 9700 - 20 and rounding to 8 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002632 if (mtu > 9676) {
2633 netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
2634 mtu = 9676;
2635 }
2636
2637 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2638 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2639 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2640 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2641 }
2642
2643 return mtu;
2644}
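/* Examples: a request for an MTU of 10000 is clamped to the 9676-byte
 * jumbo limit derived above; a value whose resulting RX packet size is
 * not 8-byte aligned is rounded as reported by the netdev_info() above.
 */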
2645
2646/* Change the device mtu */
2647static int mvneta_change_mtu(struct net_device *dev, int mtu)
2648{
2649 struct mvneta_port *pp = netdev_priv(dev);
2650 int ret;
2651
2652 mtu = mvneta_check_mtu_valid(dev, mtu);
2653 if (mtu < 0)
2654 return -EINVAL;
2655
2656 dev->mtu = mtu;
2657
Simon Guinotb65657f2015-06-30 16:20:22 +02002658 if (!netif_running(dev)) {
2659 netdev_update_features(dev);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002660 return 0;
Simon Guinotb65657f2015-06-30 16:20:22 +02002661 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002662
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002663 /* The interface is running, so we have to force a
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002664 * reallocation of the queues
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002665 */
2666 mvneta_stop_dev(pp);
2667
2668 mvneta_cleanup_txqs(pp);
2669 mvneta_cleanup_rxqs(pp);
2670
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002671 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
willy tarreau8ec2cd42014-01-16 08:20:16 +01002672 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2673 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002674
2675 ret = mvneta_setup_rxqs(pp);
2676 if (ret) {
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002677 netdev_err(dev, "unable to setup rxqs after MTU change\n");
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002678 return ret;
2679 }
2680
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002681 ret = mvneta_setup_txqs(pp);
2682 if (ret) {
2683 netdev_err(dev, "unable to setup txqs after MTU change\n");
2684 return ret;
2685 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002686
2687 mvneta_start_dev(pp);
2688 mvneta_port_up(pp);
2689
Simon Guinotb65657f2015-06-30 16:20:22 +02002690 netdev_update_features(dev);
2691
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002692 return 0;
2693}
2694
Simon Guinotb65657f2015-06-30 16:20:22 +02002695static netdev_features_t mvneta_fix_features(struct net_device *dev,
2696 netdev_features_t features)
2697{
2698 struct mvneta_port *pp = netdev_priv(dev);
2699
2700 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
2701 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
2702 netdev_info(dev,
2703 "Disable IP checksum for MTU greater than %dB\n",
2704 pp->tx_csum_limit);
2705 }
2706
2707 return features;
2708}
2709
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00002710/* Get mac address */
2711static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2712{
2713 u32 mac_addr_l, mac_addr_h;
2714
2715 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
2716 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
2717 addr[0] = (mac_addr_h >> 24) & 0xFF;
2718 addr[1] = (mac_addr_h >> 16) & 0xFF;
2719 addr[2] = (mac_addr_h >> 8) & 0xFF;
2720 addr[3] = mac_addr_h & 0xFF;
2721 addr[4] = (mac_addr_l >> 8) & 0xFF;
2722 addr[5] = mac_addr_l & 0xFF;
2723}
2724
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002725/* Handle setting mac address */
2726static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2727{
2728 struct mvneta_port *pp = netdev_priv(dev);
Ezequiel Garciae68de362014-05-22 20:07:00 -03002729 struct sockaddr *sockaddr = addr;
2730 int ret;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002731
Ezequiel Garciae68de362014-05-22 20:07:00 -03002732 ret = eth_prepare_mac_addr_change(dev, addr);
2733 if (ret < 0)
2734 return ret;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002735 /* Remove previous address table entry */
2736 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2737
2738 /* Set new addr in hw */
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01002739 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002740
Ezequiel Garciae68de362014-05-22 20:07:00 -03002741 eth_commit_mac_addr_change(dev, addr);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002742 return 0;
2743}
2744
2745static void mvneta_adjust_link(struct net_device *ndev)
2746{
2747 struct mvneta_port *pp = netdev_priv(ndev);
2748 struct phy_device *phydev = pp->phy_dev;
2749 int status_change = 0;
2750
2751 if (phydev->link) {
2752 if ((pp->speed != phydev->speed) ||
2753 (pp->duplex != phydev->duplex)) {
2754 u32 val;
2755
2756 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2757 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2758 MVNETA_GMAC_CONFIG_GMII_SPEED |
Stas Sergeev898b2972015-04-01 20:32:49 +03002759 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002760
2761 if (phydev->duplex)
2762 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2763
2764 if (phydev->speed == SPEED_1000)
2765 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
Thomas Petazzoni4d12bc62014-07-08 10:49:43 +02002766 else if (phydev->speed == SPEED_100)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002767 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2768
2769 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2770
2771 pp->duplex = phydev->duplex;
2772 pp->speed = phydev->speed;
2773 }
2774 }
2775
2776 if (phydev->link != pp->link) {
2777 if (!phydev->link) {
2778 pp->duplex = -1;
2779 pp->speed = 0;
2780 }
2781
2782 pp->link = phydev->link;
2783 status_change = 1;
2784 }
2785
2786 if (status_change) {
2787 if (phydev->link) {
Stas Sergeev898b2972015-04-01 20:32:49 +03002788 if (!pp->use_inband_status) {
2789 u32 val = mvreg_read(pp,
2790 MVNETA_GMAC_AUTONEG_CONFIG);
2791 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
2792 val |= MVNETA_GMAC_FORCE_LINK_PASS;
2793 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2794 val);
2795 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002796 mvneta_port_up(pp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002797 } else {
Stas Sergeev898b2972015-04-01 20:32:49 +03002798 if (!pp->use_inband_status) {
2799 u32 val = mvreg_read(pp,
2800 MVNETA_GMAC_AUTONEG_CONFIG);
2801 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
2802 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
2803 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2804 val);
2805 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002806 mvneta_port_down(pp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002807 }
Ezequiel Garcia0089b742014-10-31 12:57:20 -03002808 phy_print_status(phydev);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002809 }
2810}
2811
2812static int mvneta_mdio_probe(struct mvneta_port *pp)
2813{
2814 struct phy_device *phy_dev;
2815
2816 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2817 pp->phy_interface);
2818 if (!phy_dev) {
2819 netdev_err(pp->dev, "could not find the PHY\n");
2820 return -ENODEV;
2821 }
2822
2823 phy_dev->supported &= PHY_GBIT_FEATURES;
2824 phy_dev->advertising = phy_dev->supported;
2825
2826 pp->phy_dev = phy_dev;
2827 pp->link = 0;
2828 pp->duplex = 0;
2829 pp->speed = 0;
2830
2831 return 0;
2832}
2833
2834static void mvneta_mdio_remove(struct mvneta_port *pp)
2835{
2836 phy_disconnect(pp->phy_dev);
2837 pp->phy_dev = NULL;
2838}
2839
Maxime Ripardf8642882015-09-25 18:09:38 +02002840static void mvneta_percpu_enable(void *arg)
2841{
2842 struct mvneta_port *pp = arg;
2843
2844 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
2845}
2846
2847static void mvneta_percpu_disable(void *arg)
2848{
2849 struct mvneta_port *pp = arg;
2850
2851 disable_percpu_irq(pp->dev->irq);
2852}
2853
2854static void mvneta_percpu_elect(struct mvneta_port *pp)
2855{
Gregory CLEMENTcad5d842016-02-04 22:09:24 +01002856 int elected_cpu = 0, max_cpu, cpu, i = 0;
Maxime Ripardf8642882015-09-25 18:09:38 +02002857
Gregory CLEMENTcad5d842016-02-04 22:09:24 +01002858	/* Use the CPU associated with the default rxq when it is online;
2859	 * in all other cases, use CPU 0, which cannot be offline.
2860 */
2861 if (cpu_online(pp->rxq_def))
2862 elected_cpu = pp->rxq_def;
2863
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002864 max_cpu = num_present_cpus();
Maxime Ripardf8642882015-09-25 18:09:38 +02002865
2866 for_each_online_cpu(cpu) {
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002867 int rxq_map = 0, txq_map = 0;
2868 int rxq;
2869
2870 for (rxq = 0; rxq < rxq_number; rxq++)
2871 if ((rxq % max_cpu) == cpu)
2872 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
2873
Gregory CLEMENTcad5d842016-02-04 22:09:24 +01002874 if (cpu == elected_cpu)
Gregory CLEMENT50bf8cb2015-12-09 18:23:51 +01002875			/* Map the default receive queue to the
2876			 * elected CPU
Maxime Ripardf8642882015-09-25 18:09:38 +02002877 */
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002878 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
Gregory CLEMENT50bf8cb2015-12-09 18:23:51 +01002879
2880 /* We update the TX queue map only if we have one
2881 * queue. In this case we associate the TX queue to
2882 * the CPU bound to the default RX queue
2883 */
2884 if (txq_number == 1)
Gregory CLEMENTcad5d842016-02-04 22:09:24 +01002885 txq_map = (cpu == elected_cpu) ?
Gregory CLEMENT50bf8cb2015-12-09 18:23:51 +01002886 MVNETA_CPU_TXQ_ACCESS(1) : 0;
2887 else
2888 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
2889 MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
2890
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002891 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
2892
2893		/* Update the interrupt mask on each CPU according to the
2894 * new mapping
2895 */
2896 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
2897 pp, true);
Maxime Ripardf8642882015-09-25 18:09:38 +02002898 i++;
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002899
Maxime Ripardf8642882015-09-25 18:09:38 +02002900 }
2901}
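/* Example mapping, assuming 8 RX queues, 4 present CPUs and
 * rxq_def = 0: CPU0 serves RXQs 0 and 4 (including the default queue),
 * CPU1 serves 1 and 5, CPU2 serves 2 and 6, CPU3 serves 3 and 7, and
 * with txq_number == 1 the TX queue is mapped to the elected CPU only.
 */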
2902
2903static int mvneta_percpu_notifier(struct notifier_block *nfb,
2904 unsigned long action, void *hcpu)
2905{
2906 struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
2907 cpu_notifier);
2908 int cpu = (unsigned long)hcpu, other_cpu;
2909 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2910
2911 switch (action) {
2912 case CPU_ONLINE:
2913 case CPU_ONLINE_FROZEN:
2914 netif_tx_stop_all_queues(pp->dev);
2915
2916		/* We have to synchronise on the napi of each CPU
2917		 * except the one just being woken up
2918 */
2919 for_each_online_cpu(other_cpu) {
2920 if (other_cpu != cpu) {
2921 struct mvneta_pcpu_port *other_port =
2922 per_cpu_ptr(pp->ports, other_cpu);
2923
2924 napi_synchronize(&other_port->napi);
2925 }
2926 }
2927
2928 /* Mask all ethernet port interrupts */
Gregory CLEMENTdb488c12016-02-04 22:09:27 +01002929 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
Maxime Ripardf8642882015-09-25 18:09:38 +02002930 napi_enable(&port->napi);
2931
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002932
2933 /* Enable per-CPU interrupts on the CPU that is
2934 * brought up.
2935 */
2936 smp_call_function_single(cpu, mvneta_percpu_enable,
2937 pp, true);
2938
Maxime Ripardf8642882015-09-25 18:09:38 +02002939 /* Enable per-CPU interrupt on the one CPU we care
2940 * about.
2941 */
2942 mvneta_percpu_elect(pp);
2943
Gregory CLEMENTdb488c12016-02-04 22:09:27 +01002944 /* Unmask all ethernet port interrupts */
2945 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
Maxime Ripardf8642882015-09-25 18:09:38 +02002946 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2947 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2948 MVNETA_CAUSE_LINK_CHANGE |
2949 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2950 netif_tx_start_all_queues(pp->dev);
2951 break;
2952 case CPU_DOWN_PREPARE:
2953 case CPU_DOWN_PREPARE_FROZEN:
2954 netif_tx_stop_all_queues(pp->dev);
2955 /* Mask all ethernet port interrupts */
Gregory CLEMENTdb488c12016-02-04 22:09:27 +01002956 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
Maxime Ripardf8642882015-09-25 18:09:38 +02002957
2958 napi_synchronize(&port->napi);
2959 napi_disable(&port->napi);
2960 /* Disable per-CPU interrupts on the CPU that is
2961 * brought down.
2962 */
2963 smp_call_function_single(cpu, mvneta_percpu_disable,
2964 pp, true);
2965
2966 break;
2967 case CPU_DEAD:
2968 case CPU_DEAD_FROZEN:
2969		/* Check if a new CPU must be elected now that this one is down */
2970 mvneta_percpu_elect(pp);
2971 /* Unmask all ethernet port interrupts */
Gregory CLEMENTdb488c12016-02-04 22:09:27 +01002972 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
Maxime Ripardf8642882015-09-25 18:09:38 +02002973 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2974 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2975 MVNETA_CAUSE_LINK_CHANGE |
2976 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2977 netif_tx_start_all_queues(pp->dev);
2978 break;
2979 }
2980
2981 return NOTIFY_OK;
2982}
2983
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002984static int mvneta_open(struct net_device *dev)
2985{
2986 struct mvneta_port *pp = netdev_priv(dev);
Gregory CLEMENT6b125d62016-02-04 22:09:25 +01002987 int ret;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002988
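	/* Size the RX buffers for the current MTU: pkt_size is the largest
	 * frame we expect to receive, frag_size additionally accounts for
	 * the skb_shared_info placed at the end of each buffer.
	 */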
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002989 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
willy tarreau8ec2cd42014-01-16 08:20:16 +01002990 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2991 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002992
2993 ret = mvneta_setup_rxqs(pp);
2994 if (ret)
2995 return ret;
2996
2997 ret = mvneta_setup_txqs(pp);
2998 if (ret)
2999 goto err_cleanup_rxqs;
3000
3001 /* Connect to port interrupt line */
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003002 ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
3003 MVNETA_DRIVER_NAME, pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003004 if (ret) {
3005 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
3006 goto err_cleanup_txqs;
3007 }
3008
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01003009 /* Enable the per-CPU interrupt on all CPUs to handle our RX
3010 * queue interrupts
3011 */
Gregory CLEMENT6b125d62016-02-04 22:09:25 +01003012 on_each_cpu(mvneta_percpu_enable, pp, true);
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01003013
Maxime Ripardf8642882015-09-25 18:09:38 +02003014
3015 /* Register a CPU notifier to handle the case where our CPU
3016 * might be taken offline.
3017 */
3018 register_cpu_notifier(&pp->cpu_notifier);
3019
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003020 /* The link is down by default */
3021 netif_carrier_off(pp->dev);
3022
3023 ret = mvneta_mdio_probe(pp);
3024 if (ret < 0) {
3025 netdev_err(dev, "cannot probe MDIO bus\n");
3026 goto err_free_irq;
3027 }
3028
3029 mvneta_start_dev(pp);
3030
3031 return 0;
3032
3033err_free_irq:
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003034 free_percpu_irq(pp->dev->irq, pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003035err_cleanup_txqs:
3036 mvneta_cleanup_txqs(pp);
3037err_cleanup_rxqs:
3038 mvneta_cleanup_rxqs(pp);
3039 return ret;
3040}
3041
3042/* Stop the port, free port interrupt line */
3043static int mvneta_stop(struct net_device *dev)
3044{
3045 struct mvneta_port *pp = netdev_priv(dev);
3046
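	/* Undo mvneta_open(): stop the datapath, detach the PHY and the CPU
	 * notifier, disable the per-CPU interrupts and release the IRQ and
	 * the RX/TX queues.
	 */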
3047 mvneta_stop_dev(pp);
3048 mvneta_mdio_remove(pp);
Maxime Ripardf8642882015-09-25 18:09:38 +02003049 unregister_cpu_notifier(&pp->cpu_notifier);
Gregory CLEMENT129219e2016-02-04 22:09:23 +01003050 on_each_cpu(mvneta_percpu_disable, pp, true);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003051 free_percpu_irq(dev->irq, pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003052 mvneta_cleanup_rxqs(pp);
3053 mvneta_cleanup_txqs(pp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003054
3055 return 0;
3056}
3057
Thomas Petazzoni15f59452013-09-04 16:26:52 +02003058static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3059{
3060 struct mvneta_port *pp = netdev_priv(dev);
Thomas Petazzoni15f59452013-09-04 16:26:52 +02003061
3062 if (!pp->phy_dev)
3063 return -ENOTSUPP;
3064
Stas Sergeevecf7b362015-04-01 19:23:29 +03003065 return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
Thomas Petazzoni15f59452013-09-04 16:26:52 +02003066}
3067
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003068/* Ethtool methods */
3069
3070 /* Get settings (phy address, speed) for ethtool */
3071int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3072{
3073 struct mvneta_port *pp = netdev_priv(dev);
3074
3075 if (!pp->phy_dev)
3076 return -ENODEV;
3077
3078 return phy_ethtool_gset(pp->phy_dev, cmd);
3079}
3080
3081 /* Set settings (phy address, speed) for ethtool */
3082int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3083{
3084 struct mvneta_port *pp = netdev_priv(dev);
Stas Sergeev0c0744f2015-12-02 20:35:11 +03003085 struct phy_device *phydev = pp->phy_dev;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003086
Stas Sergeev0c0744f2015-12-02 20:35:11 +03003087 if (!phydev)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003088 return -ENODEV;
3089
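	/* Changing the autoneg setting toggles the use of the in-band
	 * (SGMII) link status: when autoneg is disabled, latch the current
	 * PHY speed and duplex into the MAC configuration instead.
	 */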
Stas Sergeev0c0744f2015-12-02 20:35:11 +03003090 if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
3091 u32 val;
3092
3093 mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE);
3094
3095 if (cmd->autoneg == AUTONEG_DISABLE) {
3096 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3097 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
3098 MVNETA_GMAC_CONFIG_GMII_SPEED |
3099 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
3100
3101 if (phydev->duplex)
3102 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3103
3104 if (phydev->speed == SPEED_1000)
3105 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3106 else if (phydev->speed == SPEED_100)
3107 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
3108
3109 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3110 }
3111
3112 pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE);
3113 netdev_info(pp->dev, "autoneg status set to %i\n",
3114 pp->use_inband_status);
3115
3116 if (netif_running(dev)) {
3117 mvneta_port_down(pp);
3118 mvneta_port_up(pp);
3119 }
3120 }
3121
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003122 return phy_ethtool_sset(pp->phy_dev, cmd);
3123}
3124
3125 /* Set interrupt coalescing for ethtool */
3126static int mvneta_ethtool_set_coalesce(struct net_device *dev,
3127 struct ethtool_coalesce *c)
3128{
3129 struct mvneta_port *pp = netdev_priv(dev);
3130 int queue;
3131
3132 for (queue = 0; queue < rxq_number; queue++) {
3133 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3134 rxq->time_coal = c->rx_coalesce_usecs;
3135 rxq->pkts_coal = c->rx_max_coalesced_frames;
3136 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3137 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3138 }
3139
3140 for (queue = 0; queue < txq_number; queue++) {
3141 struct mvneta_tx_queue *txq = &pp->txqs[queue];
3142 txq->done_pkts_coal = c->tx_max_coalesced_frames;
3143 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3144 }
3145
3146 return 0;
3147}
3148
3149 /* Get interrupt coalescing for ethtool */
3150static int mvneta_ethtool_get_coalesce(struct net_device *dev,
3151 struct ethtool_coalesce *c)
3152{
3153 struct mvneta_port *pp = netdev_priv(dev);
3154
3155 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
3156 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
3157
3158 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
3159 return 0;
3160}
3161
3162
3163static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
3164 struct ethtool_drvinfo *drvinfo)
3165{
3166 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
3167 sizeof(drvinfo->driver));
3168 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
3169 sizeof(drvinfo->version));
3170 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
3171 sizeof(drvinfo->bus_info));
3172}
3173
3174
3175static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
3176 struct ethtool_ringparam *ring)
3177{
3178 struct mvneta_port *pp = netdev_priv(netdev);
3179
3180 ring->rx_max_pending = MVNETA_MAX_RXD;
3181 ring->tx_max_pending = MVNETA_MAX_TXD;
3182 ring->rx_pending = pp->rx_ring_size;
3183 ring->tx_pending = pp->tx_ring_size;
3184}
3185
3186static int mvneta_ethtool_set_ringparam(struct net_device *dev,
3187 struct ethtool_ringparam *ring)
3188{
3189 struct mvneta_port *pp = netdev_priv(dev);
3190
3191 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
3192 return -EINVAL;
3193 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
3194 ring->rx_pending : MVNETA_MAX_RXD;
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03003195
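	/* Clamp the TX ring between room for two worst-case skbs
	 * (MVNETA_MAX_SKB_DESCS * 2) and the hardware maximum.
	 */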
3196 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
3197 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
3198 if (pp->tx_ring_size != ring->tx_pending)
3199 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
3200 pp->tx_ring_size, ring->tx_pending);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003201
3202 if (netif_running(dev)) {
3203 mvneta_stop(dev);
3204 if (mvneta_open(dev)) {
3205 netdev_err(dev,
3206 "error on opening device after ring param change\n");
3207 return -ENOMEM;
3208 }
3209 }
3210
3211 return 0;
3212}
3213
Russell King9b0cdef2015-10-22 18:37:30 +01003214static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
3215 u8 *data)
3216{
3217 if (sset == ETH_SS_STATS) {
3218 int i;
3219
3220 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3221 memcpy(data + i * ETH_GSTRING_LEN,
3222 mvneta_statistics[i].name, ETH_GSTRING_LEN);
3223 }
3224}
3225
3226static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
3227{
3228 const struct mvneta_statistic *s;
3229 void __iomem *base = pp->base;
3230 u32 high, low, val;
Jisheng Zhang2c832292016-01-20 16:36:25 +08003231 u64 val64;
Russell King9b0cdef2015-10-22 18:37:30 +01003232 int i;
3233
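	/* Walk the statistics descriptors and accumulate the hardware
	 * counters into ethtool_stats[]; accumulating (+=) avoids losing
	 * counts if the hardware counters are cleared or wrap between reads.
	 */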
3234 for (i = 0, s = mvneta_statistics;
3235 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
3236 s++, i++) {
Russell King9b0cdef2015-10-22 18:37:30 +01003237 switch (s->type) {
3238 case T_REG_32:
3239 val = readl_relaxed(base + s->offset);
Jisheng Zhang2c832292016-01-20 16:36:25 +08003240 pp->ethtool_stats[i] += val;
Russell King9b0cdef2015-10-22 18:37:30 +01003241 break;
3242 case T_REG_64:
3243 /* Docs say to read the low 32 bits first, then the high 32 bits */
3244 low = readl_relaxed(base + s->offset);
3245 high = readl_relaxed(base + s->offset + 4);
Jisheng Zhang2c832292016-01-20 16:36:25 +08003246 val64 = (u64)high << 32 | low;
3247 pp->ethtool_stats[i] += val64;
Russell King9b0cdef2015-10-22 18:37:30 +01003248 break;
3249 }
Russell King9b0cdef2015-10-22 18:37:30 +01003250 }
3251}
3252
3253static void mvneta_ethtool_get_stats(struct net_device *dev,
3254 struct ethtool_stats *stats, u64 *data)
3255{
3256 struct mvneta_port *pp = netdev_priv(dev);
3257 int i;
3258
3259 mvneta_ethtool_update_stats(pp);
3260
3261 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3262 *data++ = pp->ethtool_stats[i];
3263}
3264
3265static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
3266{
3267 if (sset == ETH_SS_STATS)
3268 return ARRAY_SIZE(mvneta_statistics);
3269 return -EOPNOTSUPP;
3270}
3271
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01003272static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
3273{
3274 return MVNETA_RSS_LU_TABLE_SIZE;
3275}
3276
3277static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
3278 struct ethtool_rxnfc *info,
3279 u32 *rules __always_unused)
3280{
3281 switch (info->cmd) {
3282 case ETHTOOL_GRXRINGS:
3283 info->data = rxq_number;
3284 return 0;
3285 case ETHTOOL_GRXFH:
3286 return -EOPNOTSUPP;
3287 default:
3288 return -EOPNOTSUPP;
3289 }
3290}
3291
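/* Apply a new RSS configuration: quiesce the port (stop TX, mask the
 * interrupts, disable every per-CPU NAPI context), retarget the default
 * RX queue and the unicast filtering, re-elect the handling CPU, then
 * bring everything back up.
 */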
3292static int mvneta_config_rss(struct mvneta_port *pp)
3293{
3294 int cpu;
3295 u32 val;
3296
3297 netif_tx_stop_all_queues(pp->dev);
3298
Gregory CLEMENT6b125d62016-02-04 22:09:25 +01003299 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01003300
3301 /* We have to synchronise on the napi of each CPU */
3302 for_each_online_cpu(cpu) {
3303 struct mvneta_pcpu_port *pcpu_port =
3304 per_cpu_ptr(pp->ports, cpu);
3305
3306 napi_synchronize(&pcpu_port->napi);
3307 napi_disable(&pcpu_port->napi);
3308 }
3309
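	/* Only the first entry of the indirection table is honoured here:
	 * it selects the queue that becomes the new default RX queue.
	 */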
3310 pp->rxq_def = pp->indir[0];
3311
3312 /* Update unicast mapping */
3313 mvneta_set_rx_mode(pp->dev);
3314
3315 /* Update the port config register so all RX queue types use the new default queue */
3316 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
3317 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
3318
3319 /* Update the elected CPU matching the new rxq_def */
3320 mvneta_percpu_elect(pp);
3321
3322 /* We have to synchronise on the napi of each CPU */
3323 for_each_online_cpu(cpu) {
3324 struct mvneta_pcpu_port *pcpu_port =
3325 per_cpu_ptr(pp->ports, cpu);
3326
3327 napi_enable(&pcpu_port->napi);
3328 }
3329
3330 netif_tx_start_all_queues(pp->dev);
3331
3332 return 0;
3333}
3334
3335static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
3336 const u8 *key, const u8 hfunc)
3337{
3338 struct mvneta_port *pp = netdev_priv(dev);
3339 /* We require at least one supported parameter to be changed
3340 * and no change in any of the unsupported parameters
3341 */
3342 if (key ||
3343 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
3344 return -EOPNOTSUPP;
3345
3346 if (!indir)
3347 return 0;
3348
3349 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
3350
3351 return mvneta_config_rss(pp);
3352}
3353
3354static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
3355 u8 *hfunc)
3356{
3357 struct mvneta_port *pp = netdev_priv(dev);
3358
3359 if (hfunc)
3360 *hfunc = ETH_RSS_HASH_TOP;
3361
3362 if (!indir)
3363 return 0;
3364
3365 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
3366
3367 return 0;
3368}
3369
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003370static const struct net_device_ops mvneta_netdev_ops = {
3371 .ndo_open = mvneta_open,
3372 .ndo_stop = mvneta_stop,
3373 .ndo_start_xmit = mvneta_tx,
3374 .ndo_set_rx_mode = mvneta_set_rx_mode,
3375 .ndo_set_mac_address = mvneta_set_mac_addr,
3376 .ndo_change_mtu = mvneta_change_mtu,
Simon Guinotb65657f2015-06-30 16:20:22 +02003377 .ndo_fix_features = mvneta_fix_features,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003378 .ndo_get_stats64 = mvneta_get_stats64,
Thomas Petazzoni15f59452013-09-04 16:26:52 +02003379 .ndo_do_ioctl = mvneta_ioctl,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003380};
3381
3382const struct ethtool_ops mvneta_eth_tool_ops = {
3383 .get_link = ethtool_op_get_link,
3384 .get_settings = mvneta_ethtool_get_settings,
3385 .set_settings = mvneta_ethtool_set_settings,
3386 .set_coalesce = mvneta_ethtool_set_coalesce,
3387 .get_coalesce = mvneta_ethtool_get_coalesce,
3388 .get_drvinfo = mvneta_ethtool_get_drvinfo,
3389 .get_ringparam = mvneta_ethtool_get_ringparam,
3390 .set_ringparam = mvneta_ethtool_set_ringparam,
Russell King9b0cdef2015-10-22 18:37:30 +01003391 .get_strings = mvneta_ethtool_get_strings,
3392 .get_ethtool_stats = mvneta_ethtool_get_stats,
3393 .get_sset_count = mvneta_ethtool_get_sset_count,
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01003394 .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
3395 .get_rxnfc = mvneta_ethtool_get_rxnfc,
3396 .get_rxfh = mvneta_ethtool_get_rxfh,
3397 .set_rxfh = mvneta_ethtool_set_rxfh,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003398};
3399
3400/* Initialize hw */
Ezequiel Garcia96728502014-05-22 20:06:59 -03003401static int mvneta_init(struct device *dev, struct mvneta_port *pp)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003402{
3403 int queue;
3404
3405 /* Disable port */
3406 mvneta_port_disable(pp);
3407
3408 /* Set port default values */
3409 mvneta_defaults_set(pp);
3410
Ezequiel Garcia96728502014-05-22 20:06:59 -03003411 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
3412 GFP_KERNEL);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003413 if (!pp->txqs)
3414 return -ENOMEM;
3415
3416 /* Initialize TX descriptor rings */
3417 for (queue = 0; queue < txq_number; queue++) {
3418 struct mvneta_tx_queue *txq = &pp->txqs[queue];
3419 txq->id = queue;
3420 txq->size = pp->tx_ring_size;
3421 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
3422 }
3423
Ezequiel Garcia96728502014-05-22 20:06:59 -03003424 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
3425 GFP_KERNEL);
3426 if (!pp->rxqs)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003427 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003428
3429 /* Create Rx descriptor rings */
3430 for (queue = 0; queue < rxq_number; queue++) {
3431 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3432 rxq->id = queue;
3433 rxq->size = pp->rx_ring_size;
3434 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
3435 rxq->time_coal = MVNETA_RX_COAL_USEC;
3436 }
3437
3438 return 0;
3439}
3440
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003441/* platform glue : initialize decoding windows */
Greg KH03ce7582012-12-21 13:42:15 +00003442static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
3443 const struct mbus_dram_target_info *dram)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003444{
3445 u32 win_enable;
3446 u32 win_protect;
3447 int i;
3448
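	/* Clear all six address decoding windows first, then program one
	 * window per DRAM chip-select, enable it and grant it full access
	 * in the protection register.
	 */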
3449 for (i = 0; i < 6; i++) {
3450 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
3451 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
3452
3453 if (i < 4)
3454 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
3455 }
3456
3457 win_enable = 0x3f;
3458 win_protect = 0;
3459
3460 for (i = 0; i < dram->num_cs; i++) {
3461 const struct mbus_dram_window *cs = dram->cs + i;
3462 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
3463 (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
3464
3465 mvreg_write(pp, MVNETA_WIN_SIZE(i),
3466 (cs->size - 1) & 0xffff0000);
3467
3468 win_enable &= ~(1 << i);
3469 win_protect |= 3 << (2 * i);
3470 }
3471
3472 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
Marcin Wojtasdb6ba9a2015-11-30 13:27:41 +01003473 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003474}
3475
3476/* Power up the port */
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003477static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003478{
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003479 u32 ctrl;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003480
3481 /* MAC Cause register should be cleared */
3482 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
3483
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003484 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003485
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003486 /* Even though it might look weird, when we're configured in
3487 * SGMII or QSGMII mode, the RGMII bit needs to be set.
3488 */
3489 switch (phy_mode) {
3490 case PHY_INTERFACE_MODE_QSGMII:
3491 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
3492 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
3493 break;
3494 case PHY_INTERFACE_MODE_SGMII:
3495 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
3496 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
3497 break;
3498 case PHY_INTERFACE_MODE_RGMII:
3499 case PHY_INTERFACE_MODE_RGMII_ID:
3500 ctrl |= MVNETA_GMAC2_PORT_RGMII;
3501 break;
3502 default:
3503 return -EINVAL;
3504 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003505
3506 /* Cancel Port Reset */
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003507 ctrl &= ~MVNETA_GMAC2_PORT_RESET;
3508 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003509
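	/* Busy-wait until the port reset bit reads back as cleared */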
3510 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
3511 MVNETA_GMAC2_PORT_RESET) != 0)
3512 continue;
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003513
3514 return 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003515}
3516
3517/* Device initialization routine */
Greg KH03ce7582012-12-21 13:42:15 +00003518static int mvneta_probe(struct platform_device *pdev)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003519{
3520 const struct mbus_dram_target_info *dram_target_info;
Thomas Petazzonic3f0dd32014-03-27 11:39:29 +01003521 struct resource *res;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003522 struct device_node *dn = pdev->dev.of_node;
3523 struct device_node *phy_node;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003524 struct mvneta_port *pp;
3525 struct net_device *dev;
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00003526 const char *dt_mac_addr;
3527 char hw_mac_addr[ETH_ALEN];
3528 const char *mac_from;
Stas Sergeevf8af8e62015-07-20 17:49:58 -07003529 const char *managed;
Marcin Wojtas9110ee02015-11-30 13:27:45 +01003530 int tx_csum_limit;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003531 int phy_mode;
3532 int err;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003533 int cpu;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003534
Willy Tarreauee40a112013-04-11 23:00:37 +02003535 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003536 if (!dev)
3537 return -ENOMEM;
3538
3539 dev->irq = irq_of_parse_and_map(dn, 0);
3540 if (dev->irq == 0) {
3541 err = -EINVAL;
3542 goto err_free_netdev;
3543 }
3544
3545 phy_node = of_parse_phandle(dn, "phy", 0);
3546 if (!phy_node) {
Thomas Petazzoni83895be2014-05-16 16:14:06 +02003547 if (!of_phy_is_fixed_link(dn)) {
3548 dev_err(&pdev->dev, "no PHY specified\n");
3549 err = -ENODEV;
3550 goto err_free_irq;
3551 }
3552
3553 err = of_phy_register_fixed_link(dn);
3554 if (err < 0) {
3555 dev_err(&pdev->dev, "cannot register fixed PHY\n");
3556 goto err_free_irq;
3557 }
3558
3559 /* In the case of a fixed PHY, the DT node associated
3560 * with the PHY is the Ethernet MAC DT node.
3561 */
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02003562 phy_node = of_node_get(dn);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003563 }
3564
3565 phy_mode = of_get_phy_mode(dn);
3566 if (phy_mode < 0) {
3567 dev_err(&pdev->dev, "incorrect phy-mode\n");
3568 err = -EINVAL;
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02003569 goto err_put_phy_node;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003570 }
3571
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003572 dev->tx_queue_len = MVNETA_MAX_TXD;
3573 dev->watchdog_timeo = 5 * HZ;
3574 dev->netdev_ops = &mvneta_netdev_ops;
3575
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00003576 dev->ethtool_ops = &mvneta_eth_tool_ops;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003577
3578 pp = netdev_priv(dev);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003579 pp->phy_node = phy_node;
3580 pp->phy_interface = phy_mode;
Stas Sergeevf8af8e62015-07-20 17:49:58 -07003581
3582 err = of_property_read_string(dn, "managed", &managed);
3583 pp->use_inband_status = (err == 0 &&
3584 strcmp(managed, "in-band-status") == 0);
Maxime Ripardf8642882015-09-25 18:09:38 +02003585 pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003586
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01003587 pp->rxq_def = rxq_def;
3588
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01003589 pp->indir[0] = rxq_def;
3590
Jisheng Zhang2804ba42016-01-20 19:27:23 +08003591 pp->clk = devm_clk_get(&pdev->dev, "core");
3592 if (IS_ERR(pp->clk))
3593 pp->clk = devm_clk_get(&pdev->dev, NULL);
Thomas Petazzoni189dd622012-11-19 14:15:25 +01003594 if (IS_ERR(pp->clk)) {
3595 err = PTR_ERR(pp->clk);
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02003596 goto err_put_phy_node;
Thomas Petazzoni189dd622012-11-19 14:15:25 +01003597 }
3598
3599 clk_prepare_enable(pp->clk);
3600
Jisheng Zhang15cc4a42016-01-20 19:27:24 +08003601 pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
3602 if (!IS_ERR(pp->clk_bus))
3603 clk_prepare_enable(pp->clk_bus);
3604
Thomas Petazzonic3f0dd32014-03-27 11:39:29 +01003605 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3606 pp->base = devm_ioremap_resource(&pdev->dev, res);
3607 if (IS_ERR(pp->base)) {
3608 err = PTR_ERR(pp->base);
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02003609 goto err_clk;
3610 }
3611
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003612 /* Alloc per-cpu port structure */
3613 pp->ports = alloc_percpu(struct mvneta_pcpu_port);
3614 if (!pp->ports) {
3615 err = -ENOMEM;
3616 goto err_clk;
3617 }
3618
willy tarreau74c41b02014-01-16 08:20:08 +01003619 /* Alloc per-cpu stats */
WANG Cong1c213bd2014-02-13 11:46:28 -08003620 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
willy tarreau74c41b02014-01-16 08:20:08 +01003621 if (!pp->stats) {
3622 err = -ENOMEM;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003623 goto err_free_ports;
willy tarreau74c41b02014-01-16 08:20:08 +01003624 }
3625
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00003626 dt_mac_addr = of_get_mac_address(dn);
Luka Perkov6c7a9a32013-10-30 00:10:01 +01003627 if (dt_mac_addr) {
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00003628 mac_from = "device tree";
3629 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
3630 } else {
3631 mvneta_get_mac_addr(pp, hw_mac_addr);
3632 if (is_valid_ether_addr(hw_mac_addr)) {
3633 mac_from = "hardware";
3634 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
3635 } else {
3636 mac_from = "random";
3637 eth_hw_addr_random(dev);
3638 }
3639 }
3640
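	/* TX checksum offload is limited in size: take the limit from the
	 * optional "tx-csum-limit" DT property when it is sane, otherwise
	 * fall back to the conservative default on Armada 370 and to the
	 * maximum supported size on the other SoCs.
	 */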
Marcin Wojtas9110ee02015-11-30 13:27:45 +01003641 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
3642 if (tx_csum_limit < 0 ||
3643 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
3644 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
3645 dev_info(&pdev->dev,
3646 "Wrong TX csum limit in DT, set to %dB\n",
3647 MVNETA_TX_CSUM_DEF_SIZE);
3648 }
3649 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
3650 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
3651 } else {
3652 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
3653 }
3654
3655 pp->tx_csum_limit = tx_csum_limit;
Simon Guinotb65657f2015-06-30 16:20:22 +02003656
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003657 pp->tx_ring_size = MVNETA_MAX_TXD;
3658 pp->rx_ring_size = MVNETA_MAX_RXD;
3659
3660 pp->dev = dev;
3661 SET_NETDEV_DEV(dev, &pdev->dev);
3662
Ezequiel Garcia96728502014-05-22 20:06:59 -03003663 err = mvneta_init(&pdev->dev, pp);
3664 if (err < 0)
willy tarreau74c41b02014-01-16 08:20:08 +01003665 goto err_free_stats;
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003666
3667 err = mvneta_port_power_up(pp, phy_mode);
3668 if (err < 0) {
3669 dev_err(&pdev->dev, "can't power up port\n");
Ezequiel Garcia96728502014-05-22 20:06:59 -03003670 goto err_free_stats;
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003671 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003672
3673 dram_target_info = mv_mbus_dram_info();
3674 if (dram_target_info)
3675 mvneta_conf_mbus_windows(pp, dram_target_info);
3676
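	/* Each present CPU gets its own per-CPU port structure with its own
	 * NAPI context, matching the per-CPU interrupt requested at open time.
	 */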
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003677 for_each_present_cpu(cpu) {
3678 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3679
3680 netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
3681 port->pp = pp;
3682 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003683
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03003684 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
Ezequiel Garcia01ef26c2014-05-19 13:59:53 -03003685 dev->hw_features |= dev->features;
3686 dev->vlan_features |= dev->features;
willy tarreaub50b72d2013-04-06 08:47:01 +00003687 dev->priv_flags |= IFF_UNICAST_FLT;
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03003688 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
willy tarreaub50b72d2013-04-06 08:47:01 +00003689
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003690 err = register_netdev(dev);
3691 if (err < 0) {
3692 dev_err(&pdev->dev, "failed to register\n");
Ezequiel Garcia96728502014-05-22 20:06:59 -03003693 goto err_free_stats;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003694 }
3695
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00003696 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
3697 dev->dev_addr);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003698
3699 platform_set_drvdata(pdev, pp->dev);
3700
Stas Sergeev898b2972015-04-01 20:32:49 +03003701 if (pp->use_inband_status) {
3702 struct phy_device *phy = of_phy_find_device(dn);
3703
3704 mvneta_fixed_link_update(pp, phy);
Russell King04d53b22015-09-24 20:36:18 +01003705
Andrew Lunne5a03bf2016-01-06 20:11:16 +01003706 put_device(&phy->mdio.dev);
Stas Sergeev898b2972015-04-01 20:32:49 +03003707 }
3708
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003709 return 0;
3710
willy tarreau74c41b02014-01-16 08:20:08 +01003711err_free_stats:
3712 free_percpu(pp->stats);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003713err_free_ports:
3714 free_percpu(pp->ports);
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02003715err_clk:
Jisheng Zhang15cc4a42016-01-20 19:27:24 +08003716 clk_disable_unprepare(pp->clk_bus);
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02003717 clk_disable_unprepare(pp->clk);
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02003718err_put_phy_node:
3719 of_node_put(phy_node);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003720err_free_irq:
3721 irq_dispose_mapping(dev->irq);
3722err_free_netdev:
3723 free_netdev(dev);
3724 return err;
3725}
3726
3727/* Device removal routine */
Greg KH03ce7582012-12-21 13:42:15 +00003728static int mvneta_remove(struct platform_device *pdev)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003729{
3730 struct net_device *dev = platform_get_drvdata(pdev);
3731 struct mvneta_port *pp = netdev_priv(dev);
3732
3733 unregister_netdev(dev);
Jisheng Zhang15cc4a42016-01-20 19:27:24 +08003734 clk_disable_unprepare(pp->clk_bus);
Thomas Petazzoni189dd622012-11-19 14:15:25 +01003735 clk_disable_unprepare(pp->clk);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003736 free_percpu(pp->ports);
willy tarreau74c41b02014-01-16 08:20:08 +01003737 free_percpu(pp->stats);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003738 irq_dispose_mapping(dev->irq);
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02003739 of_node_put(pp->phy_node);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003740 free_netdev(dev);
3741
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003742 return 0;
3743}
3744
3745static const struct of_device_id mvneta_match[] = {
3746 { .compatible = "marvell,armada-370-neta" },
Simon Guinotf522a972015-06-30 16:20:20 +02003747 { .compatible = "marvell,armada-xp-neta" },
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003748 { }
3749};
3750MODULE_DEVICE_TABLE(of, mvneta_match);
3751
3752static struct platform_driver mvneta_driver = {
3753 .probe = mvneta_probe,
Greg KH03ce7582012-12-21 13:42:15 +00003754 .remove = mvneta_remove,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003755 .driver = {
3756 .name = MVNETA_DRIVER_NAME,
3757 .of_match_table = mvneta_match,
3758 },
3759};
3760
3761module_platform_driver(mvneta_driver);
3762
3763MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
3764MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
3765MODULE_LICENSE("GPL");
3766
3767module_param(rxq_number, int, S_IRUGO);
3768module_param(txq_number, int, S_IRUGO);
3769
3770module_param(rxq_def, int, S_IRUGO);
willy tarreauf19fadf2014-01-16 08:20:17 +01003771module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);