/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
#define MVNETA_PORT_RX_RESET 0x1cc0
#define MVNETA_PORT_RX_DMA_RESET BIT(0)
#define MVNETA_PHY_ADDR 0x2000
#define MVNETA_PHY_ADDR_MASK 0x1f
#define MVNETA_MBUS_RETRY 0x2010
#define MVNETA_UNIT_INTR_CAUSE 0x2080
#define MVNETA_UNIT_CONTROL 0x20B0
#define MVNETA_PHY_POLLING_ENABLE BIT(1)
#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
#define MVNETA_DEF_RXQ(q) ((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
                                          MVNETA_DEF_RXQ_ARP(q) | \
                                          MVNETA_DEF_RXQ_TCP(q) | \
                                          MVNETA_DEF_RXQ_UDP(q) | \
                                          MVNETA_DEF_RXQ_BPDU(q) | \
                                          MVNETA_TX_UNSET_ERR_SUM | \
                                          MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND 0x2404
#define MVNETA_MAC_ADDR_LOW 0x2414
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
#define MVNETA_OVERRUN_FRAME_COUNT 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated to a queue is not
 * set, then a read of this register from that CPU will always return
 * 0 and a write won't do anything.
 */

#define MVNETA_INTR_NEW_CAUSE 0x25a0
#define MVNETA_INTR_NEW_MASK 0x25a4

/* bits 0..7 = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK BIT(31)

#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
#define MVNETA_CAUSE_PTP BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
#define MVNETA_CAUSE_PRBS_ERR BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff

#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
#define MVNETA_GMAC_LINK_UP BIT(0)
#define MVNETA_GMAC_SPEED_1000 BIT(1)
#define MVNETA_GMAC_SPEED_100 BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
#define MVNETA_DA_FILT_UCAST_BASE 0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index) \
        (((index) < (q)->last_desc) ? ((index) + 1) : 0)
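
/* For example, with a 128-entry ring (last_desc = 127), index 126
 * advances to 127 and 127 wraps back to 0.
 */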

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS 1
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE 2
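
/* Note: the two-byte MH in front of the 14-byte Ethernet header makes
 * the L2 prefix 16 bytes long, which is what yields the 4-byte IP
 * header alignment described above.
 */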

#define MVNETA_VLAN_TAG_LEN 4

#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
#define MVNETA_TX_CSUM_DEF_SIZE 1600
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT 1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000

#define MVNETA_TX_MTU_MAX 0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE 1

/* TSO header size */
#define TSO_HEADER_SIZE 128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100

#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32

#define MVNETA_RX_PKT_SIZE(mtu) \
        ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
              ETH_HLEN + ETH_FCS_LEN, \
              MVNETA_CPU_D_CACHE_LINE_SIZE)
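
/* For example, MVNETA_RX_PKT_SIZE(1500) =
 * ALIGN(1500 + 2 + 4 + 14 + 4, 32) = ALIGN(1524, 32) = 1536 bytes.
 */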

#define IS_TSO_HEADER(txq, addr) \
        ((addr >= txq->tso_hdrs_phys) && \
         (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
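
/* The TSO headers are kept in a single per-txq DMA region (tso_hdrs_phys),
 * so buffers matching this test are not unmapped individually in
 * mvneta_txq_bufs_free().
 */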

#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)

struct mvneta_statistic {
        unsigned short offset;
        unsigned short type;
        const char name[ETH_GSTRING_LEN];
};

#define T_REG_32 32
#define T_REG_64 64

static const struct mvneta_statistic mvneta_statistics[] = {
        { 0x3000, T_REG_64, "good_octets_received", },
        { 0x3010, T_REG_32, "good_frames_received", },
        { 0x3008, T_REG_32, "bad_octets_received", },
        { 0x3014, T_REG_32, "bad_frames_received", },
        { 0x3018, T_REG_32, "broadcast_frames_received", },
        { 0x301c, T_REG_32, "multicast_frames_received", },
        { 0x3050, T_REG_32, "unrec_mac_control_received", },
        { 0x3058, T_REG_32, "good_fc_received", },
        { 0x305c, T_REG_32, "bad_fc_received", },
        { 0x3060, T_REG_32, "undersize_received", },
        { 0x3064, T_REG_32, "fragments_received", },
        { 0x3068, T_REG_32, "oversize_received", },
        { 0x306c, T_REG_32, "jabber_received", },
        { 0x3070, T_REG_32, "mac_receive_error", },
        { 0x3074, T_REG_32, "bad_crc_event", },
        { 0x3078, T_REG_32, "collision", },
        { 0x307c, T_REG_32, "late_collision", },
        { 0x2484, T_REG_32, "rx_discard", },
        { 0x2488, T_REG_32, "rx_overrun", },
        { 0x3020, T_REG_32, "frames_64_octets", },
        { 0x3024, T_REG_32, "frames_65_to_127_octets", },
        { 0x3028, T_REG_32, "frames_128_to_255_octets", },
        { 0x302c, T_REG_32, "frames_256_to_511_octets", },
        { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
        { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
        { 0x3038, T_REG_64, "good_octets_sent", },
        { 0x3040, T_REG_32, "good_frames_sent", },
        { 0x3044, T_REG_32, "excessive_collision", },
        { 0x3048, T_REG_32, "multicast_frames_sent", },
        { 0x304c, T_REG_32, "broadcast_frames_sent", },
        { 0x3054, T_REG_32, "fc_sent", },
        { 0x300c, T_REG_32, "internal_mac_transmit_err", },
};
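
/* Note: these names are what the driver's ethtool statistics hooks
 * (e.g. "ethtool -S <iface>") are expected to report; T_REG_64 counters
 * presumably span two consecutive 32-bit registers.
 */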

struct mvneta_pcpu_stats {
        struct u64_stats_sync syncp;
        u64 rx_packets;
        u64 rx_bytes;
        u64 tx_packets;
        u64 tx_bytes;
};

struct mvneta_pcpu_port {
        /* Pointer to the shared port */
        struct mvneta_port *pp;

        /* Pointer to the CPU-local NAPI struct */
        struct napi_struct napi;

        /* Cause of the previous interrupt */
        u32 cause_rx_tx;
};

struct mvneta_port {
        struct mvneta_pcpu_port __percpu *ports;
        struct mvneta_pcpu_stats __percpu *stats;

        int pkt_size;
        unsigned int frag_size;
        void __iomem *base;
        struct mvneta_rx_queue *rxqs;
        struct mvneta_tx_queue *txqs;
        struct net_device *dev;
        struct notifier_block cpu_notifier;
        int rxq_def;

        /* Core clock */
        struct clk *clk;
        /* AXI clock */
        struct clk *clk_bus;
        u8 mcast_count[256];
        u16 tx_ring_size;
        u16 rx_ring_size;

        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
        phy_interface_t phy_interface;
        struct device_node *phy_node;
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;
        unsigned int tx_csum_limit;
        unsigned int use_inband_status:1;

        u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

        u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
#define MVNETA_TX_L3_IP6 BIT(17)
#define MVNETA_TXD_IP_CSUM BIT(18)
#define MVNETA_TXD_Z_PAD BIT(19)
#define MVNETA_TXD_L_DESC BIT(20)
#define MVNETA_TXD_F_DESC BIT(21)
#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
                             MVNETA_TXD_L_DESC | \
                             MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)

#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
#define MVNETA_RXD_ERR_LEN BIT(18)
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
        u32 command;            /* Options used by HW for packet transmitting.*/
        u16 reserverd1;         /* csum_l4 (for future use) */
        u16 data_size;          /* Data size of transmitted packet in bytes */
        u32 buf_phys_addr;      /* Physical addr of transmitted buffer */
        u32 reserved2;          /* hw_cmd - (for future use, PMT) */
        u32 reserved3[4];       /* Reserved - (for future use) */
};

struct mvneta_rx_desc {
        u32 status;             /* Info about received packet */
        u16 reserved1;          /* pnc_info - (for future use, PnC) */
        u16 data_size;          /* Size of received packet in bytes */

        u32 buf_phys_addr;      /* Physical address of the buffer */
        u32 reserved2;          /* pnc_flow_id (for future use, PnC) */

        u32 buf_cookie;         /* cookie for access to RX buffer in rx path */
        u16 reserved3;          /* prefetch_cmd, for future use */
        u16 reserved4;          /* csum_l4 - (for future use, PnC) */

        u32 reserved5;          /* pnc_extra PnC (for future use, PnC) */
        u32 reserved6;          /* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
        u16 data_size;          /* Data size of transmitted packet in bytes */
        u16 reserverd1;         /* csum_l4 (for future use) */
        u32 command;            /* Options used by HW for packet transmitting.*/
        u32 reserved2;          /* hw_cmd - (for future use, PMT) */
        u32 buf_phys_addr;      /* Physical addr of transmitted buffer */
        u32 reserved3[4];       /* Reserved - (for future use) */
};

struct mvneta_rx_desc {
        u16 data_size;          /* Size of received packet in bytes */
        u16 reserved1;          /* pnc_info - (for future use, PnC) */
        u32 status;             /* Info about received packet */

        u32 reserved2;          /* pnc_flow_id (for future use, PnC) */
        u32 buf_phys_addr;      /* Physical address of the buffer */

        u16 reserved4;          /* csum_l4 - (for future use, PnC) */
        u16 reserved3;          /* prefetch_cmd, for future use */
        u32 buf_cookie;         /* cookie for access to RX buffer in rx path */

        u32 reserved5;          /* pnc_extra PnC (for future use, PnC) */
        u32 reserved6;          /* hw_cmd (for future use, PnC and HWF) */
};
#endif

struct mvneta_tx_queue {
        /* Number of this TX queue, in the range 0-7 */
        u8 id;

        /* Number of TX DMA descriptors in the descriptor ring */
        int size;

        /* Number of currently used TX DMA descriptor in the
         * descriptor ring
         */
        int count;
        int tx_stop_threshold;
        int tx_wake_threshold;

        /* Array of transmitted skb */
        struct sk_buff **tx_skb;

        /* Index of last TX DMA descriptor that was inserted */
        int txq_put_index;

        /* Index of the TX DMA descriptor to be cleaned up */
        int txq_get_index;

        u32 done_pkts_coal;

        /* Virtual address of the TX DMA descriptors array */
        struct mvneta_tx_desc *descs;

        /* DMA address of the TX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last TX DMA descriptor */
        int last_desc;

        /* Index of the next TX DMA descriptor to process */
        int next_desc_to_proc;

        /* DMA buffers for TSO headers */
        char *tso_hdrs;

        /* DMA address of TSO headers */
        dma_addr_t tso_hdrs_phys;

        /* Affinity mask for CPUs */
        cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
        /* rx queue number, in the range 0-7 */
        u8 id;

        /* num of rx descriptors in the rx descriptor ring */
        int size;

        /* counter of times when mvneta_refill() failed */
        int missed;

        u32 pkts_coal;
        u32 time_coal;

        /* Virtual address of the RX DMA descriptors array */
        struct mvneta_rx_desc *descs;

        /* DMA address of the RX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last RX DMA descriptor */
        int last_desc;

        /* Index of the next RX DMA descriptor to process */
        int next_desc_to_proc;
};

/* The hardware supports eight (8) rx queues; all of them are
 * allocated, and by default traffic is steered to a single one
 * (rxq_def).
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
        writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
        return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
        txq->txq_get_index++;
        if (txq->txq_get_index == txq->size)
                txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
        txq->txq_put_index++;
        if (txq->txq_put_index == txq->size)
                txq->txq_put_index = 0;
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
        int i;
        u32 dummy;

        /* Perform dummy reads from MIB counters; the counters are
         * clear-on-read, so reading them resets them to zero.
         */
        for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
                dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
        dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
        dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
                                             struct rtnl_link_stats64 *stats)
{
        struct mvneta_port *pp = netdev_priv(dev);
        unsigned int start;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct mvneta_pcpu_stats *cpu_stats;
                u64 rx_packets;
                u64 rx_bytes;
                u64 tx_packets;
                u64 tx_bytes;

                cpu_stats = per_cpu_ptr(pp->stats, cpu);
                do {
                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        rx_packets = cpu_stats->rx_packets;
                        rx_bytes = cpu_stats->rx_bytes;
                        tx_packets = cpu_stats->tx_packets;
                        tx_bytes = cpu_stats->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

                stats->rx_packets += rx_packets;
                stats->rx_bytes += rx_bytes;
                stats->tx_packets += tx_packets;
                stats->tx_bytes += tx_bytes;
        }

        stats->rx_errors = dev->stats.rx_errors;
        stats->rx_dropped = dev->stats.rx_dropped;

        stats->tx_dropped = dev->stats.tx_dropped;

        return stats;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so a descriptor without both
 * its first and last bits set indicates an error.
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
        return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
                MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
                                          struct mvneta_rx_queue *rxq,
                                          int ndescs)
{
        /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
         * be added at once
         */
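        /* For example, ndescs = 300 results in one write of 255
         * followed by a final write of 45.
         */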
        while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                            (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
                             MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
                ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
        }

        mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
                                        struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
        return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update the number of processed (rx_done) and newly refilled (rx_filled)
 * rx descriptors; called upon return from the rx path or from
 * mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
                                       struct mvneta_rx_queue *rxq,
                                       int rx_done, int rx_filled)
{
        u32 val;

        if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
                val = rx_done |
                        (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
                return;
        }

        /* Only 255 descriptors can be added at once */
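        /* For example, rx_done = 300 and rx_filled = 300 results in
         * one update of 255/255 followed by a final one of 45/45.
         */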
        while ((rx_done > 0) || (rx_filled > 0)) {
                if (rx_done <= 0xff) {
                        val = rx_done;
                        rx_done = 0;
                } else {
                        val = 0xff;
                        rx_done -= 0xff;
                }
                if (rx_filled <= 0xff) {
                        val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled = 0;
                } else {
                        val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled -= 0xff;
                }
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
        }
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
        int rx_desc = rxq->next_desc_to_proc;

        rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
        prefetch(rxq->descs + rxq->next_desc_to_proc);
        return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
        val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
                MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq,
                                  int offset)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

        /* Offset is expressed in units of 8 bytes */
        val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq,
                                     int pend_desc)
{
        u32 val;

        /* Only 255 descriptors can be added at once; assume the caller
         * processes TX descriptors in quanta of less than 256.
         */
        val = pend_desc;
        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
        int tx_desc = txq->next_desc_to_proc;

        txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
        return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
        if (txq->next_desc_to_proc == 0)
                txq->next_desc_to_proc = txq->last_desc - 1;
        else
                txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq,
                                    int buf_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

        val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
        val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
        int queue;
        u32 q_map;

        /* Enable all initialized TXs. */
        q_map = 0;
        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];
                if (txq->descs != NULL)
                        q_map |= (1 << queue);
        }
        mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

        /* Enable all initialized RXQs. */
        q_map = 0;      /* reset q_map, it still holds the TXQ bits */
        for (queue = 0; queue < rxq_number; queue++) {
                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

                if (rxq->descs != NULL)
                        q_map |= (1 << queue);
        }
        mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
        u32 val;
        int count;

        /* Stop Rx port activity. Check port Rx activity. */
        val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

        /* Issue stop command for active channels only */
        if (val != 0)
                mvreg_write(pp, MVNETA_RXQ_CMD,
                            val << MVNETA_RXQ_DISABLE_SHIFT);

        /* Wait for all Rx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_RXQ_CMD);
        } while (val & 0xff);

        /* Stop Tx port activity. Check port Tx activity. Issue stop
         * command for active channels only
         */
        val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

        if (val != 0)
                mvreg_write(pp, MVNETA_TXQ_CMD,
                            (val << MVNETA_TXQ_DISABLE_SHIFT));

        /* Wait for all Tx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for TX stopped status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                /* Check TX Command reg that all Txqs are stopped */
                val = mvreg_read(pp, MVNETA_TXQ_CMD);

        } while (val & 0xff);

        /* Double check to verify that TX FIFO is empty */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
                        netdev_warn(pp->dev,
                                    "TX FIFO empty timeout status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_PORT_STATUS);
        } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
                 (val & MVNETA_TX_IN_PRGRS));

        udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
        u32 val;

        /* Enable port */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val |= MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
        u32 val;

        /* Reset the Enable bit in the Serial Control Register */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

        udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
                val = 0;
        } else {
                memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
{
        u32 val;

        if (enable) {
                val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
                val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
                         MVNETA_GMAC_FORCE_LINK_DOWN |
                         MVNETA_GMAC_AN_FLOW_CTRL_EN);
                val |= MVNETA_GMAC_INBAND_AN_ENABLE |
                       MVNETA_GMAC_AN_SPEED_EN |
                       MVNETA_GMAC_AN_DUPLEX_EN;
                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

                val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
                val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
                mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

                val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
                val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
                mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
        } else {
                val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
                val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
                         MVNETA_GMAC_AN_SPEED_EN |
                         MVNETA_GMAC_AN_DUPLEX_EN);
                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

                val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
                val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
                mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

                val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
                val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
                mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
        }
}

/* This method sets defaults to the NETA port:
 * Clears interrupt Cause and Mask registers.
 * Clears all MAC tables.
 * Sets defaults to all registers.
 * Resets RX and TX descriptor rings.
 * Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
        int cpu;
        int queue;
        u32 val;
        int max_cpu = num_present_cpus();

        /* Clear all Cause registers */
        mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

        /* Mask all interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

        /* Enable MBUS Retry bit16 */
        mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

        /* Set CPU queue access map. CPUs are assigned to the RX and
         * TX queues modulo their number. If there is only one TX
         * queue then it is assigned to the CPU associated to the
         * default RX queue.
         */
        for_each_present_cpu(cpu) {
                int rxq_map = 0, txq_map = 0;
                int rxq, txq;

                for (rxq = 0; rxq < rxq_number; rxq++)
                        if ((rxq % max_cpu) == cpu)
                                rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

                for (txq = 0; txq < txq_number; txq++)
                        if ((txq % max_cpu) == cpu)
                                txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

                /* With only one TX queue we configure a special case
                 * which allows getting all the irqs on a single CPU.
                 */
                if (txq_number == 1)
                        txq_map = (cpu == pp->rxq_def) ?
                                MVNETA_CPU_TXQ_ACCESS(1) : 0;

                mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
        }

        /* Reset RX and TX DMAs */
        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
        mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

        /* Disable Legacy WRR, Disable EJP, Release from reset */
        mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
        for (queue = 0; queue < txq_number; queue++) {
                mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
                mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
        }

        mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
        mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

        /* Set Port Acceleration Mode */
        val = MVNETA_ACC_MODE_EXT;
        mvreg_write(pp, MVNETA_ACC_MODE, val);

        /* Update val of portCfg register accordingly with all RxQueue types */
        val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
        mvreg_write(pp, MVNETA_PORT_CONFIG, val);

        val = 0;
        mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
        mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

        /* Build PORT_SDMA_CONFIG_REG */
        val = 0;

        /* Default burst size */
        val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
        val |= MVNETA_DESC_SWAP;
#endif

        /* Assign port SDMA configuration */
        mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

        /* Disable PHY polling in hardware, since we're using the
         * kernel phylib to do this.
         */
        val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
        val &= ~MVNETA_PHY_POLLING_ENABLE;
        mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
        mvneta_set_autoneg(pp, pp->use_inband_status);
        mvneta_set_ucast_table(pp, -1);
        mvneta_set_special_mcast_table(pp, -1);
        mvneta_set_other_mcast_table(pp, -1);

        /* Set port interrupt enable register - default enable all */
        mvreg_write(pp, MVNETA_INTR_ENABLE,
                    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
                     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

        mvneta_mib_counters_clear(pp);
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
        u32 val, size, mtu;
        int queue;

        mtu = max_tx_size * 8;
        if (mtu > MVNETA_TX_MTU_MAX)
                mtu = MVNETA_TX_MTU_MAX;

        /* Set MTU */
        val = mvreg_read(pp, MVNETA_TX_MTU);
        val &= ~MVNETA_TX_MTU_MAX;
        val |= mtu;
        mvreg_write(pp, MVNETA_TX_MTU, val);

        /* TX token size and all TXQs token size must be larger than the MTU */
        val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

        size = val & MVNETA_TX_TOKEN_SIZE_MAX;
        if (size < mtu) {
                size = mtu;
                val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
                val |= size;
                mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
        }
        for (queue = 0; queue < txq_number; queue++) {
                val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

                size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
                if (size < mtu) {
                        size = mtu;
                        val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
                        val |= size;
                        mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
                }
        }
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
                                  int queue)
{
        unsigned int unicast_reg;
        unsigned int tbl_offset;
        unsigned int reg_offset;

        /* Locate the Unicast table entry */
        last_nibble = (0xf & last_nibble);

        /* offset from unicast tbl base */
        tbl_offset = (last_nibble / 4) * 4;

        /* offset within the above reg */
        reg_offset = last_nibble % 4;
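        /* e.g. last_nibble = 0xb: tbl_offset = 8, reg_offset = 3 */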

        unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

        if (queue == -1) {
                /* Clear accepts frame bit at specified unicast DA tbl entry */
                unicast_reg &= ~(0xff << (8 * reg_offset));
        } else {
                unicast_reg &= ~(0xff << (8 * reg_offset));
                unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
        }

        mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
                                int queue)
{
        unsigned int mac_h;
        unsigned int mac_l;

        if (queue != -1) {
                mac_l = (addr[4] << 8) | (addr[5]);
                mac_h = (addr[0] << 24) | (addr[1] << 16) |
                        (addr[2] << 8) | (addr[3] << 0);

                mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
                mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
        }

        /* Accept frames of this address */
        mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before an RX
 * interrupt is generated by the HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq, u32 value)
{
        mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
                    value | MVNETA_RXQ_NON_OCCUPIED(0));
        rxq->pkts_coal = value;
}

/* Set the time delay in usec before an RX interrupt is generated by
 * the HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq, u32 value)
{
        u32 val;
        unsigned long clk_rate;

        clk_rate = clk_get_rate(pp->clk);
        val = (clk_rate / 1000000) * value;
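        /* For example, a 250 MHz core clock and value = 100 usec gives
         * val = 250 * 100 = 25000 clock cycles.
         */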

        mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
        rxq->time_coal = value;
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
                                         struct mvneta_tx_queue *txq, u32 value)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

        val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
        val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

        txq->done_pkts_coal = value;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
                                u32 phys_addr, u32 cookie)
{
        rx_desc->buf_cookie = cookie;
        rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq,
                                     int sent_desc)
{
        u32 val;

        /* Only 255 TX descriptors can be updated at once */
        while (sent_desc > 0xff) {
                val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
                mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
                sent_desc = sent_desc - 0xff;
        }

        val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
                                        struct mvneta_tx_queue *txq)
{
        u32 val;
        int sent_desc;

        val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
        sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
                MVNETA_TXQ_SENT_DESC_SHIFT;

        return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq)
{
        int sent_desc;

        /* Get number of sent descriptors */
        sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

        /* Decrement sent descriptors counter */
        if (sent_desc)
                mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

        return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
                                int ip_hdr_len, int l4_proto)
{
        u32 command;

        /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
         * G_L4_chk, L4_type; required only for checksum
         * calculation
         */
        command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
        command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

        if (l3_proto == htons(ETH_P_IP))
                command |= MVNETA_TXD_IP_CSUM;
        else
                command |= MVNETA_TX_L3_IP6;

        if (l4_proto == IPPROTO_TCP)
                command |= MVNETA_TX_L4_CSUM_FULL;
        else if (l4_proto == IPPROTO_UDP)
                command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
        else
                command |= MVNETA_TX_L4_CSUM_NOT;

        return command;
}
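
/* Illustrative example: an IPv4/TCP frame with the L3 header at offset
 * 14 and a five-word (20 byte) IP header yields
 * command = 14 | (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL.
 */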

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
                            struct mvneta_rx_desc *rx_desc)
{
        u32 status = rx_desc->status;

        if (!mvneta_rxq_desc_is_first_last(status)) {
                netdev_err(pp->dev,
                           "bad rx status %08x (buffer oversize), size=%d\n",
                           status, rx_desc->data_size);
                return;
        }

        switch (status & MVNETA_RXD_ERR_CODE_MASK) {
        case MVNETA_RXD_ERR_CRC:
                netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_OVERRUN:
                netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_LEN:
                netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_RESOURCE:
                netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        }
}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
                           struct sk_buff *skb)
{
        if ((status & MVNETA_RXD_L3_IP4) &&
            (status & MVNETA_RXD_L4_CSUM_OK)) {
                skb->csum = 0;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                return;
        }

        skb->ip_summed = CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from tx_done reg. <cause> must not be null. The return value is always a
 * valid queue for matching the first one found in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
                                                     u32 cause)
{
        int queue = fls(cause) - 1;

        return &pp->txqs[queue];
}
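
/* For example, cause = 0x14 (queues 2 and 4 pending) gives fls() = 5,
 * so the highest pending queue, txq 4, is handled first.
 */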

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
                                 struct mvneta_tx_queue *txq, int num)
{
        int i;

        for (i = 0; i < num; i++) {
                struct mvneta_tx_desc *tx_desc = txq->descs +
                        txq->txq_get_index;
                struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

                mvneta_txq_inc_get(txq);

                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
                        dma_unmap_single(pp->dev->dev.parent,
                                         tx_desc->buf_phys_addr,
                                         tx_desc->data_size, DMA_TO_DEVICE);
                if (!skb)
                        continue;
                dev_kfree_skb_any(skb);
        }
}

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
                            struct mvneta_tx_queue *txq)
{
        struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
        int tx_done;

        tx_done = mvneta_txq_sent_desc_proc(pp, txq);
        if (!tx_done)
                return;

        mvneta_txq_bufs_free(pp, txq, tx_done);

        txq->count -= tx_done;

        if (netif_tx_queue_stopped(nq)) {
                if (txq->count <= txq->tx_wake_threshold)
                        netif_tx_wake_queue(nq);
        }
}

willy tarreau8ec2cd42014-01-16 08:20:16 +01001483static void *mvneta_frag_alloc(const struct mvneta_port *pp)
1484{
1485 if (likely(pp->frag_size <= PAGE_SIZE))
1486 return netdev_alloc_frag(pp->frag_size);
1487 else
1488 return kmalloc(pp->frag_size, GFP_ATOMIC);
1489}
1490
1491static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
1492{
1493 if (likely(pp->frag_size <= PAGE_SIZE))
Alexander Duyck13dc0d22015-05-06 21:12:14 -07001494 skb_free_frag(data);
willy tarreau8ec2cd42014-01-16 08:20:16 +01001495 else
1496 kfree(data);
1497}
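
/* Design note: for the common case (frag_size <= PAGE_SIZE) the two helpers
 * above use the per-CPU page-fragment cache behind netdev_alloc_frag(),
 * which is typically cheaper than kmalloc() for sub-page RX buffers; jumbo
 * MTUs push frag_size past PAGE_SIZE, so a plain GFP_ATOMIC kmalloc() is
 * used instead, and alloc/free must stay symmetric for a given frag_size.
 */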
1498
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001499/* Refill processing */
1500static int mvneta_rx_refill(struct mvneta_port *pp,
1501 struct mvneta_rx_desc *rx_desc)
1502
1503{
1504 dma_addr_t phys_addr;
willy tarreau8ec2cd42014-01-16 08:20:16 +01001505 void *data;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001506
willy tarreau8ec2cd42014-01-16 08:20:16 +01001507 data = mvneta_frag_alloc(pp);
1508 if (!data)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001509 return -ENOMEM;
1510
willy tarreau8ec2cd42014-01-16 08:20:16 +01001511 phys_addr = dma_map_single(pp->dev->dev.parent, data,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001512 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1513 DMA_FROM_DEVICE);
1514 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
willy tarreau8ec2cd42014-01-16 08:20:16 +01001515 mvneta_frag_free(pp, data);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001516 return -ENOMEM;
1517 }
1518
willy tarreau8ec2cd42014-01-16 08:20:16 +01001519 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001520 return 0;
1521}
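
/* Note: the (u32)data cast above stores the buffer's virtual address in the
 * descriptor's buf_cookie field so the RX path can recover it later; this
 * relies on pointers being 32 bits wide on the Armada SoCs this driver
 * targets.
 */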
1522
1523/* Handle tx checksum */
1524static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1525{
1526 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1527 int ip_hdr_len = 0;
Vlad Yasevich817dbfa2014-08-25 10:34:54 -04001528 __be16 l3_proto = vlan_get_protocol(skb);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001529 u8 l4_proto;
1530
Vlad Yasevich817dbfa2014-08-25 10:34:54 -04001531 if (l3_proto == htons(ETH_P_IP)) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001532 struct iphdr *ip4h = ip_hdr(skb);
1533
1534 /* Calculate IPv4 checksum and L4 checksum */
1535 ip_hdr_len = ip4h->ihl;
1536 l4_proto = ip4h->protocol;
Vlad Yasevich817dbfa2014-08-25 10:34:54 -04001537 } else if (l3_proto == htons(ETH_P_IPV6)) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001538 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1539
1540 /* Read l4_proto from one of the IPv6 extension headers */
1541 if (skb_network_header_len(skb) > 0)
1542 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1543 l4_proto = ip6h->nexthdr;
1544 } else
1545 return MVNETA_TX_L4_CSUM_NOT;
1546
1547 return mvneta_txq_desc_csum(skb_network_offset(skb),
Vlad Yasevich817dbfa2014-08-25 10:34:54 -04001548 l3_proto, ip_hdr_len, l4_proto);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001549 }
1550
1551 return MVNETA_TX_L4_CSUM_NOT;
1552}
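
/* Worked example: for a standard 20-byte IPv4 header, ip4h->ihl == 5, so
 * mvneta_txq_desc_csum() receives the L3 header length in 32-bit words;
 * the IPv6 branch produces the same unit by shifting the byte count
 * right by 2.
 */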
1553
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001554/* Drop packets received by the RXQ and free buffers */
1555static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1556 struct mvneta_rx_queue *rxq)
1557{
1558 int rx_done, i;
1559
1560 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1561 for (i = 0; i < rxq->size; i++) {
1562 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
willy tarreau8ec2cd42014-01-16 08:20:16 +01001563 void *data = (void *)rx_desc->buf_cookie;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001564
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001565 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
Ezequiel Garciaa328f3a2013-12-05 13:35:37 -03001566 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
Justin Maggard8c94ddb2015-11-09 17:21:05 -08001567 mvneta_frag_free(pp, data);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001568 }
1569
1570 if (rx_done)
1571 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1572}
1573
1574/* Main rx processing */
1575static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1576 struct mvneta_rx_queue *rxq)
1577{
Maxime Ripard12bb03b2015-09-25 18:09:36 +02001578 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001579 struct net_device *dev = pp->dev;
Simon Guinota84e3282015-07-19 13:00:53 +02001580 int rx_done;
willy tarreaudc4277d2014-01-16 08:20:07 +01001581 u32 rcvd_pkts = 0;
1582 u32 rcvd_bytes = 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001583
1584 /* Get number of received packets */
1585 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1586
1587 if (rx_todo > rx_done)
1588 rx_todo = rx_done;
1589
1590 rx_done = 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001591
1592 /* Fairness NAPI loop */
1593 while (rx_done < rx_todo) {
1594 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1595 struct sk_buff *skb;
willy tarreau8ec2cd42014-01-16 08:20:16 +01001596 unsigned char *data;
Simon Guinotdaf158d2015-09-15 22:41:21 +02001597 dma_addr_t phys_addr;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001598 u32 rx_status;
1599 int rx_bytes, err;
1600
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001601 rx_done++;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001602 rx_status = rx_desc->status;
willy tarreauf19fadf2014-01-16 08:20:17 +01001603 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
willy tarreau8ec2cd42014-01-16 08:20:16 +01001604 data = (unsigned char *)rx_desc->buf_cookie;
Simon Guinotdaf158d2015-09-15 22:41:21 +02001605 phys_addr = rx_desc->buf_phys_addr;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001606
willy tarreau54282132014-01-16 08:20:14 +01001607 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
willy tarreauf19fadf2014-01-16 08:20:17 +01001608 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1609 err_drop_frame:
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001610 dev->stats.rx_errors++;
1611 mvneta_rx_error(pp, rx_desc);
willy tarreau8ec2cd42014-01-16 08:20:16 +01001612 /* leave the descriptor untouched */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001613 continue;
1614 }
1615
willy tarreauf19fadf2014-01-16 08:20:17 +01001616 if (rx_bytes <= rx_copybreak) {
1617 /* better copy a small frame and not unmap the DMA region */
1618 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
1619 if (unlikely(!skb))
1620 goto err_drop_frame;
1621
1622 dma_sync_single_range_for_cpu(dev->dev.parent,
1623 rx_desc->buf_phys_addr,
1624 MVNETA_MH_SIZE + NET_SKB_PAD,
1625 rx_bytes,
1626 DMA_FROM_DEVICE);
1627 memcpy(skb_put(skb, rx_bytes),
1628 data + MVNETA_MH_SIZE + NET_SKB_PAD,
1629 rx_bytes);
1630
1631 skb->protocol = eth_type_trans(skb, dev);
1632 mvneta_rx_csum(pp, rx_status, skb);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02001633 napi_gro_receive(&port->napi, skb);
willy tarreauf19fadf2014-01-16 08:20:17 +01001634
1635 rcvd_pkts++;
1636 rcvd_bytes += rx_bytes;
1637
1638 /* leave the descriptor and buffer untouched */
1639 continue;
1640 }
1641
Simon Guinota84e3282015-07-19 13:00:53 +02001642 /* Refill processing */
1643 err = mvneta_rx_refill(pp, rx_desc);
1644 if (err) {
1645 netdev_err(dev, "Linux processing - Can't refill\n");
1646 rxq->missed++;
1647 goto err_drop_frame;
1648 }
1649
willy tarreauf19fadf2014-01-16 08:20:17 +01001650 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
willy tarreauf19fadf2014-01-16 08:20:17 +01001651
Marcin Wojtas26c17a172015-11-30 13:27:44 +01001652 /* After the refill, the old buffer has to be unmapped regardless
1653 * of whether the skb was successfully built or not.
1654 */
Simon Guinotdaf158d2015-09-15 22:41:21 +02001655 dma_unmap_single(dev->dev.parent, phys_addr,
Ezequiel Garciaa328f3a2013-12-05 13:35:37 -03001656 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001657
Marcin Wojtas26c17a172015-11-30 13:27:44 +01001658 if (!skb)
1659 goto err_drop_frame;
1660
willy tarreaudc4277d2014-01-16 08:20:07 +01001661 rcvd_pkts++;
1662 rcvd_bytes += rx_bytes;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001663
1664 /* Linux processing */
willy tarreau8ec2cd42014-01-16 08:20:16 +01001665 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001666 skb_put(skb, rx_bytes);
1667
1668 skb->protocol = eth_type_trans(skb, dev);
1669
willy tarreau54282132014-01-16 08:20:14 +01001670 mvneta_rx_csum(pp, rx_status, skb);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001671
Maxime Ripard12bb03b2015-09-25 18:09:36 +02001672 napi_gro_receive(&port->napi, skb);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001673 }
1674
willy tarreaudc4277d2014-01-16 08:20:07 +01001675 if (rcvd_pkts) {
willy tarreau74c41b02014-01-16 08:20:08 +01001676 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1677
1678 u64_stats_update_begin(&stats->syncp);
1679 stats->rx_packets += rcvd_pkts;
1680 stats->rx_bytes += rcvd_bytes;
1681 u64_stats_update_end(&stats->syncp);
willy tarreaudc4277d2014-01-16 08:20:07 +01001682 }
1683
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001684 /* Update rxq management counters */
Simon Guinota84e3282015-07-19 13:00:53 +02001685 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001686
1687 return rx_done;
1688}
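
/* Design note: the rx_copybreak path above trades one memcpy() for keeping
 * the DMA mapping and buffer in place, which is a net win for small frames;
 * larger frames instead hand the buffer to build_skb(), and the descriptor
 * is refilled with a fresh fragment before the old buffer is unmapped.
 */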
1689
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03001690static inline void
1691mvneta_tso_put_hdr(struct sk_buff *skb,
1692 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
1693{
1694 struct mvneta_tx_desc *tx_desc;
1695 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1696
1697 txq->tx_skb[txq->txq_put_index] = NULL;
1698 tx_desc = mvneta_txq_next_desc_get(txq);
1699 tx_desc->data_size = hdr_len;
1700 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
1701 tx_desc->command |= MVNETA_TXD_F_DESC;
1702 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
1703 txq->txq_put_index * TSO_HEADER_SIZE;
1704 mvneta_txq_inc_put(txq);
1705}
1706
1707static inline int
1708mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
1709 struct sk_buff *skb, char *data, int size,
1710 bool last_tcp, bool is_last)
1711{
1712 struct mvneta_tx_desc *tx_desc;
1713
1714 tx_desc = mvneta_txq_next_desc_get(txq);
1715 tx_desc->data_size = size;
1716 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
1717 size, DMA_TO_DEVICE);
1718 if (unlikely(dma_mapping_error(dev->dev.parent,
1719 tx_desc->buf_phys_addr))) {
1720 mvneta_txq_desc_put(txq);
1721 return -ENOMEM;
1722 }
1723
1724 tx_desc->command = 0;
1725 txq->tx_skb[txq->txq_put_index] = NULL;
1726
1727 if (last_tcp) {
1728 /* last descriptor in the TCP packet */
1729 tx_desc->command = MVNETA_TXD_L_DESC;
1730
1731 /* last descriptor in SKB */
1732 if (is_last)
1733 txq->tx_skb[txq->txq_put_index] = skb;
1734 }
1735 mvneta_txq_inc_put(txq);
1736 return 0;
1737}
1738
1739static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
1740 struct mvneta_tx_queue *txq)
1741{
1742 int total_len, data_left;
1743 int desc_count = 0;
1744 struct mvneta_port *pp = netdev_priv(dev);
1745 struct tso_t tso;
1746 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1747 int i;
1748
1749 /* Count needed descriptors */
1750 if ((txq->count + tso_count_descs(skb)) >= txq->size)
1751 return 0;
1752
1753 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
1754 pr_info("*** Is this even possible???!?!?\n");
1755 return 0;
1756 }
1757
1758 /* Initialize the TSO handler, and prepare the first payload */
1759 tso_start(skb, &tso);
1760
1761 total_len = skb->len - hdr_len;
1762 while (total_len > 0) {
1763 char *hdr;
1764
1765 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1766 total_len -= data_left;
1767 desc_count++;
1768
1769 /* prepare packet headers: MAC + IP + TCP */
1770 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
1771 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1772
1773 mvneta_tso_put_hdr(skb, pp, txq);
1774
1775 while (data_left > 0) {
1776 int size;
1777 desc_count++;
1778
1779 size = min_t(int, tso.size, data_left);
1780
1781 if (mvneta_tso_put_data(dev, txq, skb,
1782 tso.data, size,
1783 size == data_left,
1784 total_len == 0))
1785 goto err_release;
1786 data_left -= size;
1787
1788 tso_build_data(skb, &tso, size);
1789 }
1790 }
1791
1792 return desc_count;
1793
1794err_release:
1795 /* Release all used data descriptors; header descriptors must not
1796 * be DMA-unmapped.
1797 */
1798 for (i = desc_count - 1; i >= 0; i--) {
1799 struct mvneta_tx_desc *tx_desc = txq->descs + i;
Ezequiel Garcia2e3173a2014-05-30 13:40:07 -03001800 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03001801 dma_unmap_single(pp->dev->dev.parent,
1802 tx_desc->buf_phys_addr,
1803 tx_desc->data_size,
1804 DMA_TO_DEVICE);
1805 mvneta_txq_desc_put(txq);
1806 }
1807 return 0;
1808}
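
/* Worked example of the TSO layout above: a 14480-byte TCP payload with
 * gso_size == 1448 is cut into 10 segments; each segment consumes one
 * header descriptor (pointing TSO_HEADER_SIZE apart into the coherent
 * tso_hdrs pool, indexed by txq_put_index) plus one data descriptor per
 * contiguous payload chunk, so desc_count ends up at least 2 * 10 here.
 */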
1809
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001810/* Handle tx fragmentation processing */
1811static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1812 struct mvneta_tx_queue *txq)
1813{
1814 struct mvneta_tx_desc *tx_desc;
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001815 int i, nr_frags = skb_shinfo(skb)->nr_frags;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001816
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001817 for (i = 0; i < nr_frags; i++) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001818 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1819 void *addr = page_address(frag->page.p) + frag->page_offset;
1820
1821 tx_desc = mvneta_txq_next_desc_get(txq);
1822 tx_desc->data_size = frag->size;
1823
1824 tx_desc->buf_phys_addr =
1825 dma_map_single(pp->dev->dev.parent, addr,
1826 tx_desc->data_size, DMA_TO_DEVICE);
1827
1828 if (dma_mapping_error(pp->dev->dev.parent,
1829 tx_desc->buf_phys_addr)) {
1830 mvneta_txq_desc_put(txq);
1831 goto error;
1832 }
1833
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001834 if (i == nr_frags - 1) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001835 /* Last descriptor */
1836 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001837 txq->tx_skb[txq->txq_put_index] = skb;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001838 } else {
1839 /* Descriptor in the middle: Not First, Not Last */
1840 tx_desc->command = 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001841 txq->tx_skb[txq->txq_put_index] = NULL;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001842 }
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001843 mvneta_txq_inc_put(txq);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001844 }
1845
1846 return 0;
1847
1848error:
1849 /* Release all descriptors that were used to map fragments of
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01001850 * this packet, as well as the corresponding DMA mappings
1851 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001852 for (i = i - 1; i >= 0; i--) {
1853 tx_desc = txq->descs + i;
1854 dma_unmap_single(pp->dev->dev.parent,
1855 tx_desc->buf_phys_addr,
1856 tx_desc->data_size,
1857 DMA_TO_DEVICE);
1858 mvneta_txq_desc_put(txq);
1859 }
1860
1861 return -ENOMEM;
1862}
1863
1864/* Main tx processing */
1865static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1866{
1867 struct mvneta_port *pp = netdev_priv(dev);
Willy Tarreauee40a112013-04-11 23:00:37 +02001868 u16 txq_id = skb_get_queue_mapping(skb);
1869 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001870 struct mvneta_tx_desc *tx_desc;
Eric Dumazet5f478b42014-12-02 04:30:59 -08001871 int len = skb->len;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001872 int frags = 0;
1873 u32 tx_cmd;
1874
1875 if (!netif_running(dev))
1876 goto out;
1877
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03001878 if (skb_is_gso(skb)) {
1879 frags = mvneta_tx_tso(skb, dev, txq);
1880 goto out;
1881 }
1882
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001883 frags = skb_shinfo(skb)->nr_frags + 1;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001884
1885 /* Get a descriptor for the first part of the packet */
1886 tx_desc = mvneta_txq_next_desc_get(txq);
1887
1888 tx_cmd = mvneta_skb_tx_csum(pp, skb);
1889
1890 tx_desc->data_size = skb_headlen(skb);
1891
1892 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1893 tx_desc->data_size,
1894 DMA_TO_DEVICE);
1895 if (unlikely(dma_mapping_error(dev->dev.parent,
1896 tx_desc->buf_phys_addr))) {
1897 mvneta_txq_desc_put(txq);
1898 frags = 0;
1899 goto out;
1900 }
1901
1902 if (frags == 1) {
1903 /* First and Last descriptor */
1904 tx_cmd |= MVNETA_TXD_FLZ_DESC;
1905 tx_desc->command = tx_cmd;
1906 txq->tx_skb[txq->txq_put_index] = skb;
1907 mvneta_txq_inc_put(txq);
1908 } else {
1909 /* First but not Last */
1910 tx_cmd |= MVNETA_TXD_F_DESC;
1911 txq->tx_skb[txq->txq_put_index] = NULL;
1912 mvneta_txq_inc_put(txq);
1913 tx_desc->command = tx_cmd;
1914 /* Continue with other skb fragments */
1915 if (mvneta_tx_frag_process(pp, skb, txq)) {
1916 dma_unmap_single(dev->dev.parent,
1917 tx_desc->buf_phys_addr,
1918 tx_desc->data_size,
1919 DMA_TO_DEVICE);
1920 mvneta_txq_desc_put(txq);
1921 frags = 0;
1922 goto out;
1923 }
1924 }
1925
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001926out:
1927 if (frags > 0) {
willy tarreau74c41b02014-01-16 08:20:08 +01001928 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
Ezequiel Garciae19d2dd2014-05-19 13:59:54 -03001929 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
1930
1931 txq->count += frags;
1932 mvneta_txq_pend_desc_add(pp, txq, frags);
1933
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03001934 if (txq->count >= txq->tx_stop_threshold)
Ezequiel Garciae19d2dd2014-05-19 13:59:54 -03001935 netif_tx_stop_queue(nq);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001936
willy tarreau74c41b02014-01-16 08:20:08 +01001937 u64_stats_update_begin(&stats->syncp);
1938 stats->tx_packets++;
Eric Dumazet5f478b42014-12-02 04:30:59 -08001939 stats->tx_bytes += len;
willy tarreau74c41b02014-01-16 08:20:08 +01001940 u64_stats_update_end(&stats->syncp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001941 } else {
1942 dev->stats.tx_dropped++;
1943 dev_kfree_skb_any(skb);
1944 }
1945
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001946 return NETDEV_TX_OK;
1947}
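
/* Descriptor command summary for the xmit path above: a linear skb uses a
 * single descriptor flagged MVNETA_TXD_FLZ_DESC (First + Last + Zero pad);
 * a fragmented skb marks the head MVNETA_TXD_F_DESC, middle fragments get
 * command 0, and mvneta_tx_frag_process() tags the final fragment with
 * MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD and records the skb there.
 */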
1948
1949
1950/* Free tx resources, when resetting a port */
1951static void mvneta_txq_done_force(struct mvneta_port *pp,
1952 struct mvneta_tx_queue *txq)
1953
1954{
1955 int tx_done = txq->count;
1956
1957 mvneta_txq_bufs_free(pp, txq, tx_done);
1958
1959 /* reset txq */
1960 txq->count = 0;
1961 txq->txq_put_index = 0;
1962 txq->txq_get_index = 0;
1963}
1964
willy tarreau6c498972014-01-16 08:20:12 +01001965/* Handle tx done - called in softirq context. The <cause_tx_done> argument
1966 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
1967 */
Arnaud Ebalard0713a862014-01-16 08:20:18 +01001968static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001969{
1970 struct mvneta_tx_queue *txq;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001971 struct netdev_queue *nq;
1972
willy tarreau6c498972014-01-16 08:20:12 +01001973 while (cause_tx_done) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001974 txq = mvneta_tx_done_policy(pp, cause_tx_done);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001975
1976 nq = netdev_get_tx_queue(pp->dev, txq->id);
1977 __netif_tx_lock(nq, smp_processor_id());
1978
Arnaud Ebalard0713a862014-01-16 08:20:18 +01001979 if (txq->count)
1980 mvneta_txq_done(pp, txq);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001981
1982 __netif_tx_unlock(nq);
1983 cause_tx_done &= ~((1 << txq->id));
1984 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001985}
1986
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01001987/* Compute the crc8 of the specified address, using an algorithm that is
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001988 * specific to the hw spec and differs from the generic crc8 algorithm
1989 */
1990static int mvneta_addr_crc(unsigned char *addr)
1991{
1992 int crc = 0;
1993 int i;
1994
1995 for (i = 0; i < ETH_ALEN; i++) {
1996 int j;
1997
1998 crc = (crc ^ addr[i]) << 8;
1999 for (j = 7; j >= 0; j--) {
2000 if (crc & (0x100 << j))
2001 crc ^= 0x107 << j;
2002 }
2003 }
2004
2005 return crc;
2006}
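
/* Tracing one round of the reduction above: feeding the single byte 0x01
 * into a zeroed crc gives crc = (0 ^ 0x01) << 8 = 0x100; only bit 8 is
 * set, so just the j == 0 step fires and crc ^= 0x107 leaves 0x07. The
 * constant 0x107 encodes the polynomial x^8 + x^2 + x + 1.
 */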
2007
2008/* This method controls the net device special MAC multicast support.
2009 * The Special Multicast Table for MAC addresses supports MAC of the form
2010 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2011 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2012 * Table entries in the DA-Filter table. This method sets the
2013 * appropriate Special Multicast Table entry.
2014 */
2015static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2016 unsigned char last_byte,
2017 int queue)
2018{
2019 unsigned int smc_table_reg;
2020 unsigned int tbl_offset;
2021 unsigned int reg_offset;
2022
2023 /* Register offset from SMC table base */
2024 tbl_offset = (last_byte / 4);
2025 /* Entry offset within the above reg */
2026 reg_offset = last_byte % 4;
2027
2028 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2029 + tbl_offset * 4));
2030
2031 if (queue == -1)
2032 smc_table_reg &= ~(0xff << (8 * reg_offset));
2033 else {
2034 smc_table_reg &= ~(0xff << (8 * reg_offset));
2035 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2036 }
2037
2038 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2039 smc_table_reg);
2040}
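
/* Worked example of the indexing above: last_byte = 0x2a (42) selects
 * tbl_offset = 10 (the 11th 32-bit table register) and reg_offset = 2
 * (its third byte lane); enabling that entry for queue 1 writes
 * 0x01 | (1 << 1) = 0x03 into that byte.
 */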
2041
2042/* This method controls the network device Other MAC multicast support.
2043 * The Other Multicast Table is used for multicast of another type.
2044 * A CRC-8 is used as an index to the Other Multicast Table entries
2045 * in the DA-Filter table.
2046 * The method gets the CRC-8 value from the calling routine and
2047 * sets the appropriate Other Multicast Table entry according to the
2048 * specified CRC-8.
2049 */
2050static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2051 unsigned char crc8,
2052 int queue)
2053{
2054 unsigned int omc_table_reg;
2055 unsigned int tbl_offset;
2056 unsigned int reg_offset;
2057
2058 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2059 reg_offset = crc8 % 4; /* Entry offset within the above reg */
2060
2061 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2062
2063 if (queue == -1) {
2064 /* Clear accepts frame bit at specified Other DA table entry */
2065 omc_table_reg &= ~(0xff << (8 * reg_offset));
2066 } else {
2067 omc_table_reg &= ~(0xff << (8 * reg_offset));
2068 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2069 }
2070
2071 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2072}
2073
2074/* The network device supports multicast using two tables:
2075 * 1) Special Multicast Table for MAC addresses of the form
2076 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2077 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2078 * Table entries in the DA-Filter table.
2079 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
2080 * is used as an index to the Other Multicast Table entries in the
2081 * DA-Filter table.
2082 */
2083static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2084 int queue)
2085{
2086 unsigned char crc_result = 0;
2087
2088 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2089 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2090 return 0;
2091 }
2092
2093 crc_result = mvneta_addr_crc(p_addr);
2094 if (queue == -1) {
2095 if (pp->mcast_count[crc_result] == 0) {
2096 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2097 crc_result);
2098 return -EINVAL;
2099 }
2100
2101 pp->mcast_count[crc_result]--;
2102 if (pp->mcast_count[crc_result] != 0) {
2103 netdev_info(pp->dev,
2104 "After delete there are %d valid Mcast for crc8=0x%02x\n",
2105 pp->mcast_count[crc_result], crc_result);
2106 return -EINVAL;
2107 }
2108 } else
2109 pp->mcast_count[crc_result]++;
2110
2111 mvneta_set_other_mcast_addr(pp, crc_result, queue);
2112
2113 return 0;
2114}
2115
2116/* Configure the filtering mode of the Ethernet port */
2117static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2118 int is_promisc)
2119{
2120 u32 port_cfg_reg, val;
2121
2122 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2123
2124 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2125
2126 /* Set / Clear UPM bit in port configuration register */
2127 if (is_promisc) {
2128 /* Accept all Unicast addresses */
2129 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2130 val |= MVNETA_FORCE_UNI;
2131 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2132 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2133 } else {
2134 /* Reject all Unicast addresses */
2135 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2136 val &= ~MVNETA_FORCE_UNI;
2137 }
2138
2139 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
2140 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
2141}
2142
2143/* register unicast and multicast addresses */
2144static void mvneta_set_rx_mode(struct net_device *dev)
2145{
2146 struct mvneta_port *pp = netdev_priv(dev);
2147 struct netdev_hw_addr *ha;
2148
2149 if (dev->flags & IFF_PROMISC) {
2150 /* Accept all: Multicast + Unicast */
2151 mvneta_rx_unicast_promisc_set(pp, 1);
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01002152 mvneta_set_ucast_table(pp, pp->rxq_def);
2153 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2154 mvneta_set_other_mcast_table(pp, pp->rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002155 } else {
2156 /* Accept single Unicast */
2157 mvneta_rx_unicast_promisc_set(pp, 0);
2158 mvneta_set_ucast_table(pp, -1);
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01002159 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002160
2161 if (dev->flags & IFF_ALLMULTI) {
2162 /* Accept all multicast */
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01002163 mvneta_set_special_mcast_table(pp, pp->rxq_def);
2164 mvneta_set_other_mcast_table(pp, pp->rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002165 } else {
2166 /* Accept only initialized multicast */
2167 mvneta_set_special_mcast_table(pp, -1);
2168 mvneta_set_other_mcast_table(pp, -1);
2169
2170 if (!netdev_mc_empty(dev)) {
2171 netdev_for_each_mc_addr(ha, dev) {
2172 mvneta_mcast_addr_set(pp, ha->addr,
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01002173 pp->rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002174 }
2175 }
2176 }
2177 }
2178}
2179
2180/* Interrupt handling - the callback for request_irq() */
2181static irqreturn_t mvneta_isr(int irq, void *dev_id)
2182{
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002183 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002184
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002185 disable_percpu_irq(port->pp->dev->irq);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002186 napi_schedule(&port->napi);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002187
2188 return IRQ_HANDLED;
2189}
2190
Stas Sergeev898b2972015-04-01 20:32:49 +03002191static int mvneta_fixed_link_update(struct mvneta_port *pp,
2192 struct phy_device *phy)
2193{
2194 struct fixed_phy_status status;
2195 struct fixed_phy_status changed = {};
2196 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
2197
2198 status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
2199 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
2200 status.speed = SPEED_1000;
2201 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
2202 status.speed = SPEED_100;
2203 else
2204 status.speed = SPEED_10;
2205 status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
2206 changed.link = 1;
2207 changed.speed = 1;
2208 changed.duplex = 1;
2209 fixed_phy_update_state(phy, &status, &changed);
2210 return 0;
2211}
2212
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002213/* NAPI handler
2214 * Bits 0 - 7 of the causeRxTx register indicate that packets were
2215 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
2216 * Bits 8 - 15 of the causeRxTx register indicate that packets were
2217 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
2218 * Each CPU has its own causeRxTx register
2219 */
2220static int mvneta_poll(struct napi_struct *napi, int budget)
2221{
2222 int rx_done = 0;
2223 u32 cause_rx_tx;
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002224 int rx_queue;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002225 struct mvneta_port *pp = netdev_priv(napi->dev);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002226 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002227
2228 if (!netif_running(pp->dev)) {
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002229 napi_complete(&port->napi);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002230 return rx_done;
2231 }
2232
2233 /* Read cause register */
Stas Sergeev898b2972015-04-01 20:32:49 +03002234 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
2235 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
2236 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
2237
2238 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2239 if (pp->use_inband_status && (cause_misc &
2240 (MVNETA_CAUSE_PHY_STATUS_CHANGE |
2241 MVNETA_CAUSE_LINK_CHANGE |
2242 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
2243 mvneta_fixed_link_update(pp, pp->phy_dev);
2244 }
2245 }
willy tarreau71f6d1b2014-01-16 08:20:11 +01002246
2247 /* Release Tx descriptors */
2248 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
Arnaud Ebalard0713a862014-01-16 08:20:18 +01002249 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
willy tarreau71f6d1b2014-01-16 08:20:11 +01002250 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2251 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002252
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002253 /* For the case where the last mvneta_poll did not process all
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002254 * RX packets
2255 */
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002256 rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2257
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002258 cause_rx_tx |= port->cause_rx_tx;
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002259
2260 if (rx_queue) {
2261 rx_queue = rx_queue - 1;
2262 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]);
2263 }
2264
Maxime Ripardd8936652015-09-25 18:09:37 +02002265 budget -= rx_done;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002266
2267 if (budget > 0) {
2268 cause_rx_tx = 0;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002269 napi_complete(&port->napi);
2270 enable_percpu_irq(pp->dev->irq, 0);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002271 }
2272
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002273 port->cause_rx_tx = cause_rx_tx;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002274 return rx_done;
2275}
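
/* Worked example of the RX cause decoding above: cause_rx_tx = 0x0300
 * means RX queues 0 and 1 are pending; (0x0300 >> 8) & 0xff = 0x3 and
 * fls(0x3) = 2, so rx_queue = 1 and the highest-numbered pending queue,
 * rxqs[1], is the one polled this pass.
 */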
2276
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002277/* Handle rxq fill: allocates rxq buffers; called when initializing a port */
2278static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2279 int num)
2280{
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002281 int i;
2282
2283 for (i = 0; i < num; i++) {
willy tarreaua1a65ab2014-01-16 08:20:13 +01002284 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2285 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
2286 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002287 __func__, rxq->id, i, num);
2288 break;
2289 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002290 }
2291
2292 /* Add this number of RX descriptors as non-occupied (ready to
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002293 * get packets)
2294 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002295 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2296
2297 return i;
2298}
2299
2300/* Free all packets pending transmit from all TXQs and reset TX port */
2301static void mvneta_tx_reset(struct mvneta_port *pp)
2302{
2303 int queue;
2304
Ezequiel Garcia96728502014-05-22 20:06:59 -03002305 /* free the skb's in the tx ring */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002306 for (queue = 0; queue < txq_number; queue++)
2307 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2308
2309 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2310 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2311}
2312
2313static void mvneta_rx_reset(struct mvneta_port *pp)
2314{
2315 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2316 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2317}
2318
2319/* Rx/Tx queue initialization/cleanup methods */
2320
2321/* Create a specified RX queue */
2322static int mvneta_rxq_init(struct mvneta_port *pp,
2323 struct mvneta_rx_queue *rxq)
2324
2325{
2326 rxq->size = pp->rx_ring_size;
2327
2328 /* Allocate memory for RX descriptors */
2329 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2330 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2331 &rxq->descs_phys, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002332 if (rxq->descs == NULL)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002333 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002334
2335 BUG_ON(rxq->descs !=
2336 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2337
2338 rxq->last_desc = rxq->size - 1;
2339
2340 /* Set Rx descriptors queue starting address */
2341 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2342 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2343
2344 /* Set Offset */
2345 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
2346
2347 /* Set coalescing pkts and time */
2348 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2349 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2350
2351 /* Fill RXQ with buffers from RX pool */
2352 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
2353 mvneta_rxq_bm_disable(pp, rxq);
2354 mvneta_rxq_fill(pp, rxq, rxq->size);
2355
2356 return 0;
2357}
2358
2359/* Cleanup Rx queue */
2360static void mvneta_rxq_deinit(struct mvneta_port *pp,
2361 struct mvneta_rx_queue *rxq)
2362{
2363 mvneta_rxq_drop_pkts(pp, rxq);
2364
2365 if (rxq->descs)
2366 dma_free_coherent(pp->dev->dev.parent,
2367 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2368 rxq->descs,
2369 rxq->descs_phys);
2370
2371 rxq->descs = NULL;
2372 rxq->last_desc = 0;
2373 rxq->next_desc_to_proc = 0;
2374 rxq->descs_phys = 0;
2375}
2376
2377/* Create and initialize a tx queue */
2378static int mvneta_txq_init(struct mvneta_port *pp,
2379 struct mvneta_tx_queue *txq)
2380{
Gregory CLEMENT50bf8cb2015-12-09 18:23:51 +01002381 int cpu;
2382
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002383 txq->size = pp->tx_ring_size;
2384
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03002385 /* A queue must always have room for at least one skb.
2386 * Therefore, stop the queue when the free entries reaches
2387 * the maximum number of descriptors per skb.
2388 */
2389 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2390 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2391
2392
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002393 /* Allocate memory for TX descriptors */
2394 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2395 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2396 &txq->descs_phys, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002397 if (txq->descs == NULL)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002398 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002399
2400 /* Make sure descriptor address is cache line size aligned */
2401 BUG_ON(txq->descs !=
2402 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2403
2404 txq->last_desc = txq->size - 1;
2405
2406 /* Set maximum bandwidth for enabled TXQs */
2407 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2408 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2409
2410 /* Set Tx descriptors queue starting address */
2411 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2412 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2413
2414 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2415 if (txq->tx_skb == NULL) {
2416 dma_free_coherent(pp->dev->dev.parent,
2417 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2418 txq->descs, txq->descs_phys);
2419 return -ENOMEM;
2420 }
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03002421
2422 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2423 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2424 txq->size * TSO_HEADER_SIZE,
2425 &txq->tso_hdrs_phys, GFP_KERNEL);
2426 if (txq->tso_hdrs == NULL) {
2427 kfree(txq->tx_skb);
2428 dma_free_coherent(pp->dev->dev.parent,
2429 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2430 txq->descs, txq->descs_phys);
2431 return -ENOMEM;
2432 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002433 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2434
Gregory CLEMENT50bf8cb2015-12-09 18:23:51 +01002435 /* Setup XPS mapping */
2436 if (txq_number > 1)
2437 cpu = txq->id % num_present_cpus();
2438 else
2439 cpu = pp->rxq_def % num_present_cpus();
2440 cpumask_set_cpu(cpu, &txq->affinity_mask);
2441 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
2442
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002443 return 0;
2444}
2445
2446/* Free the resources allocated by mvneta_txq_init() */
2447static void mvneta_txq_deinit(struct mvneta_port *pp,
2448 struct mvneta_tx_queue *txq)
2449{
2450 kfree(txq->tx_skb);
2451
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03002452 if (txq->tso_hdrs)
2453 dma_free_coherent(pp->dev->dev.parent,
2454 txq->size * TSO_HEADER_SIZE,
2455 txq->tso_hdrs, txq->tso_hdrs_phys);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002456 if (txq->descs)
2457 dma_free_coherent(pp->dev->dev.parent,
2458 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2459 txq->descs, txq->descs_phys);
2460
2461 txq->descs = NULL;
2462 txq->last_desc = 0;
2463 txq->next_desc_to_proc = 0;
2464 txq->descs_phys = 0;
2465
2466 /* Set minimum bandwidth for disabled TXQs */
2467 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2468 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2469
2470 /* Set Tx descriptors queue starting address and size */
2471 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2472 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2473}
2474
2475/* Cleanup all Tx queues */
2476static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2477{
2478 int queue;
2479
2480 for (queue = 0; queue < txq_number; queue++)
2481 mvneta_txq_deinit(pp, &pp->txqs[queue]);
2482}
2483
2484/* Cleanup all Rx queues */
2485static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2486{
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002487 int queue;
2488
2489 for (queue = 0; queue < rxq_number; queue++)
2490 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002491}
2492
2493
2494/* Init all Rx queues */
2495static int mvneta_setup_rxqs(struct mvneta_port *pp)
2496{
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002497 int queue;
2498
2499 for (queue = 0; queue < rxq_number; queue++) {
2500 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
2501
2502 if (err) {
2503 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2504 __func__, queue);
2505 mvneta_cleanup_rxqs(pp);
2506 return err;
2507 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002508 }
2509
2510 return 0;
2511}
2512
2513/* Init all tx queues */
2514static int mvneta_setup_txqs(struct mvneta_port *pp)
2515{
2516 int queue;
2517
2518 for (queue = 0; queue < txq_number; queue++) {
2519 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2520 if (err) {
2521 netdev_err(pp->dev, "%s: can't create txq=%d\n",
2522 __func__, queue);
2523 mvneta_cleanup_txqs(pp);
2524 return err;
2525 }
2526 }
2527
2528 return 0;
2529}
2530
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002531static void mvneta_percpu_unmask_interrupt(void *arg)
2532{
2533 struct mvneta_port *pp = arg;
2534
2535 /* All the queues are unmasked, but actually only the ones
2536 * mapped to this CPU will be unmasked
2537 */
2538 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2539 MVNETA_RX_INTR_MASK_ALL |
2540 MVNETA_TX_INTR_MASK_ALL |
2541 MVNETA_MISCINTR_INTR_MASK);
2542}
2543
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01002544static void mvneta_percpu_mask_interrupt(void *arg)
2545{
2546 struct mvneta_port *pp = arg;
2547
2548 /* All the queues are masked, but actually only the ones
2549 * mapped to this CPU will be masked
2550 */
2551 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2552 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2553 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2554}
2555
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002556static void mvneta_start_dev(struct mvneta_port *pp)
2557{
Gregory CLEMENT6b125d62016-02-04 22:09:25 +01002558 int cpu;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002559
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002560 mvneta_max_rx_size_set(pp, pp->pkt_size);
2561 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2562
2563 /* start the Rx/Tx activity */
2564 mvneta_port_enable(pp);
2565
2566 /* Enable polling on the port */
Gregory CLEMENT129219e2016-02-04 22:09:23 +01002567 for_each_online_cpu(cpu) {
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002568 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2569
2570 napi_enable(&port->napi);
2571 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002572
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002573 /* Unmask interrupts. It has to be done from each CPU */
Gregory CLEMENT6b125d62016-02-04 22:09:25 +01002574 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2575
Stas Sergeev898b2972015-04-01 20:32:49 +03002576 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2577 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2578 MVNETA_CAUSE_LINK_CHANGE |
2579 MVNETA_CAUSE_PSC_SYNC_CHANGE);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002580
2581 phy_start(pp->phy_dev);
2582 netif_tx_start_all_queues(pp->dev);
2583}
2584
2585static void mvneta_stop_dev(struct mvneta_port *pp)
2586{
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002587 unsigned int cpu;
2588
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002589 phy_stop(pp->phy_dev);
2590
Gregory CLEMENT129219e2016-02-04 22:09:23 +01002591 for_each_online_cpu(cpu) {
Maxime Ripard12bb03b2015-09-25 18:09:36 +02002592 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2593
2594 napi_disable(&port->napi);
2595 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002596
2597 netif_carrier_off(pp->dev);
2598
2599 mvneta_port_down(pp);
2600 netif_tx_stop_all_queues(pp->dev);
2601
2602 /* Stop the port activity */
2603 mvneta_port_disable(pp);
2604
2605 /* Clear all ethernet port interrupts */
2606 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2607 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2608
2609 /* Mask all ethernet port interrupts */
2610 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2611 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2612 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2613
2614 mvneta_tx_reset(pp);
2615 mvneta_rx_reset(pp);
2616}
2617
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002618/* Return a positive, possibly adjusted MTU value if valid, -EINVAL otherwise */
2619static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2620{
2621 if (mtu < 68) {
2622 netdev_err(dev, "cannot change mtu to less than 68\n");
2623 return -EINVAL;
2624 }
2625
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002626 /* 9676 == 9700 - 20 and rounding to 8 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002627 if (mtu > 9676) {
2628 netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
2629 mtu = 9676;
2630 }
2631
2632 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2633 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2634 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2635 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2636 }
2637
2638 return mtu;
2639}
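
/* Note on the alignment check above: ALIGN(x, 8) is (x + 7) & ~7, i.e. x
 * rounded up to the next multiple of 8, so when MVNETA_RX_PKT_SIZE(mtu)
 * is unaligned the netdev_info() message reports it and the MTU is
 * replaced with the rounded-up value before being accepted.
 */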
2640
2641/* Change the device mtu */
2642static int mvneta_change_mtu(struct net_device *dev, int mtu)
2643{
2644 struct mvneta_port *pp = netdev_priv(dev);
2645 int ret;
2646
2647 mtu = mvneta_check_mtu_valid(dev, mtu);
2648 if (mtu < 0)
2649 return -EINVAL;
2650
2651 dev->mtu = mtu;
2652
Simon Guinotb65657f2015-06-30 16:20:22 +02002653 if (!netif_running(dev)) {
2654 netdev_update_features(dev);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002655 return 0;
Simon Guinotb65657f2015-06-30 16:20:22 +02002656 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002657
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002658 /* The interface is running, so we have to force a
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002659 * reallocation of the queues
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002660 */
2661 mvneta_stop_dev(pp);
2662
2663 mvneta_cleanup_txqs(pp);
2664 mvneta_cleanup_rxqs(pp);
2665
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002666 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
willy tarreau8ec2cd42014-01-16 08:20:16 +01002667 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2668 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002669
2670 ret = mvneta_setup_rxqs(pp);
2671 if (ret) {
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002672 netdev_err(dev, "unable to setup rxqs after MTU change\n");
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002673 return ret;
2674 }
2675
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002676 ret = mvneta_setup_txqs(pp);
2677 if (ret) {
2678 netdev_err(dev, "unable to setup txqs after MTU change\n");
2679 return ret;
2680 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002681
2682 mvneta_start_dev(pp);
2683 mvneta_port_up(pp);
2684
Simon Guinotb65657f2015-06-30 16:20:22 +02002685 netdev_update_features(dev);
2686
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002687 return 0;
2688}
2689
Simon Guinotb65657f2015-06-30 16:20:22 +02002690static netdev_features_t mvneta_fix_features(struct net_device *dev,
2691 netdev_features_t features)
2692{
2693 struct mvneta_port *pp = netdev_priv(dev);
2694
2695 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
2696 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
2697 netdev_info(dev,
2698 "Disable IP checksum for MTU greater than %dB\n",
2699 pp->tx_csum_limit);
2700 }
2701
2702 return features;
2703}
2704
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00002705/* Get mac address */
2706static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2707{
2708 u32 mac_addr_l, mac_addr_h;
2709
2710 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
2711 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
2712 addr[0] = (mac_addr_h >> 24) & 0xFF;
2713 addr[1] = (mac_addr_h >> 16) & 0xFF;
2714 addr[2] = (mac_addr_h >> 8) & 0xFF;
2715 addr[3] = mac_addr_h & 0xFF;
2716 addr[4] = (mac_addr_l >> 8) & 0xFF;
2717 addr[5] = mac_addr_l & 0xFF;
2718}
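
/* Hedged sketch (hypothetical helper, mirroring the byte layout read back
 * above): how a MAC address a0:a1:a2:a3:a4:a5 packs into the two registers,
 * the first four octets in MVNETA_MAC_ADDR_HIGH and the last two in the
 * low 16 bits of MVNETA_MAC_ADDR_LOW.
 */
static inline void mvneta_mac_addr_pack_sketch(const unsigned char *addr,
					       u32 *mac_h, u32 *mac_l)
{
	*mac_h = (addr[0] << 24) | (addr[1] << 16) |
		 (addr[2] << 8) | addr[3];
	*mac_l = (addr[4] << 8) | addr[5];
}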
2719
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002720/* Handle setting mac address */
2721static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2722{
2723 struct mvneta_port *pp = netdev_priv(dev);
Ezequiel Garciae68de362014-05-22 20:07:00 -03002724 struct sockaddr *sockaddr = addr;
2725 int ret;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002726
Ezequiel Garciae68de362014-05-22 20:07:00 -03002727 ret = eth_prepare_mac_addr_change(dev, addr);
2728 if (ret < 0)
2729 return ret;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002730 /* Remove previous address table entry */
2731 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2732
2733 /* Set new addr in hw */
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01002734 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002735
Ezequiel Garciae68de362014-05-22 20:07:00 -03002736 eth_commit_mac_addr_change(dev, addr);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002737 return 0;
2738}
2739
2740static void mvneta_adjust_link(struct net_device *ndev)
2741{
2742 struct mvneta_port *pp = netdev_priv(ndev);
2743 struct phy_device *phydev = pp->phy_dev;
2744 int status_change = 0;
2745
2746 if (phydev->link) {
2747 if ((pp->speed != phydev->speed) ||
2748 (pp->duplex != phydev->duplex)) {
2749 u32 val;
2750
2751 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2752 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2753 MVNETA_GMAC_CONFIG_GMII_SPEED |
Stas Sergeev898b2972015-04-01 20:32:49 +03002754 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002755
2756 if (phydev->duplex)
2757 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2758
2759 if (phydev->speed == SPEED_1000)
2760 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
Thomas Petazzoni4d12bc62014-07-08 10:49:43 +02002761 else if (phydev->speed == SPEED_100)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002762 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2763
2764 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2765
2766 pp->duplex = phydev->duplex;
2767 pp->speed = phydev->speed;
2768 }
2769 }
2770
2771 if (phydev->link != pp->link) {
2772 if (!phydev->link) {
2773 pp->duplex = -1;
2774 pp->speed = 0;
2775 }
2776
2777 pp->link = phydev->link;
2778 status_change = 1;
2779 }
2780
2781 if (status_change) {
2782 if (phydev->link) {
Stas Sergeev898b2972015-04-01 20:32:49 +03002783 if (!pp->use_inband_status) {
2784 u32 val = mvreg_read(pp,
2785 MVNETA_GMAC_AUTONEG_CONFIG);
2786 val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
2787 val |= MVNETA_GMAC_FORCE_LINK_PASS;
2788 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2789 val);
2790 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002791 mvneta_port_up(pp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002792 } else {
Stas Sergeev898b2972015-04-01 20:32:49 +03002793 if (!pp->use_inband_status) {
2794 u32 val = mvreg_read(pp,
2795 MVNETA_GMAC_AUTONEG_CONFIG);
2796 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
2797 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
2798 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
2799 val);
2800 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002801 mvneta_port_down(pp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002802 }
Ezequiel Garcia0089b742014-10-31 12:57:20 -03002803 phy_print_status(phydev);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002804 }
2805}
2806
2807static int mvneta_mdio_probe(struct mvneta_port *pp)
2808{
2809 struct phy_device *phy_dev;
2810
2811 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2812 pp->phy_interface);
2813 if (!phy_dev) {
2814 netdev_err(pp->dev, "could not find the PHY\n");
2815 return -ENODEV;
2816 }
2817
2818 phy_dev->supported &= PHY_GBIT_FEATURES;
2819 phy_dev->advertising = phy_dev->supported;
2820
2821 pp->phy_dev = phy_dev;
2822 pp->link = 0;
2823 pp->duplex = 0;
2824 pp->speed = 0;
2825
2826 return 0;
2827}
2828
2829static void mvneta_mdio_remove(struct mvneta_port *pp)
2830{
2831 phy_disconnect(pp->phy_dev);
2832 pp->phy_dev = NULL;
2833}
2834
Maxime Ripardf8642882015-09-25 18:09:38 +02002835static void mvneta_percpu_enable(void *arg)
2836{
2837 struct mvneta_port *pp = arg;
2838
2839 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
2840}
2841
2842static void mvneta_percpu_disable(void *arg)
2843{
2844 struct mvneta_port *pp = arg;
2845
2846 disable_percpu_irq(pp->dev->irq);
2847}
2848
2849static void mvneta_percpu_elect(struct mvneta_port *pp)
2850{
Gregory CLEMENTcad5d842016-02-04 22:09:24 +01002851 int elected_cpu = 0, max_cpu, cpu, i = 0;
Maxime Ripardf8642882015-09-25 18:09:38 +02002852
Gregory CLEMENTcad5d842016-02-04 22:09:24 +01002853 /* Use the cpu associated with the rxq when it is online; in all
2854 * the other cases, use cpu 0, which can't be offline.
2855 */
2856 if (cpu_online(pp->rxq_def))
2857 elected_cpu = pp->rxq_def;
2858
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002859 max_cpu = num_present_cpus();
Maxime Ripardf8642882015-09-25 18:09:38 +02002860
2861 for_each_online_cpu(cpu) {
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002862 int rxq_map = 0, txq_map = 0;
2863 int rxq;
2864
2865 for (rxq = 0; rxq < rxq_number; rxq++)
2866 if ((rxq % max_cpu) == cpu)
2867 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
2868
Gregory CLEMENTcad5d842016-02-04 22:09:24 +01002869 if (cpu == elected_cpu)
Gregory CLEMENT50bf8cb2015-12-09 18:23:51 +01002870 /* Map the default receive queue to the
2871 * elected CPU
Maxime Ripardf8642882015-09-25 18:09:38 +02002872 */
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002873 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
Gregory CLEMENT50bf8cb2015-12-09 18:23:51 +01002874
2875 /* We update the TX queue map only if we have one
2876 * queue. In this case we associate the TX queue to
2877 * the CPU bound to the default RX queue
2878 */
2879 if (txq_number == 1)
Gregory CLEMENTcad5d842016-02-04 22:09:24 +01002880 txq_map = (cpu == elected_cpu) ?
Gregory CLEMENT50bf8cb2015-12-09 18:23:51 +01002881 MVNETA_CPU_TXQ_ACCESS(1) : 0;
2882 else
2883 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
2884 MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
2885
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002886 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
2887
2888 /* Update the interrupt mask on each CPU according to the
2889 * new mapping
2890 */
2891 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
2892 pp, true);
Maxime Ripardf8642882015-09-25 18:09:38 +02002893 i++;
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002894
Maxime Ripardf8642882015-09-25 18:09:38 +02002895 }
2896}
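
/* Worked example of the distribution above: with 4 present CPUs and 8 RX
 * queues, the rxq % max_cpu rule gives CPU0 -> {rxq0, rxq4}, CPU1 ->
 * {rxq1, rxq5}, and so on; the elected CPU is additionally mapped to
 * rxq_def, and with a single TX queue that same CPU owns TXQ access too.
 */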
2897
2898static int mvneta_percpu_notifier(struct notifier_block *nfb,
2899 unsigned long action, void *hcpu)
2900{
2901 struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
2902 cpu_notifier);
2903 int cpu = (unsigned long)hcpu, other_cpu;
2904 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2905
2906 switch (action) {
2907 case CPU_ONLINE:
2908 case CPU_ONLINE_FROZEN:
2909 netif_tx_stop_all_queues(pp->dev);
2910
2911 /* We have to synchronise on the napi of each CPU
2912 * except the one just being woken up
2913 */
2914 for_each_online_cpu(other_cpu) {
2915 if (other_cpu != cpu) {
2916 struct mvneta_pcpu_port *other_port =
2917 per_cpu_ptr(pp->ports, other_cpu);
2918
2919 napi_synchronize(&other_port->napi);
2920 }
2921 }
2922
2923 /* Mask all ethernet port interrupts */
2924 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2925 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2926 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2927 napi_enable(&port->napi);
2928
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002929
2930 /* Enable per-CPU interrupts on the CPU that is
2931 * brought up.
2932 */
2933 smp_call_function_single(cpu, mvneta_percpu_enable,
2934 pp, true);
2935
Maxime Ripardf8642882015-09-25 18:09:38 +02002936 /* Enable per-CPU interrupt on the one CPU we care
2937 * about.
2938 */
2939 mvneta_percpu_elect(pp);
2940
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01002941 /* Unmask all ethernet port interrupts; since this
2942 * notifier is called for each CPU, the CPU-to-queue
2943 * mapping gets applied on every one of them
2944 */
Maxime Ripardf8642882015-09-25 18:09:38 +02002945 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2946 MVNETA_RX_INTR_MASK(rxq_number) |
2947 MVNETA_TX_INTR_MASK(txq_number) |
2948 MVNETA_MISCINTR_INTR_MASK);
2949 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2950 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2951 MVNETA_CAUSE_LINK_CHANGE |
2952 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2953 netif_tx_start_all_queues(pp->dev);
2954 break;
2955 case CPU_DOWN_PREPARE:
2956 case CPU_DOWN_PREPARE_FROZEN:
2957 netif_tx_stop_all_queues(pp->dev);
2958 /* Mask all ethernet port interrupts */
2959 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2960 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2961 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2962
2963 napi_synchronize(&port->napi);
2964 napi_disable(&port->napi);
2965 /* Disable per-CPU interrupts on the CPU that is
2966 * brought down.
2967 */
2968 smp_call_function_single(cpu, mvneta_percpu_disable,
2969 pp, true);
2970
2971 break;
2972 case CPU_DEAD:
2973 case CPU_DEAD_FROZEN:
2974 /* Check if a new CPU must be elected now that this one is down */
2975 mvneta_percpu_elect(pp);
2976 /* Unmask all ethernet port interrupts */
2977 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2978 MVNETA_RX_INTR_MASK(rxq_number) |
2979 MVNETA_TX_INTR_MASK(txq_number) |
2980 MVNETA_MISCINTR_INTR_MASK);
2981 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2982 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2983 MVNETA_CAUSE_LINK_CHANGE |
2984 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2985 netif_tx_start_all_queues(pp->dev);
2986 break;
2987 }
2988
2989 return NOTIFY_OK;
2990}
2991
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002992static int mvneta_open(struct net_device *dev)
2993{
2994 struct mvneta_port *pp = netdev_priv(dev);
Gregory CLEMENT6b125d62016-02-04 22:09:25 +01002995 int ret;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002996
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002997 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
willy tarreau8ec2cd42014-01-16 08:20:16 +01002998 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2999 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003000
3001 ret = mvneta_setup_rxqs(pp);
3002 if (ret)
3003 return ret;
3004
3005 ret = mvneta_setup_txqs(pp);
3006 if (ret)
3007 goto err_cleanup_rxqs;
3008
3009 /* Connect to port interrupt line */
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003010 ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
3011 MVNETA_DRIVER_NAME, pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003012 if (ret) {
3013 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
3014 goto err_cleanup_txqs;
3015 }
3016
Maxime Ripardf8642882015-09-25 18:09:38 +02003017 /* Even though the documentation says that request_percpu_irq
3018 * doesn't enable the interrupts automatically, it actually
3019 * does so on the local CPU.
3020 *
3021 * Make sure it's disabled.
3022 */
3023 mvneta_percpu_disable(pp);
3024
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01003025	/* Enable the per-CPU interrupt on all CPUs so that our RX
3026	 * queue interrupts can be handled
3027 */
Gregory CLEMENT6b125d62016-02-04 22:09:25 +01003028 on_each_cpu(mvneta_percpu_enable, pp, true);
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01003029
Maxime Ripardf8642882015-09-25 18:09:38 +02003030
3031 /* Register a CPU notifier to handle the case where our CPU
3032 * might be taken offline.
3033 */
3034 register_cpu_notifier(&pp->cpu_notifier);
3035
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003036	/* The link is down by default */
3037 netif_carrier_off(pp->dev);
3038
3039 ret = mvneta_mdio_probe(pp);
3040 if (ret < 0) {
3041 netdev_err(dev, "cannot probe MDIO bus\n");
3042 goto err_free_irq;
3043 }
3044
3045 mvneta_start_dev(pp);
3046
3047 return 0;
3048
3049err_free_irq:
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003050 free_percpu_irq(pp->dev->irq, pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003051err_cleanup_txqs:
3052 mvneta_cleanup_txqs(pp);
3053err_cleanup_rxqs:
3054 mvneta_cleanup_rxqs(pp);
3055 return ret;
3056}
3057
3058/* Stop the port, free port interrupt line */
3059static int mvneta_stop(struct net_device *dev)
3060{
3061 struct mvneta_port *pp = netdev_priv(dev);
3062
3063 mvneta_stop_dev(pp);
3064 mvneta_mdio_remove(pp);
Maxime Ripardf8642882015-09-25 18:09:38 +02003065 unregister_cpu_notifier(&pp->cpu_notifier);
Gregory CLEMENT129219e2016-02-04 22:09:23 +01003066 on_each_cpu(mvneta_percpu_disable, pp, true);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003067 free_percpu_irq(dev->irq, pp->ports);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003068 mvneta_cleanup_rxqs(pp);
3069 mvneta_cleanup_txqs(pp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003070
3071 return 0;
3072}
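
/* Editor's note: teardown mirrors mvneta_open() in reverse. The per-CPU
 * interrupt is disabled on every CPU (on_each_cpu) before the per-CPU
 * IRQ line itself is freed, which is why the two calls above are ordered
 * the way they are.
 */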
3073
Thomas Petazzoni15f59452013-09-04 16:26:52 +02003074static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3075{
3076 struct mvneta_port *pp = netdev_priv(dev);
Thomas Petazzoni15f59452013-09-04 16:26:52 +02003077
3078 if (!pp->phy_dev)
3079 return -ENOTSUPP;
3080
Stas Sergeevecf7b362015-04-01 19:23:29 +03003081 return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
Thomas Petazzoni15f59452013-09-04 16:26:52 +02003082}
3083
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003084/* Ethtool methods */
3085
3086/* Get settings (PHY address, speed) for ethtool */
3087int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3088{
3089 struct mvneta_port *pp = netdev_priv(dev);
3090
3091 if (!pp->phy_dev)
3092 return -ENODEV;
3093
3094 return phy_ethtool_gset(pp->phy_dev, cmd);
3095}
3096
3097/* Set settings (PHY address, speed) for ethtool */
3098int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3099{
3100 struct mvneta_port *pp = netdev_priv(dev);
Stas Sergeev0c0744f2015-12-02 20:35:11 +03003101 struct phy_device *phydev = pp->phy_dev;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003102
Stas Sergeev0c0744f2015-12-02 20:35:11 +03003103 if (!phydev)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003104 return -ENODEV;
3105
Stas Sergeev0c0744f2015-12-02 20:35:11 +03003106 if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
3107 u32 val;
3108
3109 mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE);
3110
3111 if (cmd->autoneg == AUTONEG_DISABLE) {
3112 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3113 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
3114 MVNETA_GMAC_CONFIG_GMII_SPEED |
3115 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
3116
3117 if (phydev->duplex)
3118 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3119
3120 if (phydev->speed == SPEED_1000)
3121 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3122 else if (phydev->speed == SPEED_100)
3123 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
3124
3125 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3126 }
3127
3128 pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE);
3129 netdev_info(pp->dev, "autoneg status set to %i\n",
3130 pp->use_inband_status);
3131
3132 if (netif_running(dev)) {
3133 mvneta_port_down(pp);
3134 mvneta_port_up(pp);
3135 }
3136 }
3137
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003138 return phy_ethtool_sset(pp->phy_dev, cmd);
3139}
3140
3141/* Set interrupt coalescing for ethtool */
3142static int mvneta_ethtool_set_coalesce(struct net_device *dev,
3143 struct ethtool_coalesce *c)
3144{
3145 struct mvneta_port *pp = netdev_priv(dev);
3146 int queue;
3147
3148 for (queue = 0; queue < rxq_number; queue++) {
3149 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3150 rxq->time_coal = c->rx_coalesce_usecs;
3151 rxq->pkts_coal = c->rx_max_coalesced_frames;
3152 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3153 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3154 }
3155
3156 for (queue = 0; queue < txq_number; queue++) {
3157 struct mvneta_tx_queue *txq = &pp->txqs[queue];
3158 txq->done_pkts_coal = c->tx_max_coalesced_frames;
3159 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3160 }
3161
3162 return 0;
3163}
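
/* Illustrative usage (the interface name is an assumption):
 *
 *   ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-frames 16
 *
 * rx-usecs/rx-frames land in time_coal/pkts_coal of every RX queue and
 * tx-frames in done_pkts_coal of every TX queue, per the loops above.
 */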
3164
3165/* Get interrupt coalescing for ethtool */
3166static int mvneta_ethtool_get_coalesce(struct net_device *dev,
3167 struct ethtool_coalesce *c)
3168{
3169 struct mvneta_port *pp = netdev_priv(dev);
3170
3171 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
3172 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
3173
3174 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
3175 return 0;
3176}
3177
3178
3179static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
3180 struct ethtool_drvinfo *drvinfo)
3181{
3182 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
3183 sizeof(drvinfo->driver));
3184 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
3185 sizeof(drvinfo->version));
3186 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
3187 sizeof(drvinfo->bus_info));
3188}
3189
3190
3191static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
3192 struct ethtool_ringparam *ring)
3193{
3194 struct mvneta_port *pp = netdev_priv(netdev);
3195
3196 ring->rx_max_pending = MVNETA_MAX_RXD;
3197 ring->tx_max_pending = MVNETA_MAX_TXD;
3198 ring->rx_pending = pp->rx_ring_size;
3199 ring->tx_pending = pp->tx_ring_size;
3200}
3201
3202static int mvneta_ethtool_set_ringparam(struct net_device *dev,
3203 struct ethtool_ringparam *ring)
3204{
3205 struct mvneta_port *pp = netdev_priv(dev);
3206
3207 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
3208 return -EINVAL;
3209 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
3210 ring->rx_pending : MVNETA_MAX_RXD;
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03003211
3212 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
3213 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
3214 if (pp->tx_ring_size != ring->tx_pending)
3215 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
3216 pp->tx_ring_size, ring->tx_pending);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003217
3218 if (netif_running(dev)) {
3219 mvneta_stop(dev);
3220 if (mvneta_open(dev)) {
3221 netdev_err(dev,
3222 "error on opening device after ring param change\n");
3223 return -ENOMEM;
3224 }
3225 }
3226
3227 return 0;
3228}
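
/* Illustrative usage (the interface name is an assumption):
 *
 *   ethtool -G eth0 rx 256 tx 1024
 *
 * RX is silently capped at MVNETA_MAX_RXD; TX is clamped between
 * MVNETA_MAX_SKB_DESCS * 2 and MVNETA_MAX_TXD with a warning, and a
 * running interface is bounced for the new sizes to take effect.
 */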
3229
Russell King9b0cdef2015-10-22 18:37:30 +01003230static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
3231 u8 *data)
3232{
3233 if (sset == ETH_SS_STATS) {
3234 int i;
3235
3236 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3237 memcpy(data + i * ETH_GSTRING_LEN,
3238 mvneta_statistics[i].name, ETH_GSTRING_LEN);
3239 }
3240}
3241
3242static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
3243{
3244 const struct mvneta_statistic *s;
3245 void __iomem *base = pp->base;
3246 u32 high, low, val;
Jisheng Zhang2c832292016-01-20 16:36:25 +08003247 u64 val64;
Russell King9b0cdef2015-10-22 18:37:30 +01003248 int i;
3249
3250 for (i = 0, s = mvneta_statistics;
3251 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
3252 s++, i++) {
Russell King9b0cdef2015-10-22 18:37:30 +01003253 switch (s->type) {
3254 case T_REG_32:
3255 val = readl_relaxed(base + s->offset);
Jisheng Zhang2c832292016-01-20 16:36:25 +08003256 pp->ethtool_stats[i] += val;
Russell King9b0cdef2015-10-22 18:37:30 +01003257 break;
3258 case T_REG_64:
3259			/* The docs say to read the low 32 bits first, then the high */
3260 low = readl_relaxed(base + s->offset);
3261 high = readl_relaxed(base + s->offset + 4);
Jisheng Zhang2c832292016-01-20 16:36:25 +08003262 val64 = (u64)high << 32 | low;
3263 pp->ethtool_stats[i] += val64;
Russell King9b0cdef2015-10-22 18:37:30 +01003264 break;
3265 }
Russell King9b0cdef2015-10-22 18:37:30 +01003266 }
3267}
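
/* Editor's note: accumulating into pp->ethtool_stats[] assumes the MIB
 * counters are clear-on-read, so each pass adds the delta since the last
 * read; it also extends the 32-bit hardware counters to 64 bits.
 */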
3268
3269static void mvneta_ethtool_get_stats(struct net_device *dev,
3270 struct ethtool_stats *stats, u64 *data)
3271{
3272 struct mvneta_port *pp = netdev_priv(dev);
3273 int i;
3274
3275 mvneta_ethtool_update_stats(pp);
3276
3277 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
3278 *data++ = pp->ethtool_stats[i];
3279}
3280
3281static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
3282{
3283 if (sset == ETH_SS_STATS)
3284 return ARRAY_SIZE(mvneta_statistics);
3285 return -EOPNOTSUPP;
3286}
3287
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01003288static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
3289{
3290 return MVNETA_RSS_LU_TABLE_SIZE;
3291}
3292
3293static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
3294 struct ethtool_rxnfc *info,
3295 u32 *rules __always_unused)
3296{
3297 switch (info->cmd) {
3298 case ETHTOOL_GRXRINGS:
3299 info->data = rxq_number;
3300 return 0;
3301 case ETHTOOL_GRXFH:
3302 return -EOPNOTSUPP;
3303 default:
3304 return -EOPNOTSUPP;
3305 }
3306}
3307
3308static int mvneta_config_rss(struct mvneta_port *pp)
3309{
3310 int cpu;
3311 u32 val;
3312
3313 netif_tx_stop_all_queues(pp->dev);
3314
Gregory CLEMENT6b125d62016-02-04 22:09:25 +01003315 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01003316
3317 /* We have to synchronise on the napi of each CPU */
3318 for_each_online_cpu(cpu) {
3319 struct mvneta_pcpu_port *pcpu_port =
3320 per_cpu_ptr(pp->ports, cpu);
3321
3322 napi_synchronize(&pcpu_port->napi);
3323 napi_disable(&pcpu_port->napi);
3324 }
3325
3326 pp->rxq_def = pp->indir[0];
3327
3328 /* Update unicast mapping */
3329 mvneta_set_rx_mode(pp->dev);
3330
3331	/* Update the port config register so all RX queue types use the new default queue */
3332 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
3333 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
3334
3335 /* Update the elected CPU matching the new rxq_def */
3336 mvneta_percpu_elect(pp);
3337
3338 /* We have to synchronise on the napi of each CPU */
3339 for_each_online_cpu(cpu) {
3340 struct mvneta_pcpu_port *pcpu_port =
3341 per_cpu_ptr(pp->ports, cpu);
3342
3343 napi_enable(&pcpu_port->napi);
3344 }
3345
3346 netif_tx_start_all_queues(pp->dev);
3347
3348 return 0;
3349}
3350
3351static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
3352 const u8 *key, const u8 hfunc)
3353{
3354 struct mvneta_port *pp = netdev_priv(dev);
3355 /* We require at least one supported parameter to be changed
3356 * and no change in any of the unsupported parameters
3357 */
3358 if (key ||
3359 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
3360 return -EOPNOTSUPP;
3361
3362 if (!indir)
3363 return 0;
3364
3365 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
3366
3367 return mvneta_config_rss(pp);
3368}
3369
3370static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
3371 u8 *hfunc)
3372{
3373 struct mvneta_port *pp = netdev_priv(dev);
3374
3375 if (hfunc)
3376 *hfunc = ETH_RSS_HASH_TOP;
3377
3378 if (!indir)
3379 return 0;
3380
3381 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
3382
3383 return 0;
3384}
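
/* Illustrative usage (the interface name is an assumption):
 *
 *   ethtool -x eth0             # dump the RSS indirection table
 *   ethtool -X eth0 weight 1 1  # spread flows over RX queues 0 and 1
 *
 * Only the indirection table is configurable here: a hash key, or any
 * hash function other than Toeplitz (ETH_RSS_HASH_TOP), is rejected.
 */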
3385
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003386static const struct net_device_ops mvneta_netdev_ops = {
3387 .ndo_open = mvneta_open,
3388 .ndo_stop = mvneta_stop,
3389 .ndo_start_xmit = mvneta_tx,
3390 .ndo_set_rx_mode = mvneta_set_rx_mode,
3391 .ndo_set_mac_address = mvneta_set_mac_addr,
3392 .ndo_change_mtu = mvneta_change_mtu,
Simon Guinotb65657f2015-06-30 16:20:22 +02003393 .ndo_fix_features = mvneta_fix_features,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003394 .ndo_get_stats64 = mvneta_get_stats64,
Thomas Petazzoni15f59452013-09-04 16:26:52 +02003395 .ndo_do_ioctl = mvneta_ioctl,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003396};
3397
3398const struct ethtool_ops mvneta_eth_tool_ops = {
3399 .get_link = ethtool_op_get_link,
3400 .get_settings = mvneta_ethtool_get_settings,
3401 .set_settings = mvneta_ethtool_set_settings,
3402 .set_coalesce = mvneta_ethtool_set_coalesce,
3403 .get_coalesce = mvneta_ethtool_get_coalesce,
3404 .get_drvinfo = mvneta_ethtool_get_drvinfo,
3405 .get_ringparam = mvneta_ethtool_get_ringparam,
3406 .set_ringparam = mvneta_ethtool_set_ringparam,
Russell King9b0cdef2015-10-22 18:37:30 +01003407 .get_strings = mvneta_ethtool_get_strings,
3408 .get_ethtool_stats = mvneta_ethtool_get_stats,
3409 .get_sset_count = mvneta_ethtool_get_sset_count,
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01003410 .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
3411 .get_rxnfc = mvneta_ethtool_get_rxnfc,
3412 .get_rxfh = mvneta_ethtool_get_rxfh,
3413 .set_rxfh = mvneta_ethtool_set_rxfh,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003414};
3415
3416/* Initialize the hardware */
Ezequiel Garcia96728502014-05-22 20:06:59 -03003417static int mvneta_init(struct device *dev, struct mvneta_port *pp)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003418{
3419 int queue;
3420
3421 /* Disable port */
3422 mvneta_port_disable(pp);
3423
3424 /* Set port default values */
3425 mvneta_defaults_set(pp);
3426
Ezequiel Garcia96728502014-05-22 20:06:59 -03003427 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
3428 GFP_KERNEL);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003429 if (!pp->txqs)
3430 return -ENOMEM;
3431
3432 /* Initialize TX descriptor rings */
3433 for (queue = 0; queue < txq_number; queue++) {
3434 struct mvneta_tx_queue *txq = &pp->txqs[queue];
3435 txq->id = queue;
3436 txq->size = pp->tx_ring_size;
3437 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
3438 }
3439
Ezequiel Garcia96728502014-05-22 20:06:59 -03003440 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
3441 GFP_KERNEL);
3442 if (!pp->rxqs)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003443 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003444
3445 /* Create Rx descriptor rings */
3446 for (queue = 0; queue < rxq_number; queue++) {
3447 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
3448 rxq->id = queue;
3449 rxq->size = pp->rx_ring_size;
3450 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
3451 rxq->time_coal = MVNETA_RX_COAL_USEC;
3452 }
3453
3454 return 0;
3455}
3456
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003457/* Platform glue: initialize the MBUS address decoding windows */
Greg KH03ce7582012-12-21 13:42:15 +00003458static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
3459 const struct mbus_dram_target_info *dram)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003460{
3461 u32 win_enable;
3462 u32 win_protect;
3463 int i;
3464
3465 for (i = 0; i < 6; i++) {
3466 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
3467 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
3468
3469 if (i < 4)
3470 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
3471 }
3472
3473 win_enable = 0x3f;
3474 win_protect = 0;
3475
3476 for (i = 0; i < dram->num_cs; i++) {
3477 const struct mbus_dram_window *cs = dram->cs + i;
3478 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
3479 (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
3480
3481 mvreg_write(pp, MVNETA_WIN_SIZE(i),
3482 (cs->size - 1) & 0xffff0000);
3483
3484 win_enable &= ~(1 << i);
3485 win_protect |= 3 << (2 * i);
3486 }
3487
3488 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
Marcin Wojtasdb6ba9a2015-11-30 13:27:41 +01003489 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003490}
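
/* Editor's note: the loop above opens one MBUS window per DRAM chip
 * select and clears the matching bit in win_enable, assuming the usual
 * Marvell convention where a set bit in the Base Address Enable register
 * disables that window; only windows backed by a real CS stay enabled.
 */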
3491
3492/* Power up the port */
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003493static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003494{
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003495 u32 ctrl;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003496
3497 /* MAC Cause register should be cleared */
3498 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
3499
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003500 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003501
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003502 /* Even though it might look weird, when we're configured in
3503 * SGMII or QSGMII mode, the RGMII bit needs to be set.
3504 */
3505	switch (phy_mode) {
3506 case PHY_INTERFACE_MODE_QSGMII:
3507 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
3508 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
3509 break;
3510 case PHY_INTERFACE_MODE_SGMII:
3511 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
3512 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
3513 break;
3514 case PHY_INTERFACE_MODE_RGMII:
3515 case PHY_INTERFACE_MODE_RGMII_ID:
3516 ctrl |= MVNETA_GMAC2_PORT_RGMII;
3517 break;
3518 default:
3519 return -EINVAL;
3520 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003521
3522 /* Cancel Port Reset */
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003523 ctrl &= ~MVNETA_GMAC2_PORT_RESET;
3524 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003525
3526 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
3527 MVNETA_GMAC2_PORT_RESET) != 0)
3528 continue;
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003529
3530 return 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003531}
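
/* Illustrative DT fragment (node and PHY labels are assumptions) showing
 * the phy-mode values accepted by mvneta_port_power_up():
 *
 *   &eth0 {
 *           phy = <&phy0>;
 *           phy-mode = "rgmii-id";   (or "rgmii", "sgmii", "qsgmii")
 *   };
 */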
3532
3533/* Device initialization routine */
Greg KH03ce7582012-12-21 13:42:15 +00003534static int mvneta_probe(struct platform_device *pdev)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003535{
3536 const struct mbus_dram_target_info *dram_target_info;
Thomas Petazzonic3f0dd32014-03-27 11:39:29 +01003537 struct resource *res;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003538 struct device_node *dn = pdev->dev.of_node;
3539 struct device_node *phy_node;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003540 struct mvneta_port *pp;
3541 struct net_device *dev;
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00003542 const char *dt_mac_addr;
3543 char hw_mac_addr[ETH_ALEN];
3544 const char *mac_from;
Stas Sergeevf8af8e62015-07-20 17:49:58 -07003545 const char *managed;
Marcin Wojtas9110ee02015-11-30 13:27:45 +01003546 int tx_csum_limit;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003547 int phy_mode;
3548 int err;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003549 int cpu;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003550
Willy Tarreauee40a112013-04-11 23:00:37 +02003551 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003552 if (!dev)
3553 return -ENOMEM;
3554
3555 dev->irq = irq_of_parse_and_map(dn, 0);
3556 if (dev->irq == 0) {
3557 err = -EINVAL;
3558 goto err_free_netdev;
3559 }
3560
3561 phy_node = of_parse_phandle(dn, "phy", 0);
3562 if (!phy_node) {
Thomas Petazzoni83895be2014-05-16 16:14:06 +02003563 if (!of_phy_is_fixed_link(dn)) {
3564 dev_err(&pdev->dev, "no PHY specified\n");
3565 err = -ENODEV;
3566 goto err_free_irq;
3567 }
3568
3569 err = of_phy_register_fixed_link(dn);
3570 if (err < 0) {
3571 dev_err(&pdev->dev, "cannot register fixed PHY\n");
3572 goto err_free_irq;
3573 }
3574
3575 /* In the case of a fixed PHY, the DT node associated
3576		 * with the PHY is the Ethernet MAC DT node.
3577 */
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02003578 phy_node = of_node_get(dn);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003579 }
3580
3581 phy_mode = of_get_phy_mode(dn);
3582 if (phy_mode < 0) {
3583 dev_err(&pdev->dev, "incorrect phy-mode\n");
3584 err = -EINVAL;
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02003585 goto err_put_phy_node;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003586 }
3587
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003588 dev->tx_queue_len = MVNETA_MAX_TXD;
3589 dev->watchdog_timeo = 5 * HZ;
3590 dev->netdev_ops = &mvneta_netdev_ops;
3591
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00003592 dev->ethtool_ops = &mvneta_eth_tool_ops;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003593
3594 pp = netdev_priv(dev);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003595 pp->phy_node = phy_node;
3596 pp->phy_interface = phy_mode;
Stas Sergeevf8af8e62015-07-20 17:49:58 -07003597
3598 err = of_property_read_string(dn, "managed", &managed);
3599 pp->use_inband_status = (err == 0 &&
3600 strcmp(managed, "in-band-status") == 0);
Maxime Ripardf8642882015-09-25 18:09:38 +02003601 pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003602
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01003603 pp->rxq_def = rxq_def;
3604
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01003605 pp->indir[0] = rxq_def;
3606
Jisheng Zhang2804ba42016-01-20 19:27:23 +08003607 pp->clk = devm_clk_get(&pdev->dev, "core");
3608 if (IS_ERR(pp->clk))
3609 pp->clk = devm_clk_get(&pdev->dev, NULL);
Thomas Petazzoni189dd622012-11-19 14:15:25 +01003610 if (IS_ERR(pp->clk)) {
3611 err = PTR_ERR(pp->clk);
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02003612 goto err_put_phy_node;
Thomas Petazzoni189dd622012-11-19 14:15:25 +01003613 }
3614
3615 clk_prepare_enable(pp->clk);
3616
Jisheng Zhang15cc4a42016-01-20 19:27:24 +08003617 pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
3618 if (!IS_ERR(pp->clk_bus))
3619 clk_prepare_enable(pp->clk_bus);
3620
Thomas Petazzonic3f0dd32014-03-27 11:39:29 +01003621 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3622 pp->base = devm_ioremap_resource(&pdev->dev, res);
3623 if (IS_ERR(pp->base)) {
3624 err = PTR_ERR(pp->base);
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02003625 goto err_clk;
3626 }
3627
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003628 /* Alloc per-cpu port structure */
3629 pp->ports = alloc_percpu(struct mvneta_pcpu_port);
3630 if (!pp->ports) {
3631 err = -ENOMEM;
3632 goto err_clk;
3633 }
3634
willy tarreau74c41b02014-01-16 08:20:08 +01003635 /* Alloc per-cpu stats */
WANG Cong1c213bd2014-02-13 11:46:28 -08003636 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
willy tarreau74c41b02014-01-16 08:20:08 +01003637 if (!pp->stats) {
3638 err = -ENOMEM;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003639 goto err_free_ports;
willy tarreau74c41b02014-01-16 08:20:08 +01003640 }
3641
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00003642 dt_mac_addr = of_get_mac_address(dn);
Luka Perkov6c7a9a32013-10-30 00:10:01 +01003643 if (dt_mac_addr) {
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00003644 mac_from = "device tree";
3645 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
3646 } else {
3647 mvneta_get_mac_addr(pp, hw_mac_addr);
3648 if (is_valid_ether_addr(hw_mac_addr)) {
3649 mac_from = "hardware";
3650 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
3651 } else {
3652 mac_from = "random";
3653 eth_hw_addr_random(dev);
3654 }
3655 }
3656
Marcin Wojtas9110ee02015-11-30 13:27:45 +01003657 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
3658 if (tx_csum_limit < 0 ||
3659 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
3660 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
3661 dev_info(&pdev->dev,
3662 "Wrong TX csum limit in DT, set to %dB\n",
3663 MVNETA_TX_CSUM_DEF_SIZE);
3664 }
3665 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
3666 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
3667 } else {
3668 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
3669 }
3670
3671 pp->tx_csum_limit = tx_csum_limit;
Simon Guinotb65657f2015-06-30 16:20:22 +02003672
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003673 pp->tx_ring_size = MVNETA_MAX_TXD;
3674 pp->rx_ring_size = MVNETA_MAX_RXD;
3675
3676 pp->dev = dev;
3677 SET_NETDEV_DEV(dev, &pdev->dev);
3678
Ezequiel Garcia96728502014-05-22 20:06:59 -03003679 err = mvneta_init(&pdev->dev, pp);
3680 if (err < 0)
willy tarreau74c41b02014-01-16 08:20:08 +01003681 goto err_free_stats;
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003682
3683 err = mvneta_port_power_up(pp, phy_mode);
3684 if (err < 0) {
3685 dev_err(&pdev->dev, "can't power up port\n");
Ezequiel Garcia96728502014-05-22 20:06:59 -03003686 goto err_free_stats;
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003687 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003688
3689 dram_target_info = mv_mbus_dram_info();
3690 if (dram_target_info)
3691 mvneta_conf_mbus_windows(pp, dram_target_info);
3692
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003693 for_each_present_cpu(cpu) {
3694 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3695
3696 netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
3697 port->pp = pp;
3698 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003699
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03003700 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
Ezequiel Garcia01ef26c2014-05-19 13:59:53 -03003701 dev->hw_features |= dev->features;
3702 dev->vlan_features |= dev->features;
willy tarreaub50b72d2013-04-06 08:47:01 +00003703 dev->priv_flags |= IFF_UNICAST_FLT;
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03003704 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
willy tarreaub50b72d2013-04-06 08:47:01 +00003705
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003706 err = register_netdev(dev);
3707 if (err < 0) {
3708 dev_err(&pdev->dev, "failed to register\n");
Ezequiel Garcia96728502014-05-22 20:06:59 -03003709 goto err_free_stats;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003710 }
3711
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00003712 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
3713 dev->dev_addr);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003714
3715 platform_set_drvdata(pdev, pp->dev);
3716
Stas Sergeev898b2972015-04-01 20:32:49 +03003717 if (pp->use_inband_status) {
3718 struct phy_device *phy = of_phy_find_device(dn);
3719
3720 mvneta_fixed_link_update(pp, phy);
Russell King04d53b22015-09-24 20:36:18 +01003721
Andrew Lunne5a03bf2016-01-06 20:11:16 +01003722 put_device(&phy->mdio.dev);
Stas Sergeev898b2972015-04-01 20:32:49 +03003723 }
3724
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003725 return 0;
3726
willy tarreau74c41b02014-01-16 08:20:08 +01003727err_free_stats:
3728 free_percpu(pp->stats);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003729err_free_ports:
3730 free_percpu(pp->ports);
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02003731err_clk:
Jisheng Zhang15cc4a42016-01-20 19:27:24 +08003732 clk_disable_unprepare(pp->clk_bus);
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02003733 clk_disable_unprepare(pp->clk);
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02003734err_put_phy_node:
3735 of_node_put(phy_node);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003736err_free_irq:
3737 irq_dispose_mapping(dev->irq);
3738err_free_netdev:
3739 free_netdev(dev);
3740 return err;
3741}
3742
3743/* Device removal routine */
Greg KH03ce7582012-12-21 13:42:15 +00003744static int mvneta_remove(struct platform_device *pdev)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003745{
3746 struct net_device *dev = platform_get_drvdata(pdev);
3747 struct mvneta_port *pp = netdev_priv(dev);
3748
3749 unregister_netdev(dev);
Jisheng Zhang15cc4a42016-01-20 19:27:24 +08003750 clk_disable_unprepare(pp->clk_bus);
Thomas Petazzoni189dd622012-11-19 14:15:25 +01003751 clk_disable_unprepare(pp->clk);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003752 free_percpu(pp->ports);
willy tarreau74c41b02014-01-16 08:20:08 +01003753 free_percpu(pp->stats);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003754 irq_dispose_mapping(dev->irq);
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02003755 of_node_put(pp->phy_node);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003756 free_netdev(dev);
3757
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003758 return 0;
3759}
3760
3761static const struct of_device_id mvneta_match[] = {
3762 { .compatible = "marvell,armada-370-neta" },
Simon Guinotf522a972015-06-30 16:20:20 +02003763 { .compatible = "marvell,armada-xp-neta" },
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003764 { }
3765};
3766MODULE_DEVICE_TABLE(of, mvneta_match);
3767
3768static struct platform_driver mvneta_driver = {
3769 .probe = mvneta_probe,
Greg KH03ce7582012-12-21 13:42:15 +00003770 .remove = mvneta_remove,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003771 .driver = {
3772 .name = MVNETA_DRIVER_NAME,
3773 .of_match_table = mvneta_match,
3774 },
3775};
3776
3777module_platform_driver(mvneta_driver);
3778
3779MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
3780MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
3781MODULE_LICENSE("GPL");
3782
3783module_param(rxq_number, int, S_IRUGO);
3784module_param(txq_number, int, S_IRUGO);
3785
3786module_param(rxq_def, int, S_IRUGO);
willy tarreauf19fadf2014-01-16 08:20:17 +01003787module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
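
/* Illustrative module load (values are examples only):
 *
 *   modprobe mvneta rxq_def=1 rx_copybreak=256
 *
 * rx_copybreak is S_IWUSR, so it can also be changed at runtime through
 * /sys/module/mvneta/parameters/rx_copybreak; the other parameters are
 * read-only once the module is loaded.
 */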