/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
#define      MVNETA_RXQ_HW_BUF_ALLOC		BIT(0)
#define      MVNETA_RXQ_SHORT_POOL_ID_SHIFT	4
#define      MVNETA_RXQ_SHORT_POOL_ID_MASK	0x30
#define      MVNETA_RXQ_LONG_POOL_ID_SHIFT	6
#define      MVNETA_RXQ_LONG_POOL_ID_MASK	0xc0
#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
#define      MVNETA_RXQ_NON_OCCUPIED(v)		((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)		(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)			(0x14a0 + ((q) << 2))
#define      MVNETA_RXQ_BUF_SIZE_SHIFT		19
#define      MVNETA_RXQ_BUF_SIZE_MASK		(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)		(0x14e0 + ((q) << 2))
#define      MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)		(0x1500 + ((q) << 2))
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX	255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool)	(0x1700 + ((pool) << 2))
#define      MVNETA_PORT_POOL_BUFFER_SZ_SHIFT	3
#define      MVNETA_PORT_POOL_BUFFER_SZ_MASK	0xfff8
#define MVNETA_PORT_RX_RESET			0x1cc0
#define      MVNETA_PORT_RX_DMA_RESET		BIT(0)
#define MVNETA_PHY_ADDR				0x2000
#define      MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_MBUS_RETRY			0x2010
#define MVNETA_UNIT_INTR_CAUSE			0x2080
#define MVNETA_UNIT_CONTROL			0x20B0
#define      MVNETA_PHY_POLLING_ENABLE		BIT(1)
#define MVNETA_WIN_BASE(w)			(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE			0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE		0x2294
#define MVNETA_PORT_CONFIG			0x2400
#define      MVNETA_UNI_PROMISC_MODE		BIT(0)
#define      MVNETA_DEF_RXQ(q)			((q) << 1)
#define      MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define      MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define      MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define      MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define      MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)	 | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND		0x2404
#define MVNETA_MAC_ADDR_LOW			0x2414
#define MVNETA_MAC_ADDR_HIGH			0x2418
#define MVNETA_SDMA_CONFIG			0x241c
#define      MVNETA_SDMA_BRST_SIZE_16		4
#define      MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define      MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define      MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define      MVNETA_DESC_SWAP			BIT(6)
#define      MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS			0x2444
#define      MVNETA_TX_IN_PRGRS			BIT(1)
#define      MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE		0x247c
#define MVNETA_SERDES_CFG			0x24A0
#define      MVNETA_SGMII_SERDES_PROTO		0x0cc7
#define      MVNETA_QSGMII_SERDES_PROTO		0x0667
#define MVNETA_TYPE_PRIO			0x24bc
#define      MVNETA_FORCE_UNI			BIT(21)
#define MVNETA_TXQ_CMD_1			0x24e4
#define MVNETA_TXQ_CMD				0x2448
#define      MVNETA_TXQ_DISABLE_SHIFT		8
#define      MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT		0x2484
#define MVNETA_OVERRUN_FRAME_COUNT		0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER		0x24f4
#define      MVNETA_GMAC_1MS_CLOCK_ENABLE	BIT(31)
#define MVNETA_ACC_MODE				0x2500
#define MVNETA_BM_ADDRESS			0x2504
#define MVNETA_CPU_MAP(cpu)			(0x2540 + ((cpu) << 2))
#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define      MVNETA_CPU_RXQ_ACCESS(rxq)		BIT(rxq)
#define      MVNETA_CPU_TXQ_ACCESS(txq)		BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q)		(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is
 * not set, then a read of the register from this CPU always returns
 * 0 and a write has no effect.
 */

#define MVNETA_INTR_NEW_CAUSE			0x25a0
#define MVNETA_INTR_NEW_MASK			0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define      MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << nr_txqs) - 1) << 0)
#define      MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define      MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define      MVNETA_RX_INTR_MASK_ALL		(0xff << 8)
#define      MVNETA_MISCINTR_INTR_MASK		BIT(31)
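/* For example, with the driver's default eight TX and eight RX queues,
 * MVNETA_TX_INTR_MASK(8) evaluates to 0x000000ff (one SENT bit per
 * queue) and MVNETA_RX_INTR_MASK(8) to 0x0000ff00 (one OCCUP bit per
 * queue), matching the bit layout described above.
 */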

#define MVNETA_INTR_OLD_CAUSE			0x25a8
#define MVNETA_INTR_OLD_MASK			0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE			0x25b0
#define MVNETA_INTR_MISC_MASK			0x25b4

#define      MVNETA_CAUSE_PHY_STATUS_CHANGE	BIT(0)
#define      MVNETA_CAUSE_LINK_CHANGE		BIT(1)
#define      MVNETA_CAUSE_PTP			BIT(4)

#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR	BIT(7)
#define      MVNETA_CAUSE_RX_OVERRUN		BIT(8)
#define      MVNETA_CAUSE_RX_CRC_ERROR		BIT(9)
#define      MVNETA_CAUSE_RX_LARGE_PKT		BIT(10)
#define      MVNETA_CAUSE_TX_UNDERUN		BIT(11)
#define      MVNETA_CAUSE_PRBS_ERR		BIT(12)
#define      MVNETA_CAUSE_PSC_SYNC_CHANGE	BIT(13)
#define      MVNETA_CAUSE_SERDES_SYNC_ERR	BIT(14)

#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT	16
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT	24
#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK	(0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)	(1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE			0x25b8
#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0x000000ff

#define MVNETA_RXQ_CMD				0x2680
#define      MVNETA_RXQ_DISABLE_SHIFT		8
#define      MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)		(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)		(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0			0x2c00
#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define      MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define      MVNETA_GMAC0_PORT_ENABLE		BIT(0)
#define MVNETA_GMAC_CTRL_2			0x2c08
#define      MVNETA_GMAC2_INBAND_AN_ENABLE	BIT(0)
#define      MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define      MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define      MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS			0x2c10
#define      MVNETA_GMAC_LINK_UP		BIT(0)
#define      MVNETA_GMAC_SPEED_1000		BIT(1)
#define      MVNETA_GMAC_SPEED_100		BIT(2)
#define      MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG		0x2c0c
#define      MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define      MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define      MVNETA_GMAC_INBAND_AN_ENABLE	BIT(2)
#define      MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define      MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define      MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define      MVNETA_GMAC_AN_FLOW_CTRL_EN	BIT(11)
#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define      MVNETA_GMAC_AN_DUPLEX_EN		BIT(13)
#define MVNETA_MIB_COUNTERS_BASE		0x3000
#define      MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define      MVNETA_TXQ_DEC_SENT_SHIFT		16
#define      MVNETA_TXQ_DEC_SENT_MASK		0xff
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define      MVNETA_TXQ_SENT_DESC_SHIFT		16
#define      MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define      MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define      MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define      MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
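/* e.g. with a 128-entry ring (last_desc == 127), index 126 advances
 * to 127 and index 127 wraps back to 0.
 */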

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled with zeroes automatically on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they leave the IP header aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_TX_CSUM_DEF_SIZE		1600
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT1		1
#define MVNETA_ACC_MODE_EXT2		2

#define MVNETA_MAX_DECODE_WIN		6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

/* TSO header size */
#define TSO_HEADER_SIZE			128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

/* Number of bytes to be taken into account by HW when putting incoming data
 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION	64

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			     \
	      cache_line_size())
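/* Worked example: with a 1500-byte MTU and a 32-byte cache line,
 * 1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) + 4 (FCS) = 1524 bytes,
 * which ALIGN() rounds up to 1536 bytes.
 */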

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
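/* MVNETA_RX_GET_BM_POOL_ID() above extracts bits 13..14 of the RX
 * descriptor status word, i.e. the BM pool the received frame came
 * from; see the MVNETA_RXD_BM_POOL_SHIFT/_MASK definitions together
 * with the other RXD status bits below.
 */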

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
};
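/* Note: the offsets above are absolute register offsets. Most of the
 * counters live in the MIB block at MVNETA_MIB_COUNTERS_BASE (0x3000);
 * rx_discard and rx_overrun come from the discard/overrun frame count
 * registers at 0x2484/0x2488. A T_REG_64 counter presumably spans two
 * consecutive 32-bit registers, low word first.
 */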

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port	*pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct	napi;

	/* Cause of the previous interrupt */
	u32			cause_rx_tx;
};

struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu	*ports;
	struct mvneta_pcpu_stats __percpu	*stats;

	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct hlist_node node_online;
	struct hlist_node node_dead;
	int rxq_def;
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;
	bool is_stopped;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Core clock */
	struct clk *clk;
	/* AXI clock */
	struct clk *clk_bus;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	struct mii_bus *mii_bus;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
	unsigned int tx_csum_limit;
	unsigned int use_inband_status:1;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];

	/* Flags for special SoC configurations */
	bool neta_armada3700;
	u16 rx_offset_correction;
	const struct mbus_dram_target_info *dram_target_info;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_BM_POOL_SHIFT	13
#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserverd1;	/* csum_l4 (for future use)		*/
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet		*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u16  data_size;		/* Size of received packet in bytes	*/

	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserverd1;	/* csum_l4 (for future use)		*/
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes	*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u32  status;		/* Info about received packet		*/

	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/

	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;
	int pending;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs */
	cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX buffer */
	void  **buf_virt_addr;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

static enum cpuhp_state online_hpstate;
/* The hardware supports eight (8) rx queues; this driver allocates
 * all of them up front.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

/* HW BM requires that each port be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
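/* The MIB counters are clear-on-read: reading each register resets it,
 * so the dummy reads above are enough to zero the whole block; the
 * values read back are deliberately discarded.
 */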

/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;

	stats->tx_dropped	= dev->stats.tx_dropped;
}
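/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair
 * above retries the snapshot if a writer updated the counters in the
 * meantime, which keeps the 64-bit reads consistent on 32-bit SMP
 * systems where such reads are not atomic.
 */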

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}
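/* e.g. ndescs == 300 results in two register writes: one adding the
 * 255-descriptor maximum and a final one adding the remaining 45.
 */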

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
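/* The max RX size field appears to be encoded in 2-byte units with the
 * two-byte Marvell header excluded, which matches the
 * (max_rx_size - MVNETA_MH_SIZE) / 2 computation above.
 */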

/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in units of 8 bytes */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta of less than 256.
	 */
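	/* txq->pending accumulates descriptors that were queued without
	 * being flushed to the hardware (e.g. while transmission was
	 * being batched); they are folded into the same register write.
	 */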
	val = pend_desc + txq->pending;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
	txq->pending = 0;
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val |= MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
				     struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
	val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
				      struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
	val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
					      int buf_size,
					      u8 pool_id)
{
	u32 val;

	if (!IS_ALIGNED(buf_size, 8)) {
		dev_warn(pp->dev->dev.parent,
			 "illegal buf_size value %d, round to %d\n",
			 buf_size, ALIGN(buf_size, 8));
		buf_size = ALIGN(buf_size, 8);
	}

	val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
	val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
	mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}

/* Configure MBUS window in order to enable access to BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
				  u8 target, u8 attr)
{
	u32 win_enable, win_protect;
	int i;

	win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

	if (pp->bm_win_id < 0) {
		/* Find first not occupied window */
		for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
			if (win_enable & (1 << i)) {
				pp->bm_win_id = i;
				break;
			}
		}
		if (i == MVNETA_MAX_DECODE_WIN)
			return -ENOMEM;
	} else {
		i = pp->bm_win_id;
	}

	mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
	mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

	if (i < 4)
		mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

	mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
		    (attr << 8) | target);

	mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

	win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
	win_protect |= 3 << (2 * i);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

	win_enable &= ~(1 << i);
	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

	return 0;
}

static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
	u32 wsize;
	u8 target, attr;
	int err;

	/* Get BM window information */
	err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
					 &target, &attr);
	if (err < 0)
		return err;

	pp->bm_win_id = -1;

	/* Open NETA -> BM window */
	err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
				     target, attr);
	if (err < 0) {
		netdev_info(pp->dev, "fail to configure mbus window to BM\n");
		return err;
	}
	return 0;
}

/* Assign and initialize pools for port. In case of failure, the
 * buffer manager will remain disabled for the current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
			       struct mvneta_port *pp)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 long_pool_id, short_pool_id;

	if (!pp->neta_armada3700) {
		int ret;

		ret = mvneta_bm_port_mbus_init(pp);
		if (ret)
			return ret;
	}

	if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
		netdev_info(pp->dev, "missing long pool id\n");
		return -EINVAL;
	}

	/* Create port's long pool depending on mtu */
	pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
					   MVNETA_BM_LONG, pp->id,
					   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
	if (!pp->pool_long) {
		netdev_info(pp->dev, "fail to obtain long pool for port\n");
		return -ENOMEM;
	}

	pp->pool_long->port_map |= 1 << pp->id;

	mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
				   pp->pool_long->id);

	/* If short pool id is not defined, assume using single pool */
	if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
		short_pool_id = long_pool_id;

	/* Create port's short pool */
	pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
					    MVNETA_BM_SHORT, pp->id,
					    MVNETA_BM_SHORT_PKT_SIZE);
	if (!pp->pool_short) {
		netdev_info(pp->dev, "fail to obtain short pool for port\n");
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		return -ENOMEM;
	}

	if (short_pool_id != long_pool_id) {
		pp->pool_short->port_map |= 1 << pp->id;
		mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
					   pp->pool_short->id);
	}

	return 0;
}

/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{
	struct mvneta_bm_pool *bm_pool = pp->pool_long;
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
	int num;

	/* Release all buffers from long pool */
	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
	if (hwbm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n",
		     bm_pool->id);
		goto bm_mtu_err;
	}

	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));

	/* Fill entire long pool */
	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
	if (num != hwbm_pool->size) {
		WARN(1, "pool %d: %d of %d allocated\n",
		     bm_pool->id, num, hwbm_pool->size);
		goto bm_mtu_err;
	}
	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

	return;

bm_mtu_err:
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
	mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

	pp->bm_priv = NULL;
	mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
	netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}
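/* Note that once the error path above has cleared pp->bm_priv, the
 * port keeps using software buffer management (MVNETA_ACC_MODE_EXT1);
 * nothing in this path re-enables hardware BM later.
 */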

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. Build the RX queue map from
	 * scratch rather than reusing the TX map.
	 */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & MVNETA_RXQ_ENABLE_MASK);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & MVNETA_TXQ_ENABLE_MASK);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}
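/* Each filter table register packs four one-byte entries; within an
 * entry, bit 0 marks the address as "pass" and bits 3:1 select the
 * destination RX queue, hence the (0x1 | (queue << 1)) pattern that is
 * replicated across all four bytes above. The two multicast tables
 * below use the same entry encoding.
 */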

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
{
	u32 val;

	if (enable) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
			 MVNETA_GMAC_FORCE_LINK_DOWN |
			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
		       MVNETA_GMAC_AN_SPEED_EN |
		       MVNETA_GMAC_AN_DUPLEX_EN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	} else {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN);
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	}
}

static void mvneta_percpu_unmask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK_ALL |
		    MVNETA_TX_INTR_MASK_ALL |
		    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
	struct mvneta_port *pp = arg;

	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue.
	 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;
		if (!pp->neta_armada3700) {
			for (rxq = 0; rxq < rxq_number; rxq++)
				if ((rxq % max_cpu) == cpu)
					rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

			for (txq = 0; txq < txq_number; txq++)
				if ((txq % max_cpu) == cpu)
					txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

			/* With only one TX queue, configure a special
			 * case that delivers all the IRQs to a single
			 * CPU
			 */
			if (txq_number == 1)
				txq_map = (cpu == pp->rxq_def) ?
					MVNETA_CPU_TXQ_ACCESS(1) : 0;

		} else {
			txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
			rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
		}

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	if (pp->bm_priv)
		/* HW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT2;
	else
		/* SW buffer management + legacy parser */
		val = MVNETA_ACC_MODE_EXT1;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	if (pp->bm_priv)
		mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

Thomas Petazzonic5aff182012-08-17 14:04:28 +03001436 /* Update val of portCfg register accordingly with all RxQueue types */
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01001437 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001438 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1439
1440 val = 0;
1441 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1442 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1443
1444 /* Build PORT_SDMA_CONFIG_REG */
1445 val = 0;
1446
1447 /* Default burst size */
1448 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1449 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
Thomas Petazzoni9ad8fef2013-07-29 15:21:28 +02001450 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001451
Thomas Petazzoni9ad8fef2013-07-29 15:21:28 +02001452#if defined(__BIG_ENDIAN)
1453 val |= MVNETA_DESC_SWAP;
1454#endif
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001455
1456 /* Assign port SDMA configuration */
1457 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1458
Thomas Petazzoni71408602013-09-04 16:21:18 +02001459 /* Disable PHY polling in hardware, since we're using the
1460 * kernel phylib to do this.
1461 */
1462 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1463 val &= ~MVNETA_PHY_POLLING_ENABLE;
1464 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1465
Stas Sergeev0c0744f2015-12-02 20:35:11 +03001466 mvneta_set_autoneg(pp, pp->use_inband_status);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001467 mvneta_set_ucast_table(pp, -1);
1468 mvneta_set_special_mcast_table(pp, -1);
1469 mvneta_set_other_mcast_table(pp, -1);
1470
1471 /* Set port interrupt enable register - default enable all */
1472 mvreg_write(pp, MVNETA_INTR_ENABLE,
1473 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1474 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
Andrew Lunne4839112015-10-22 18:37:36 +01001475
1476 mvneta_mib_counters_clear(pp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001477}
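
/* Illustrative sketch (hypothetical helper, not wired into the driver): how
 * the modulo rule in mvneta_defaults_set() spreads queues across CPUs. With
 * max_cpu == 4 and eight RX queues, CPU 1 ends up owning RXQ 1 and RXQ 5.
 * This mirrors the RX half of the loop above for a single CPU.
 */
static u32 __maybe_unused mvneta_example_rxq_map(int cpu, int max_cpu,
						 int nr_rxqs)
{
	u32 map = 0;
	int rxq;

	for (rxq = 0; rxq < nr_rxqs; rxq++)
		if ((rxq % max_cpu) == cpu)
			map |= MVNETA_CPU_RXQ_ACCESS(rxq);

	return map;
}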

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* The TX token size and all TXQ token sizes must be larger than
	 * the MTU.
	 */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}
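
/* Illustrative sketch (hypothetical helper): the TX MTU programmed above is
 * max_tx_size scaled by 8 and clamped to the field's maximum, e.g. a
 * max_tx_size of 9000 requests 72000 and is capped at MVNETA_TX_MTU_MAX.
 */
static u32 __maybe_unused mvneta_example_tx_mtu(int max_tx_size)
{
	u32 mtu = max_tx_size * 8;

	return min_t(u32, mtu, MVNETA_TX_MTU_MAX);
}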

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
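
/* Illustrative sketch (hypothetical helper): the unicast filter packs four
 * one-byte entries per 32-bit register, so a last nibble of 0xb lands at
 * byte offset 8 from the table base (register 2), byte lane 3. The math
 * mirrors mvneta_set_ucast_addr() above.
 */
static void __maybe_unused mvneta_example_ucast_slot(u8 last_nibble,
						     unsigned int *tbl_offset,
						     unsigned int *reg_offset)
{
	last_nibble &= 0xf;
	*tbl_offset = (last_nibble / 4) * 4;	/* byte offset of register */
	*reg_offset = last_nibble % 4;		/* byte lane inside register */
}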

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before an RX interrupt
 * is generated by the HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}

/* Set the time delay in usec before an RX interrupt is generated by
 * the HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}
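
/* Illustrative sketch: the time-coalescing register counts core-clock
 * cycles, so the value written is microseconds scaled by the clock rate
 * in MHz. Assuming a 250 MHz clock, a 100 usec delay becomes
 * (250000000 / 1000000) * 100 = 25000 cycles.
 */
static u32 __maybe_unused mvneta_example_usec_to_cycles(unsigned long clk_rate,
							u32 usec)
{
	return (clk_rate / 1000000) * usec;
}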

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}

/* Handle rx descriptor fill by setting the buffer's physical address and
 * stashing its virtual address for the later unmap/build_skb
 */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, void *virt_addr,
				struct mvneta_rx_queue *rxq)
{
	int i;

	rx_desc->buf_phys_addr = phys_addr;
	i = rx_desc - rxq->descs;
	rxq->buf_virt_addr[i] = virt_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
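
/* Illustrative sketch: the DEC_SENT field is only eight bits wide, so a
 * large completion count is split into chunks of 255. Decrementing 600
 * descriptors takes three register writes: 255, 255 and 90. This
 * hypothetical helper only counts the writes the loop above would issue.
 */
static int __maybe_unused mvneta_example_dec_sent_writes(int sent_desc)
{
	int writes = 0;

	while (sent_desc > 0xff) {
		sent_desc -= 0xff;
		writes++;
	}

	return writes + 1;	/* one final write for the remainder */
}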

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
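
/* Illustrative sketch (assumed values): for an IPv4/TCP frame behind a
 * 14-byte Ethernet header with a 20-byte IP header, the command word is
 * built from an L3 offset of 14, an IP header length of 5 (passed in
 * 32-bit words, i.e. straight from ihl) and IPPROTO_TCP.
 */
static u32 __maybe_unused mvneta_example_tcp4_csum_cmd(void)
{
	return mvneta_txq_desc_csum(14, htons(ETH_P_IP), 5, IPPROTO_TCP);
}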

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
			   struct sk_buff *skb)
{
	if ((status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Return the tx queue pointer (find last set bit) according to <cause>
 * returned from the tx_done reg. <cause> must not be null. The return
 * value is always a valid queue for matching the first one found in
 * <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return &pp->txqs[queue];
}
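
/* Illustrative sketch: fls() is one-based, so a cause word of 0x6 (TXQs 1
 * and 2 pending) picks queue 2 first; after the caller clears that bit,
 * the next call picks queue 1.
 */
static int __maybe_unused mvneta_example_txq_pick(u32 cause)
{
	return fls(cause) - 1;	/* 0x6 -> 2, then 0x2 -> 1 */
}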

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num,
				 struct netdev_queue *nq)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		if (skb) {
			bytes_compl += skb->len;
			pkts_compl++;
		}

		mvneta_txq_inc_get(txq);

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			    struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (!tx_done)
		return;

	mvneta_txq_bufs_free(pp, txq, tx_done, nq);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
	}
}

void *mvneta_frag_alloc(unsigned int frag_size)
{
	if (likely(frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(frag_size);
	else
		return kmalloc(frag_size, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(mvneta_frag_alloc);

void mvneta_frag_free(unsigned int frag_size, void *data)
{
	if (likely(frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
EXPORT_SYMBOL_GPL(mvneta_frag_free);

/* Refill processing for SW buffer management */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc,
			    struct mvneta_rx_queue *rxq)
{
	dma_addr_t phys_addr;
	void *data;

	data = mvneta_frag_alloc(pp->frag_size);
	if (!data)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, data,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		mvneta_frag_free(pp->frag_size, data);
		return -ENOMEM;
	}

	phys_addr += pp->rx_offset_correction;
	mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		__be16 l3_proto = vlan_get_protocol(skb);
		u8 l4_proto;

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    l3_proto, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	if (pp->bm_priv) {
		for (i = 0; i < rx_done; i++) {
			struct mvneta_rx_desc *rx_desc =
				mvneta_rxq_next_desc_get(rxq);
			u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
			struct mvneta_bm_pool *bm_pool;

			bm_pool = &pp->bm_priv->bm_pools[pool_id];
			/* Return dropped buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
		}
		return;
	}

	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		void *data = rxq->buf_virt_addr[i];

		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
		mvneta_frag_free(pp->frag_size, data);
	}
}

/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err, index;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		index = rx_desc - rxq->descs;
		data = rxq->buf_virt_addr[index];
		phys_addr = rx_desc->buf_phys_addr;

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			memcpy(skb_put(skb, rx_bytes),
			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
			       rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&port->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc, rxq);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			goto err_drop_frame;
		}

		frag_size = pp->frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After refill, the old buffer has to be unmapped regardless
		 * of whether the skb was successfully built.
		 */
		dma_unmap_single(dev->dev.parent, phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size),
				 DMA_FROM_DEVICE);

		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}
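
/* Illustrative sketch: the copybreak decision above trades a memcpy()
 * against a DMA unmap and buffer refill. Frames up to rx_copybreak bytes
 * are copied into a fresh skb so the original buffer stays mapped for
 * reuse; larger frames keep the buffer and wrap it with build_skb().
 */
static bool __maybe_unused mvneta_example_should_copy(int rx_bytes,
						      int copybreak)
{
	return rx_bytes <= copybreak;
}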

/* Main rx processing when using hardware buffer management */
static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct mvneta_bm_pool *bm_pool = NULL;
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err;
		u8 pool_id;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
		phys_addr = rx_desc->buf_phys_addr;
		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
		bm_pool = &pp->bm_priv->bm_pools[pool_id];

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame_ret_pool:
			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame_ret_pool;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			memcpy(skb_put(skb, rx_bytes),
			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
			       rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&port->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			goto err_drop_frame_ret_pool;
		}

		frag_size = bm_pool->hwbm_pool.frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After refill, the old buffer has to be unmapped regardless
		 * of whether the skb was successfully built.
		 */
		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}

static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	txq->tx_skb[txq->txq_put_index] = NULL;
	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = hdr_len;
	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
	tx_desc->command |= MVNETA_TXD_F_DESC;
	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
				 txq->txq_put_index * TSO_HEADER_SIZE;
	mvneta_txq_inc_put(txq);
}

static inline int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{
	struct mvneta_tx_desc *tx_desc;

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = size;
	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
						size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		return -ENOMEM;
	}

	tx_desc->command = 0;
	txq->tx_skb[txq->txq_put_index] = NULL;

	if (last_tcp) {
		/* last descriptor in the TCP packet */
		tx_desc->command = MVNETA_TXD_L_DESC;

		/* last descriptor in SKB */
		if (is_last)
			txq->tx_skb[txq->txq_put_index] = skb;
	}
	mvneta_txq_inc_put(txq);
	return 0;
}

static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{
	int total_len, data_left;
	int desc_count = 0;
	struct mvneta_port *pp = netdev_priv(dev);
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i;

	/* Count needed descriptors */
	if ((txq->count + tso_count_descs(skb)) >= txq->size)
		return 0;

	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
		pr_info("*** Is this even possible???!?!?\n");
		return 0;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		mvneta_tso_put_hdr(skb, pp, txq);

		while (data_left > 0) {
			int size;
			desc_count++;

			size = min_t(int, tso.size, data_left);

			if (mvneta_tso_put_data(dev, txq, skb,
						tso.data, size,
						size == data_left,
						total_len == 0))
				goto err_release;
			data_left -= size;

			tso_build_data(skb, &tso, size);
		}
	}

	return desc_count;

err_release:
	/* Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	for (i = desc_count - 1; i >= 0; i--) {
		struct mvneta_tx_desc *tx_desc = txq->descs + i;
		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}
	return 0;
}
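
/* Illustrative sketch (assumed numbers): a 9054-byte TSO skb with a 54-byte
 * MAC+IP+TCP header and gso_size 1460 carries 9000 payload bytes and is cut
 * into DIV_ROUND_UP(9000, 1460) = 7 segments, each needing one header
 * descriptor plus at least one data descriptor.
 */
static int __maybe_unused mvneta_example_tso_segments(int skb_len, int hdr_len,
						      int gso_size)
{
	return DIV_ROUND_UP(skb_len - hdr_len, gso_size);
}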

/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == nr_frags - 1) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
			txq->tx_skb[txq->txq_put_index] = skb;
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			txq->tx_skb[txq->txq_put_index] = NULL;
		}
		mvneta_txq_inc_put(txq);
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	int len = skb->len;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	if (skb_is_gso(skb)) {
		frags = mvneta_tx_tso(skb, dev, txq);
		goto out;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netdev_tx_sent_queue(nq, len);

		txq->count += frags;
		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

		if (!skb->xmit_more || netif_xmit_stopped(nq) ||
		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
			mvneta_txq_pend_desc_add(pp, txq, frags);
		else
			txq->pending += frags;

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}

/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done, nq);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;

	while (cause_tx_done) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count)
			mvneta_txq_done(pp, txq);

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}
}
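
/* Illustrative sketch: the cause word is drained highest queue first. For
 * cause_tx_done == 0x5 (TXQs 0 and 2 pending), the loop above services
 * queue 2, clears bit 2, then services queue 0 and exits after two rounds.
 */
static int __maybe_unused mvneta_example_tx_cause_rounds(u32 cause)
{
	int rounds = 0;

	while (cause) {
		cause &= ~(1 << (fls(cause) - 1));
		rounds++;
	}

	return rounds;
}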

/* Compute the CRC-8 of the specified address, using a unique algorithm
 * defined by the hardware spec that differs from the generic CRC-8
 * algorithm.
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}
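
/* Illustrative check (hypothetical, not used by the driver): the CRC above
 * reduces with polynomial 0x107 (x^8 + x^2 + x + 1) over all six address
 * bytes, and callers store the result in an unsigned char, so only the low
 * eight bits index the 256-entry Other Multicast table.
 */
static unsigned char __maybe_unused mvneta_example_mcast_index(unsigned char *addr)
{
	return mvneta_addr_crc(addr) & 0xff;
}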

/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the appropriate
 * Special Multicast Table entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and sets the
 * appropriate Other Multicast Table entry according to the specified
 * CRC-8.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4; /* Entry offset within the above reg */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}

/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}
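
/* Illustrative sketch: addresses of the form 01:00:5e:00:00:XX take the
 * direct-indexed Special Multicast path above; everything else is hashed
 * through mvneta_addr_crc() into the Other Multicast table.
 */
static bool __maybe_unused mvneta_example_is_special_mcast(const unsigned char *addr)
{
	return memcmp(addr, "\x01\x00\x5e\x00\x00", 5) == 0;
}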

/* Configure the filtering mode of the Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}

/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, pp->rxq_def);
		mvneta_set_special_mcast_table(pp, pp->rxq_def);
		mvneta_set_other_mcast_table(pp, pp->rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, pp->rxq_def);
			mvneta_set_other_mcast_table(pp, pp->rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      pp->rxq_def);
				}
			}
		}
	}
}

/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}

/* Interrupt handling - the callback for request_percpu_irq() */
static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
{
	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;

	disable_percpu_irq(port->pp->dev->irq);
	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}

static int mvneta_fixed_link_update(struct mvneta_port *pp,
				    struct phy_device *phy)
{
	struct fixed_phy_status status;
	struct fixed_phy_status changed = {};
	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
		status.speed = SPEED_1000;
	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
		status.speed = SPEED_100;
	else
		status.speed = SPEED_10;
	status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
	changed.link = 1;
	changed.speed = 1;
	changed.duplex = 1;
	fixed_phy_update_state(phy, &status, &changed);
	return 0;
}

/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate transmitted packets on
 * the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 - 15 of the causeRxTx register indicate received packets on
 * the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	int rx_queue;
	struct mvneta_port *pp = netdev_priv(napi->dev);
	struct net_device *ndev = pp->dev;
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

	if (!netif_running(pp->dev)) {
		napi_complete(napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);

		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
		if (pp->use_inband_status && (cause_misc &
				(MVNETA_CAUSE_PHY_STATUS_CHANGE |
				 MVNETA_CAUSE_LINK_CHANGE |
				 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
			mvneta_fixed_link_update(pp, ndev->phydev);
		}
	}

	/* Release Tx descriptors */
	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
	}

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));

	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
		port->cause_rx_tx;

	if (rx_queue) {
		rx_queue = rx_queue - 1;
		if (pp->bm_priv)
			rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
		else
			rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
	}

	if (rx_done < budget) {
		cause_rx_tx = 0;
		napi_complete_done(napi, rx_done);

		if (pp->neta_armada3700) {
			unsigned long flags;

			local_irq_save(flags);
			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
				    MVNETA_RX_INTR_MASK(rxq_number) |
				    MVNETA_TX_INTR_MASK(txq_number) |
				    MVNETA_MISCINTR_INTR_MASK);
			local_irq_restore(flags);
		} else {
			enable_percpu_irq(pp->dev->irq, 0);
		}
	}

	if (pp->neta_armada3700)
		pp->cause_rx_tx = cause_rx_tx;
	else
		port->cause_rx_tx = cause_rx_tx;

	return rx_done;
}
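
/* Illustrative sketch: per the bit layout described above, masking the
 * cause word with 0xff00 and shifting isolates the RX queues, and fls()
 * then selects the highest pending RXQ, exactly as mvneta_poll() does.
 * Returns -1 when no RX bit is set.
 */
static int __maybe_unused mvneta_example_rxq_pick(u32 cause_rx_tx)
{
	return fls((cause_rx_tx >> 8) & 0xff) - 1;
}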

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
		if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}

/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skb's in the tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}

/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (rxq->descs == NULL)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set Offset */
	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	if (!pp->bm_priv) {
		/* Fill RXQ with buffers from RX pool */
		mvneta_rxq_buf_size_set(pp, rxq,
					MVNETA_RX_BUF_SIZE(pp->pkt_size));
		mvneta_rxq_bm_disable(pp, rxq);
		mvneta_rxq_fill(pp, rxq, rxq->size);
	} else {
		mvneta_rxq_bm_enable(pp, rxq);
		mvneta_rxq_long_pool_set(pp, rxq);
		mvneta_rxq_short_pool_set(pp, rxq);
		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
	}

	return 0;
}
2884
2885/* Cleanup Rx queue */
2886static void mvneta_rxq_deinit(struct mvneta_port *pp,
2887 struct mvneta_rx_queue *rxq)
2888{
2889 mvneta_rxq_drop_pkts(pp, rxq);
2890
2891 if (rxq->descs)
2892 dma_free_coherent(pp->dev->dev.parent,
2893 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2894 rxq->descs,
2895 rxq->descs_phys);
2896
2897 rxq->descs = NULL;
2898 rxq->last_desc = 0;
2899 rxq->next_desc_to_proc = 0;
2900 rxq->descs_phys = 0;
2901}
2902
2903/* Create and initialize a tx queue */
2904static int mvneta_txq_init(struct mvneta_port *pp,
2905 struct mvneta_tx_queue *txq)
2906{
Gregory CLEMENT50bf8cb2015-12-09 18:23:51 +01002907 int cpu;
2908
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002909 txq->size = pp->tx_ring_size;
2910
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03002911 /* A queue must always have room for at least one skb.
2912 * Therefore, stop the queue when the free entries reaches
2913 * the maximum number of descriptors per skb.
2914 */
2915 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2916 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2917
2918
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002919 /* Allocate memory for TX descriptors */
2920 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2921 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2922 &txq->descs_phys, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002923 if (txq->descs == NULL)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002924 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002925
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002926 txq->last_desc = txq->size - 1;
2927
2928 /* Set maximum bandwidth for enabled TXQs */
2929 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2930 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2931
2932 /* Set Tx descriptors queue starting address */
2933 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2934 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2935
2936 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2937 if (txq->tx_skb == NULL) {
2938 dma_free_coherent(pp->dev->dev.parent,
2939 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2940 txq->descs, txq->descs_phys);
2941 return -ENOMEM;
2942 }
Ezequiel Garcia2adb719d2014-05-19 13:59:55 -03002943
2944 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2945 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2946 txq->size * TSO_HEADER_SIZE,
2947 &txq->tso_hdrs_phys, GFP_KERNEL);
2948 if (txq->tso_hdrs == NULL) {
2949 kfree(txq->tx_skb);
2950 dma_free_coherent(pp->dev->dev.parent,
2951 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2952 txq->descs, txq->descs_phys);
2953 return -ENOMEM;
2954 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002955 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2956
Gregory CLEMENT50bf8cb2015-12-09 18:23:51 +01002957 /* Setup XPS mapping */
2958 if (txq_number > 1)
2959 cpu = txq->id % num_present_cpus();
2960 else
2961 cpu = pp->rxq_def % num_present_cpus();
2962 cpumask_set_cpu(cpu, &txq->affinity_mask);
2963 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
2964
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002965 return 0;
2966}
2967
2968/* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
2969static void mvneta_txq_deinit(struct mvneta_port *pp,
2970 struct mvneta_tx_queue *txq)
2971{
Marcin Wojtasa29b6232017-01-16 18:08:32 +01002972 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2973
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002974 kfree(txq->tx_skb);
2975
Ezequiel Garcia2adb719d2014-05-19 13:59:55 -03002976 if (txq->tso_hdrs)
2977 dma_free_coherent(pp->dev->dev.parent,
2978 txq->size * TSO_HEADER_SIZE,
2979 txq->tso_hdrs, txq->tso_hdrs_phys);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002980 if (txq->descs)
2981 dma_free_coherent(pp->dev->dev.parent,
2982 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2983 txq->descs, txq->descs_phys);
2984
Marcin Wojtasa29b6232017-01-16 18:08:32 +01002985 netdev_tx_reset_queue(nq);
2986
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002987 txq->descs = NULL;
2988 txq->last_desc = 0;
2989 txq->next_desc_to_proc = 0;
2990 txq->descs_phys = 0;
2991
2992 /* Set minimum bandwidth for disabled TXQs */
2993 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2994 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2995
2996 /* Set Tx descriptors queue starting address and size */
2997 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2998 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2999}
3000
3001/* Cleanup all Tx queues */
3002static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3003{
3004 int queue;
3005
3006 for (queue = 0; queue < txq_number; queue++)
3007 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3008}
3009
3010/* Cleanup all Rx queues */
3011static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3012{
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01003013 int queue;
3014
3015 for (queue = 0; queue < txq_number; queue++)
3016 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003017}
3018
3019
3020/* Init all Rx queues */
3021static int mvneta_setup_rxqs(struct mvneta_port *pp)
3022{
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01003023 int queue;
3024
3025 for (queue = 0; queue < rxq_number; queue++) {
3026 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3027
3028 if (err) {
3029 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3030 __func__, queue);
3031 mvneta_cleanup_rxqs(pp);
3032 return err;
3033 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003034 }
3035
3036 return 0;
3037}
3038
3039/* Init all tx queues */
3040static int mvneta_setup_txqs(struct mvneta_port *pp)
3041{
3042 int queue;
3043
3044 for (queue = 0; queue < txq_number; queue++) {
3045 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3046 if (err) {
3047 netdev_err(pp->dev, "%s: can't create txq=%d\n",
3048 __func__, queue);
3049 mvneta_cleanup_txqs(pp);
3050 return err;
3051 }
3052 }
3053
3054 return 0;
3055}
3056
3057static void mvneta_start_dev(struct mvneta_port *pp)
3058{
Gregory CLEMENT6b125d62016-02-04 22:09:25 +01003059 int cpu;
Philippe Reynesc6c022e2016-07-30 17:42:11 +02003060 struct net_device *ndev = pp->dev;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003061
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003062 mvneta_max_rx_size_set(pp, pp->pkt_size);
3063 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3064
3065 /* start the Rx/Tx activity */
3066 mvneta_port_enable(pp);
3067
Marcin Wojtas2636ac32016-12-01 18:03:09 +01003068 if (!pp->neta_armada3700) {
3069 /* Enable polling on the port */
3070 for_each_online_cpu(cpu) {
3071 struct mvneta_pcpu_port *port =
3072 per_cpu_ptr(pp->ports, cpu);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003073
Marcin Wojtas2636ac32016-12-01 18:03:09 +01003074 napi_enable(&port->napi);
3075 }
3076 } else {
3077 napi_enable(&pp->napi);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003078 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003079
Gregory CLEMENT2dcf75e2015-12-09 18:23:49 +01003080 /* Unmask interrupts. It has to be done from each CPU */
Gregory CLEMENT6b125d62016-02-04 22:09:25 +01003081 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3082
Stas Sergeev898b29702015-04-01 20:32:49 +03003083 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3084 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3085 MVNETA_CAUSE_LINK_CHANGE |
3086 MVNETA_CAUSE_PSC_SYNC_CHANGE);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003087
Philippe Reynesc6c022e2016-07-30 17:42:11 +02003088 phy_start(ndev->phydev);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003089 netif_tx_start_all_queues(pp->dev);
3090}
3091
3092static void mvneta_stop_dev(struct mvneta_port *pp)
3093{
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003094 unsigned int cpu;
Philippe Reynesc6c022e2016-07-30 17:42:11 +02003095 struct net_device *ndev = pp->dev;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003096
Philippe Reynesc6c022e2016-07-30 17:42:11 +02003097 phy_stop(ndev->phydev);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003098
Marcin Wojtas2636ac32016-12-01 18:03:09 +01003099 if (!pp->neta_armada3700) {
3100 for_each_online_cpu(cpu) {
3101 struct mvneta_pcpu_port *port =
3102 per_cpu_ptr(pp->ports, cpu);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003103
Marcin Wojtas2636ac32016-12-01 18:03:09 +01003104 napi_disable(&port->napi);
3105 }
3106 } else {
3107 napi_disable(&pp->napi);
Maxime Ripard12bb03b2015-09-25 18:09:36 +02003108 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003109
3110 netif_carrier_off(pp->dev);
3111
3112 mvneta_port_down(pp);
3113 netif_tx_stop_all_queues(pp->dev);
3114
3115 /* Stop the port activity */
3116 mvneta_port_disable(pp);
3117
3118 /* Clear all ethernet port interrupts */
Gregory CLEMENTdb488c12016-02-04 22:09:27 +01003119 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003120
3121 /* Mask all ethernet port interrupts */
Gregory CLEMENTdb488c12016-02-04 22:09:27 +01003122 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003123
3124 mvneta_tx_reset(pp);
3125 mvneta_rx_reset(pp);
3126}
3127
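/* Enable/disable the per-CPU port interrupt on the calling CPU; these
 * helpers are invoked on each CPU via on_each_cpu() and friends.
 */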
static void mvneta_percpu_enable(void *arg)
{
	struct mvneta_port *pp = arg;

	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
}

static void mvneta_percpu_disable(void *arg)
{
	struct mvneta_port *pp = arg;

	disable_percpu_irq(pp->dev->irq);
}

/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	dev->mtu = mtu;

	if (!netif_running(dev)) {
		if (pp->bm_priv)
			mvneta_bm_update_mtu(pp, mtu);

		netdev_update_features(dev);
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvneta_stop_dev(pp);
	on_each_cpu(mvneta_percpu_disable, pp, true);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	if (pp->bm_priv)
		mvneta_bm_update_mtu(pp, mtu);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup txqs after MTU change\n");
		return ret;
	}

	on_each_cpu(mvneta_percpu_enable, pp, true);
	mvneta_start_dev(pp);
	mvneta_port_up(pp);

	netdev_update_features(dev);

	return 0;
}

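/* netdev fix_features hook: the controller cannot checksum TX frames
 * larger than tx_csum_limit, so IP checksum offload and TSO are dropped
 * above that MTU.
 */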
static netdev_features_t mvneta_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
		netdev_info(dev,
			    "Disable IP checksum for MTU greater than %dB\n",
			    pp->tx_csum_limit);
	}

	return features;
}

/* Read the MAC address currently programmed into the port registers */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}

/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct sockaddr *sockaddr = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret < 0)
		return ret;
	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);

	eth_commit_mac_addr_change(dev, addr);
	return 0;
}

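/* phylib adjust_link callback: mirror the PHY's negotiated speed and
 * duplex into the GMAC autoneg config register and, when in-band status
 * is not used, force the link up or down to match the PHY state.
 */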
static void mvneta_adjust_link(struct net_device *ndev)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	int status_change = 0;

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			if (!pp->use_inband_status) {
				u32 val = mvreg_read(pp,
						  MVNETA_GMAC_AUTONEG_CONFIG);
				val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
				val |= MVNETA_GMAC_FORCE_LINK_PASS;
				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
					    val);
			}
			mvneta_port_up(pp);
		} else {
			if (!pp->use_inband_status) {
				u32 val = mvreg_read(pp,
						  MVNETA_GMAC_AUTONEG_CONFIG);
				val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
				val |= MVNETA_GMAC_FORCE_LINK_DOWN;
				mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
					    val);
			}
			mvneta_port_down(pp);
		}
		phy_print_status(phydev);
	}
}

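/* Connect the port to its PHY via the DT "phy" handle and restrict the
 * advertised features to what the GMAC supports (up to gigabit).
 */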
static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
				 pp->phy_interface);
	if (!phy_dev) {
		netdev_err(pp->dev, "could not find the PHY\n");
		return -ENODEV;
	}

	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	pp->link = 0;
	pp->duplex = 0;
	pp->speed = 0;

	return 0;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	struct net_device *ndev = pp->dev;

	phy_disconnect(ndev->phydev);
}

/* Electing a CPU must be done atomically: it must not race with the
 * removal/insertion of a CPU, and this function is not reentrant.
 */
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
	int elected_cpu = 0, max_cpu, cpu, i = 0;

	/* Use the cpu associated to the rxq when it is online; in all
	 * the other cases, use cpu 0, which can't be offline.
	 */
	if (cpu_online(pp->rxq_def))
		elected_cpu = pp->rxq_def;

	max_cpu = num_present_cpus();

	for_each_online_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

		if (cpu == elected_cpu)
			/* Map the default receive queue to the
			 * elected CPU
			 */
			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);

		/* We update the TX queue map only if we have one
		 * queue. In this case we associate the TX queue to
		 * the CPU bound to the default RX queue
		 */
		if (txq_number == 1)
			txq_map = (cpu == elected_cpu) ?
				MVNETA_CPU_TXQ_ACCESS(1) : 0;
		else
			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);

		/* Update the interrupt mask on each CPU according to the
		 * new mapping
		 */
		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
					 pp, true);
		i++;
	}
}

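/* CPU hotplug "online" callback: quiesce the other CPUs' NAPI contexts,
 * enable NAPI and the per-CPU interrupt on the new CPU, and re-elect the
 * CPU that serves the default RX queue.
 */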
static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	int other_cpu;
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	spin_lock(&pp->lock);
	/*
	 * Configuring the driver for a new CPU while the driver is
	 * stopping is racy, so just avoid it.
	 */
	if (pp->is_stopped) {
		spin_unlock(&pp->lock);
		return 0;
	}
	netif_tx_stop_all_queues(pp->dev);

	/*
	 * We have to synchronise on the napi of each CPU except the one
	 * just being woken up
	 */
	for_each_online_cpu(other_cpu) {
		if (other_cpu != cpu) {
			struct mvneta_pcpu_port *other_port =
				per_cpu_ptr(pp->ports, other_cpu);

			napi_synchronize(&other_port->napi);
		}
	}

	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	napi_enable(&port->napi);

	/*
	 * Enable per-CPU interrupts on the CPU that is
	 * brought up.
	 */
	mvneta_percpu_enable(pp);

	/*
	 * Elect the CPU that will handle the default RX queue and
	 * update the RX/TX queue mappings accordingly.
	 */
	mvneta_percpu_elect(pp);

	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE |
		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
	netif_tx_start_all_queues(pp->dev);
	spin_unlock(&pp->lock);
	return 0;
}

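/* CPU hotplug "down prepare" callback: mask the port interrupts, quiesce
 * and disable NAPI, and shut off the per-CPU interrupt on the departing CPU.
 */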
static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	/*
	 * Thanks to this lock we are sure that any pending cpu election is
	 * done.
	 */
	spin_lock(&pp->lock);
	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	spin_unlock(&pp->lock);

	napi_synchronize(&port->napi);
	napi_disable(&port->napi);
	/* Disable per-CPU interrupts on the CPU that is brought down. */
	mvneta_percpu_disable(pp);
	return 0;
}

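/* CPU hotplug "dead" callback: once the CPU is gone, elect a new CPU for
 * the default RX queue and restore the interrupt masks.
 */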
static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_dead);

	/* Check if a new CPU must be elected now this one is down */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);
	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE |
		    MVNETA_CAUSE_PSC_SYNC_CHANGE);
	netif_tx_start_all_queues(pp->dev);
	return 0;
}

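/* Open the port: allocate the RX/TX queues, request the (per-CPU)
 * interrupt line, register the CPU hotplug callbacks and probe the PHY.
 */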
static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	if (pp->neta_armada3700)
		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
				  dev->name, pp);
	else
		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
					 dev->name, pp->ports);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	if (!pp->neta_armada3700) {
		/* Enable per-CPU interrupt on all the CPUs to handle our RX
		 * queue interrupts
		 */
		on_each_cpu(mvneta_percpu_enable, pp, true);

		pp->is_stopped = false;
		/* Register a CPU notifier to handle the case where our CPU
		 * might be taken offline.
		 */
		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
						       &pp->node_online);
		if (ret)
			goto err_free_irq;

		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						       &pp->node_dead);
		if (ret)
			goto err_free_online_hp;
	}

	/* The link is down by default */
	netif_carrier_off(pp->dev);

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_dead_hp;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_dead_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
err_free_online_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
err_free_irq:
	if (pp->neta_armada3700) {
		free_irq(pp->dev->irq, pp);
	} else {
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(pp->dev->irq, pp->ports);
	}
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}

/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->neta_armada3700) {
		/* Inform that we are stopping so we don't want to setup the
		 * driver for new CPUs in the notifiers. The code of the
		 * notifier for CPU online is protected by the same spinlock,
		 * so when we get the lock, the notifier work is done.
		 */
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(dev->irq, pp->ports);
	} else {
		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);
		free_irq(dev->irq, pp);
	}

	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}

static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!dev->phydev)
		return -ENOTSUPP;

	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

/* Ethtool methods */

/* Set link ksettings (phy address, speed) for ethtools */
static int
mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	if ((cmd->base.autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
		u32 val;

		mvneta_set_autoneg(pp, cmd->base.autoneg == AUTONEG_ENABLE);

		if (cmd->base.autoneg == AUTONEG_DISABLE) {
			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
		}

		pp->use_inband_status = (cmd->base.autoneg == AUTONEG_ENABLE);
		netdev_info(pp->dev, "autoneg status set to %i\n",
			    pp->use_inband_status);

		if (netif_running(ndev)) {
			mvneta_port_down(pp);
			mvneta_port_up(pp);
		}
	}

	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}

/* Set interrupt coalescing for ethtools */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}

/* get coalescing for ethtools */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
	return 0;
}

static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}

static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;

	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
	if (pp->tx_ring_size != ring->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}

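/* Fold the hardware statistics counters into the 64-bit running totals
 * kept in pp->ethtool_stats.
 */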
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
	const struct mvneta_statistic *s;
	void __iomem *base = pp->base;
	u32 high, low, val;
	u64 val64;
	int i;

	for (i = 0, s = mvneta_statistics;
	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
	     s++, i++) {
		switch (s->type) {
		case T_REG_32:
			val = readl_relaxed(base + s->offset);
			pp->ethtool_stats[i] += val;
			break;
		case T_REG_64:
			/* Docs say to read low 32-bit then high */
			low = readl_relaxed(base + s->offset);
			high = readl_relaxed(base + s->offset + 4);
			val64 = (u64)high << 32 | low;
			pp->ethtool_stats[i] += val64;
			break;
		}
	}
}

static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}

static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}

static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return MVNETA_RSS_LU_TABLE_SIZE;
}

static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info,
				    u32 *rules __always_unused)
{
	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = rxq_number;
		return 0;
	case ETHTOOL_GRXFH:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

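/* Apply a new RSS configuration: quiesce NAPI on every CPU, move the
 * default RX queue to the first entry of the indirection table, re-elect
 * the serving CPU, then restart the datapath.
 */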
static int mvneta_config_rss(struct mvneta_port *pp)
{
	int cpu;
	u32 val;

	netif_tx_stop_all_queues(pp->dev);

	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	/* We have to synchronise on the napi of each CPU */
	for_each_online_cpu(cpu) {
		struct mvneta_pcpu_port *pcpu_port =
			per_cpu_ptr(pp->ports, cpu);

		napi_synchronize(&pcpu_port->napi);
		napi_disable(&pcpu_port->napi);
	}

	pp->rxq_def = pp->indir[0];

	/* Update unicast mapping */
	mvneta_set_rx_mode(pp->dev);

	/* Update the port config register according to the new
	 * default RX queue
	 */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	/* Update the elected CPU matching the new rxq_def */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);

	/* We have to synchronise on the napi of each CPU */
	for_each_online_cpu(cpu) {
		struct mvneta_pcpu_port *pcpu_port =
			per_cpu_ptr(pp->ports, cpu);

		napi_enable(&pcpu_port->napi);
	}

	netif_tx_start_all_queues(pp->dev);

	return 0;
}

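/* ethtool set_rxfh: only the RSS indirection table can be changed; a hash
 * key or a hash function other than Toeplitz is rejected.
 */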
3888static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
3889 const u8 *key, const u8 hfunc)
3890{
3891 struct mvneta_port *pp = netdev_priv(dev);
Marcin Wojtas2636ac32016-12-01 18:03:09 +01003892
3893 /* Current code for Armada 3700 doesn't support RSS features yet */
3894 if (pp->neta_armada3700)
3895 return -EOPNOTSUPP;
3896
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01003897 /* We require at least one supported parameter to be changed
3898 * and no change in any of the unsupported parameters
3899 */
3900 if (key ||
3901 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
3902 return -EOPNOTSUPP;
3903
3904 if (!indir)
3905 return 0;
3906
3907 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
3908
3909 return mvneta_config_rss(pp);
3910}
3911
3912static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
3913 u8 *hfunc)
3914{
3915 struct mvneta_port *pp = netdev_priv(dev);
3916
Marcin Wojtas2636ac32016-12-01 18:03:09 +01003917 /* Current code for Armada 3700 doesn't support RSS features yet */
3918 if (pp->neta_armada3700)
3919 return -EOPNOTSUPP;
3920
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01003921 if (hfunc)
3922 *hfunc = ETH_RSS_HASH_TOP;
3923
3924 if (!indir)
3925 return 0;
3926
3927 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
3928
3929 return 0;
3930}
3931
Jingju Houb60a00f2017-02-06 14:58:13 +08003932static void mvneta_ethtool_get_wol(struct net_device *dev,
3933 struct ethtool_wolinfo *wol)
3934{
3935 wol->supported = 0;
3936 wol->wolopts = 0;
3937
3938 if (dev->phydev)
3939 phy_ethtool_get_wol(dev->phydev, wol);
3940}
3941
3942static int mvneta_ethtool_set_wol(struct net_device *dev,
3943 struct ethtool_wolinfo *wol)
3944{
3945 if (!dev->phydev)
3946 return -EOPNOTSUPP;
3947
3948 return phy_ethtool_set_wol(dev->phydev, wol);
3949}
3950
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003951static const struct net_device_ops mvneta_netdev_ops = {
3952 .ndo_open = mvneta_open,
3953 .ndo_stop = mvneta_stop,
3954 .ndo_start_xmit = mvneta_tx,
3955 .ndo_set_rx_mode = mvneta_set_rx_mode,
3956 .ndo_set_mac_address = mvneta_set_mac_addr,
3957 .ndo_change_mtu = mvneta_change_mtu,
Simon Guinotb65657f2015-06-30 16:20:22 +02003958 .ndo_fix_features = mvneta_fix_features,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003959 .ndo_get_stats64 = mvneta_get_stats64,
Thomas Petazzoni15f59452013-09-04 16:26:52 +02003960 .ndo_do_ioctl = mvneta_ioctl,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003961};
3962
Jisheng Zhang4581be42017-02-16 17:07:39 +08003963static const struct ethtool_ops mvneta_eth_tool_ops = {
Florian Fainelli5489ee82016-11-15 11:19:47 -08003964 .nway_reset = phy_ethtool_nway_reset,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003965 .get_link = ethtool_op_get_link,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003966 .set_coalesce = mvneta_ethtool_set_coalesce,
3967 .get_coalesce = mvneta_ethtool_get_coalesce,
3968 .get_drvinfo = mvneta_ethtool_get_drvinfo,
3969 .get_ringparam = mvneta_ethtool_get_ringparam,
3970 .set_ringparam = mvneta_ethtool_set_ringparam,
Russell King9b0cdef2015-10-22 18:37:30 +01003971 .get_strings = mvneta_ethtool_get_strings,
3972 .get_ethtool_stats = mvneta_ethtool_get_stats,
3973 .get_sset_count = mvneta_ethtool_get_sset_count,
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01003974 .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
3975 .get_rxnfc = mvneta_ethtool_get_rxnfc,
3976 .get_rxfh = mvneta_ethtool_get_rxfh,
3977 .set_rxfh = mvneta_ethtool_set_rxfh,
Philippe Reynes013ad402016-07-30 17:42:12 +02003978 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3979 .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
Jingju Houb60a00f2017-02-06 14:58:13 +08003980 .get_wol = mvneta_ethtool_get_wol,
3981 .set_wol = mvneta_ethtool_set_wol,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003982};
3983
3984/* Initialize hw */
Ezequiel Garcia96728502014-05-22 20:06:59 -03003985static int mvneta_init(struct device *dev, struct mvneta_port *pp)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003986{
3987 int queue;
3988
3989 /* Disable port */
3990 mvneta_port_disable(pp);
3991
3992 /* Set port default values */
3993 mvneta_defaults_set(pp);
3994
Ezequiel Garcia96728502014-05-22 20:06:59 -03003995 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
3996 GFP_KERNEL);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003997 if (!pp->txqs)
3998 return -ENOMEM;
3999
4000 /* Initialize TX descriptor rings */
4001 for (queue = 0; queue < txq_number; queue++) {
4002 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4003 txq->id = queue;
4004 txq->size = pp->tx_ring_size;
4005 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
4006 }
4007
Ezequiel Garcia96728502014-05-22 20:06:59 -03004008 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
4009 GFP_KERNEL);
4010 if (!pp->rxqs)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004011 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004012
4013 /* Create Rx descriptor rings */
4014 for (queue = 0; queue < rxq_number; queue++) {
4015 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4016 rxq->id = queue;
4017 rxq->size = pp->rx_ring_size;
4018 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
4019 rxq->time_coal = MVNETA_RX_COAL_USEC;
Gregory CLEMENTf88bee12016-12-01 18:03:06 +01004020 rxq->buf_virt_addr = devm_kmalloc(pp->dev->dev.parent,
4021 rxq->size * sizeof(void *),
4022 GFP_KERNEL);
4023 if (!rxq->buf_virt_addr)
4024 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004025 }
4026
4027 return 0;
4028}
4029
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004030/* platform glue : initialize decoding windows */
Greg KH03ce7582012-12-21 13:42:15 +00004031static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
4032 const struct mbus_dram_target_info *dram)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004033{
4034 u32 win_enable;
4035 u32 win_protect;
4036 int i;
4037
4038 for (i = 0; i < 6; i++) {
4039 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
4040 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
4041
4042 if (i < 4)
4043 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
4044 }
4045
4046 win_enable = 0x3f;
4047 win_protect = 0;
4048
Marcin Wojtas2636ac32016-12-01 18:03:09 +01004049 if (dram) {
4050 for (i = 0; i < dram->num_cs; i++) {
4051 const struct mbus_dram_window *cs = dram->cs + i;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004052
Marcin Wojtas2636ac32016-12-01 18:03:09 +01004053 mvreg_write(pp, MVNETA_WIN_BASE(i),
4054 (cs->base & 0xffff0000) |
4055 (cs->mbus_attr << 8) |
4056 dram->mbus_dram_target_id);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004057
Marcin Wojtas2636ac32016-12-01 18:03:09 +01004058 mvreg_write(pp, MVNETA_WIN_SIZE(i),
4059 (cs->size - 1) & 0xffff0000);
4060
4061 win_enable &= ~(1 << i);
4062 win_protect |= 3 << (2 * i);
4063 }
4064 } else {
4065 /* For Armada3700 open default 4GB Mbus window, leaving
4066 * arbitration of target/attribute to a different layer
4067 * of configuration.
4068 */
4069 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
4070 win_enable &= ~BIT(0);
4071 win_protect = 3;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004072 }
4073
4074 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
Marcin Wojtasdb6ba9a2015-11-30 13:27:41 +01004075 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004076}
4077
4078/* Power up the port */
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02004079static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004080{
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02004081 u32 ctrl;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004082
4083 /* MAC Cause register should be cleared */
4084 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
4085
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02004086 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004087
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02004088 /* Even though it might look weird, when we're configured in
4089 * SGMII or QSGMII mode, the RGMII bit needs to be set.
4090 */
4091 switch(phy_mode) {
4092 case PHY_INTERFACE_MODE_QSGMII:
4093 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
4094 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
4095 break;
4096 case PHY_INTERFACE_MODE_SGMII:
4097 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
4098 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
4099 break;
4100 case PHY_INTERFACE_MODE_RGMII:
4101 case PHY_INTERFACE_MODE_RGMII_ID:
Jisheng Zhanga38d20d2017-03-29 16:42:26 +08004102 case PHY_INTERFACE_MODE_RGMII_RXID:
4103 case PHY_INTERFACE_MODE_RGMII_TXID:
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02004104 ctrl |= MVNETA_GMAC2_PORT_RGMII;
4105 break;
4106 default:
4107 return -EINVAL;
4108 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004109
4110 /* Cancel Port Reset */
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02004111 ctrl &= ~MVNETA_GMAC2_PORT_RESET;
4112 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004113
4114 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
4115 MVNETA_GMAC2_PORT_RESET) != 0)
4116 continue;
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02004117
4118 return 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004119}
4120
4121/* Device initialization routine */
Greg KH03ce7582012-12-21 13:42:15 +00004122static int mvneta_probe(struct platform_device *pdev)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004123{
Thomas Petazzonic3f0dd32014-03-27 11:39:29 +01004124 struct resource *res;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004125 struct device_node *dn = pdev->dev.of_node;
4126 struct device_node *phy_node;
Marcin Wojtasdc35a102016-03-14 09:39:03 +01004127 struct device_node *bm_node;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004128 struct mvneta_port *pp;
4129 struct net_device *dev;
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00004130 const char *dt_mac_addr;
4131 char hw_mac_addr[ETH_ALEN];
4132 const char *mac_from;
Stas Sergeevf8af8e62015-07-20 17:49:58 -07004133 const char *managed;
Marcin Wojtas9110ee02015-11-30 13:27:45 +01004134 int tx_csum_limit;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004135 int phy_mode;
4136 int err;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02004137 int cpu;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004138
Willy Tarreauee40a112013-04-11 23:00:37 +02004139 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004140 if (!dev)
4141 return -ENOMEM;
4142
4143 dev->irq = irq_of_parse_and_map(dn, 0);
4144 if (dev->irq == 0) {
4145 err = -EINVAL;
4146 goto err_free_netdev;
4147 }
4148
4149 phy_node = of_parse_phandle(dn, "phy", 0);
4150 if (!phy_node) {
Thomas Petazzoni83895be2014-05-16 16:14:06 +02004151 if (!of_phy_is_fixed_link(dn)) {
4152 dev_err(&pdev->dev, "no PHY specified\n");
4153 err = -ENODEV;
4154 goto err_free_irq;
4155 }
4156
4157 err = of_phy_register_fixed_link(dn);
4158 if (err < 0) {
4159 dev_err(&pdev->dev, "cannot register fixed PHY\n");
4160 goto err_free_irq;
4161 }
4162
4163 /* In the case of a fixed PHY, the DT node associated
4164 * to the PHY is the Ethernet MAC DT node.
4165 */
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02004166 phy_node = of_node_get(dn);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004167 }
4168
4169 phy_mode = of_get_phy_mode(dn);
4170 if (phy_mode < 0) {
4171 dev_err(&pdev->dev, "incorrect phy-mode\n");
4172 err = -EINVAL;
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02004173 goto err_put_phy_node;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004174 }
4175
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004176 dev->tx_queue_len = MVNETA_MAX_TXD;
4177 dev->watchdog_timeo = 5 * HZ;
4178 dev->netdev_ops = &mvneta_netdev_ops;
4179
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00004180 dev->ethtool_ops = &mvneta_eth_tool_ops;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004181
4182 pp = netdev_priv(dev);
Gregory CLEMENT1c2722a2016-03-12 18:44:17 +01004183 spin_lock_init(&pp->lock);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004184 pp->phy_node = phy_node;
4185 pp->phy_interface = phy_mode;
Stas Sergeevf8af8e62015-07-20 17:49:58 -07004186
4187 err = of_property_read_string(dn, "managed", &managed);
4188 pp->use_inband_status = (err == 0 &&
4189 strcmp(managed, "in-band-status") == 0);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004190
Gregory CLEMENT90b74c02015-12-09 18:23:48 +01004191 pp->rxq_def = rxq_def;
4192
Marcin Wojtas8d5047c2016-12-01 18:03:07 +01004193 /* Set RX packet offset correction for platforms, whose
4194 * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit
4195 * platforms and 0B for 32-bit ones.
4196 */
4197 pp->rx_offset_correction =
4198 max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);
4199
Gregory CLEMENT9a401de2015-12-09 18:23:50 +01004200 pp->indir[0] = rxq_def;
4201
Marcin Wojtas2636ac32016-12-01 18:03:09 +01004202 /* Get special SoC configurations */
4203 if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
4204 pp->neta_armada3700 = true;
4205
Jisheng Zhang2804ba42016-01-20 19:27:23 +08004206 pp->clk = devm_clk_get(&pdev->dev, "core");
4207 if (IS_ERR(pp->clk))
4208 pp->clk = devm_clk_get(&pdev->dev, NULL);
Thomas Petazzoni189dd622012-11-19 14:15:25 +01004209 if (IS_ERR(pp->clk)) {
4210 err = PTR_ERR(pp->clk);
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02004211 goto err_put_phy_node;
Thomas Petazzoni189dd622012-11-19 14:15:25 +01004212 }
4213
4214 clk_prepare_enable(pp->clk);
4215
Jisheng Zhang15cc4a42016-01-20 19:27:24 +08004216 pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
4217 if (!IS_ERR(pp->clk_bus))
4218 clk_prepare_enable(pp->clk_bus);
4219
Thomas Petazzonic3f0dd32014-03-27 11:39:29 +01004220 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4221 pp->base = devm_ioremap_resource(&pdev->dev, res);
4222 if (IS_ERR(pp->base)) {
4223 err = PTR_ERR(pp->base);
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02004224 goto err_clk;
4225 }
4226
Maxime Ripard12bb03b2015-09-25 18:09:36 +02004227 /* Alloc per-cpu port structure */
4228 pp->ports = alloc_percpu(struct mvneta_pcpu_port);
4229 if (!pp->ports) {
4230 err = -ENOMEM;
4231 goto err_clk;
4232 }
4233
willy tarreau74c41b02014-01-16 08:20:08 +01004234 /* Alloc per-cpu stats */
WANG Cong1c213bd2014-02-13 11:46:28 -08004235 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
willy tarreau74c41b02014-01-16 08:20:08 +01004236 if (!pp->stats) {
4237 err = -ENOMEM;
Maxime Ripard12bb03b2015-09-25 18:09:36 +02004238 goto err_free_ports;
willy tarreau74c41b02014-01-16 08:20:08 +01004239 }
4240
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00004241 dt_mac_addr = of_get_mac_address(dn);
Luka Perkov6c7a9a32013-10-30 00:10:01 +01004242 if (dt_mac_addr) {
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00004243 mac_from = "device tree";
4244 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
4245 } else {
4246 mvneta_get_mac_addr(pp, hw_mac_addr);
4247 if (is_valid_ether_addr(hw_mac_addr)) {
4248 mac_from = "hardware";
4249 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
4250 } else {
4251 mac_from = "random";
4252 eth_hw_addr_random(dev);
4253 }
4254 }
4255
Marcin Wojtas9110ee02015-11-30 13:27:45 +01004256 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
4257 if (tx_csum_limit < 0 ||
4258 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
4259 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4260 dev_info(&pdev->dev,
4261 "Wrong TX csum limit in DT, set to %dB\n",
4262 MVNETA_TX_CSUM_DEF_SIZE);
4263 }
4264 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
4265 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
4266 } else {
4267 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
4268 }
4269
4270 pp->tx_csum_limit = tx_csum_limit;
Simon Guinotb65657f2015-06-30 16:20:22 +02004271
Jane Li9768b452017-03-16 16:22:28 +08004272 pp->dram_target_info = mv_mbus_dram_info();
Marcin Wojtas2636ac32016-12-01 18:03:09 +01004273 /* Armada3700 requires setting default configuration of Mbus
4274 * windows, however without using filled mbus_dram_target_info
4275 * structure.
4276 */
Jane Li9768b452017-03-16 16:22:28 +08004277 if (pp->dram_target_info || pp->neta_armada3700)
4278 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
Marcin Wojtasdc35a102016-03-14 09:39:03 +01004279
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004280 pp->tx_ring_size = MVNETA_MAX_TXD;
4281 pp->rx_ring_size = MVNETA_MAX_RXD;
4282
4283 pp->dev = dev;
4284 SET_NETDEV_DEV(dev, &pdev->dev);
4285
Marcin Wojtasdc35a102016-03-14 09:39:03 +01004286 pp->id = global_port_id++;
4287
4288 /* Obtain access to BM resources if enabled and already initialized */
4289 bm_node = of_parse_phandle(dn, "buffer-manager", 0);
4290 if (bm_node && bm_node->data) {
4291 pp->bm_priv = bm_node->data;
4292 err = mvneta_bm_port_init(pdev, pp);
4293 if (err < 0) {
4294 dev_info(&pdev->dev, "use SW buffer management\n");
4295 pp->bm_priv = NULL;
4296 }
4297 }
Peter Chend4e4da02016-08-01 15:02:36 +08004298 of_node_put(bm_node);
Marcin Wojtasdc35a102016-03-14 09:39:03 +01004299
Ezequiel Garcia96728502014-05-22 20:06:59 -03004300 err = mvneta_init(&pdev->dev, pp);
4301 if (err < 0)
Marcin Wojtasdc35a102016-03-14 09:39:03 +01004302 goto err_netdev;
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02004303
4304 err = mvneta_port_power_up(pp, phy_mode);
4305 if (err < 0) {
4306 dev_err(&pdev->dev, "can't power up port\n");
Marcin Wojtasdc35a102016-03-14 09:39:03 +01004307 goto err_netdev;
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02004308 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03004309
	/* The Armada 3700 network controller does not support per-cpu
	 * operation, so only a single NAPI instance is initialized.
	 */
	if (pp->neta_armada3700) {
		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
	} else {
		for_each_present_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			netif_napi_add(dev, &port->napi, mvneta_poll,
				       NAPI_POLL_WEIGHT);
			port->pp = pp;
		}
	}

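	/*
	 * On the per-cpu capable SoCs every present CPU gets its own NAPI
	 * context, so RX processing can run on all cores in parallel
	 * instead of funnelling the whole port through a single poller.
	 */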
	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

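	/*
	 * dev->features is the currently active set, dev->hw_features is
	 * what ethtool may toggle at runtime, and dev->vlan_features is
	 * inherited by stacked VLAN devices.  IFF_LIVE_ADDR_CHANGE allows
	 * the MAC address to be changed while the interface is up.
	 */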
	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_netdev;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	if (pp->use_inband_status) {
		struct phy_device *phy = of_phy_find_device(dn);

		mvneta_fixed_link_update(pp, phy);

		put_device(&phy->mdio.dev);
	}

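	/*
	 * of_phy_find_device() takes a reference on the PHY's struct
	 * device, so it must be dropped with put_device() once the
	 * initial in-band link state has been propagated.
	 */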
	return 0;

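	/*
	 * Error unwinding: each label below releases what was acquired
	 * before the corresponding goto, in reverse order of acquisition,
	 * so jumping to the right label frees everything set up so far
	 * and nothing more.
	 */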
err_netdev:
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
	}
err_free_stats:
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_put_phy_node:
	of_node_put(phy_node);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	if (of_phy_is_fixed_link(dn))
		of_phy_deregister_fixed_link(dn);
	irq_dispose_mapping(dev->irq);
	of_node_put(pp->phy_node);

	/* pp lives inside dev's private area, so the BM pools must be
	 * released before free_netdev() frees it.
	 */
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
	}

	free_netdev(dev);

	return 0;
}

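/*
 * System sleep support: suspend stops a running interface, detaches it and
 * gates the clocks; resume re-enables the clocks and reprograms everything
 * the hardware forgot across the sleep (MBus windows, BM pools, port
 * defaults) before reopening the interface.
 */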
#ifdef CONFIG_PM_SLEEP
static int mvneta_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);

	if (netif_running(dev))
		mvneta_stop(dev);
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	return 0;
}

static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
		return err;
	}

	if (pp->use_inband_status)
		mvneta_fixed_link_update(pp, dev->phydev);

	netif_device_attach(dev);
	if (netif_running(dev)) {
		mvneta_open(dev);
		mvneta_set_rx_mode(dev);
	}

	return 0;
}
#endif

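/*
 * SIMPLE_DEV_PM_OPS wires up only the system-sleep callbacks; the
 * SET_SYSTEM_SLEEP_PM_OPS entries expand to nothing when CONFIG_PM_SLEEP
 * is disabled, which is why the functions above are guarded by the
 * matching #ifdef.
 */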
static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
		.pm = &mvneta_pm_ops,
	},
};

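/*
 * The usual module_platform_driver() helper is not used here because two
 * CPU hotplug multi-instance states (an "online" one and a "dead" one)
 * must be registered before the platform driver and removed again in the
 * opposite order on exit.
 */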
static int __init mvneta_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
	if (ret < 0)
		goto out;
	online_hpstate = ret;
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
	if (ret)
		goto err_dead;

	ret = platform_driver_register(&mvneta_driver);
	if (ret)
		goto err;
	return 0;

err:
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
err_dead:
	cpuhp_remove_multi_state(online_hpstate);
out:
	return ret;
}
module_init(mvneta_driver_init);

static void __exit mvneta_driver_exit(void)
{
	platform_driver_unregister(&mvneta_driver);
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
}
module_exit(mvneta_driver_exit);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
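/*
 * The queue-count and default-queue parameters are read-only (S_IRUGO)
 * once the module is loaded, while rx_copybreak, the size threshold below
 * which a received frame is copied into a fresh skb rather than handed up
 * by reference, is additionally writable by root (S_IWUSR) via
 * /sys/module/mvneta/parameters/rx_copybreak.
 */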