/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
#include <net/tso.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC BIT(1)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
#define MVNETA_PORT_RX_RESET 0x1cc0
#define MVNETA_PORT_RX_DMA_RESET BIT(0)
#define MVNETA_PHY_ADDR 0x2000
#define MVNETA_PHY_ADDR_MASK 0x1f
#define MVNETA_MBUS_RETRY 0x2010
#define MVNETA_UNIT_INTR_CAUSE 0x2080
#define MVNETA_UNIT_CONTROL 0x20B0
#define MVNETA_PHY_POLLING_ENABLE BIT(1)
#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
#define MVNETA_DEF_RXQ(q) ((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
                                          MVNETA_DEF_RXQ_ARP(q) | \
                                          MVNETA_DEF_RXQ_TCP(q) | \
                                          MVNETA_DEF_RXQ_UDP(q) | \
                                          MVNETA_DEF_RXQ_BPDU(q) | \
                                          MVNETA_TX_UNSET_ERR_SUM | \
                                          MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND 0x2404
#define MVNETA_MAC_ADDR_LOW 0x2414
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
#define MVNETA_ACC_MODE 0x2500
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE 0x25a0
#define MVNETA_INTR_NEW_MASK 0x25a4

/* bits 0..7 = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
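/* e.g. with all eight queues in use, MVNETA_TX_INTR_MASK(8) == 0x000000ff
 * and MVNETA_RX_INTR_MASK(8) == 0x0000ff00
 */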

#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
#define MVNETA_CAUSE_PTP BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
#define MVNETA_CAUSE_PRBS_ERR BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0xff000000 // note: neta says it's 0x000000FF

#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
#define MVNETA_GMAC_LINK_UP BIT(0)
#define MVNETA_GMAC_SPEED_1000 BIT(1)
#define MVNETA_GMAC_SPEED_100 BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_MIB_COUNTERS_BASE 0x3080
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
#define MVNETA_DA_FILT_UCAST_BASE 0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index) \
        (((index) < (q)->last_desc) ? ((index) + 1) : 0)
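/* i.e. the index advances linearly through the ring and wraps back to
 * slot 0 once the last descriptor has been handed out.
 */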

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS 16
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they leave the IP header aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE 2

#define MVNETA_VLAN_TAG_LEN 4

#define MVNETA_CPU_D_CACHE_LINE_SIZE 32
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT 1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000

#define MVNETA_TX_MTU_MAX 0x3ffff

/* TSO header size */
#define TSO_HEADER_SIZE 128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100

#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32

#define MVNETA_RX_PKT_SIZE(mtu) \
        ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
              ETH_HLEN + ETH_FCS_LEN, \
              MVNETA_CPU_D_CACHE_LINE_SIZE)
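/* e.g. for the default MTU of 1500: ALIGN(1500 + 2 + 4 + 14 + 4, 32) == 1536 */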

#define IS_TSO_HEADER(txq, addr) \
        ((addr >= txq->tso_hdrs_phys) && \
         (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

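/* Total RX buffer size: NET_SKB_PAD bytes of headroom for the stack,
 * followed by up to pkt_size bytes of frame data.
 */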
#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)

struct mvneta_pcpu_stats {
        struct u64_stats_sync syncp;
        u64 rx_packets;
        u64 rx_bytes;
        u64 tx_packets;
        u64 tx_bytes;
};

struct mvneta_port {
        int pkt_size;
        unsigned int frag_size;
        void __iomem *base;
        struct mvneta_rx_queue *rxqs;
        struct mvneta_tx_queue *txqs;
        struct net_device *dev;

        u32 cause_rx_tx;
        struct napi_struct napi;

        /* Core clock */
        struct clk *clk;
        u8 mcast_count[256];
        u16 tx_ring_size;
        u16 rx_ring_size;
        struct mvneta_pcpu_stats *stats;

        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
        phy_interface_t phy_interface;
        struct device_node *phy_node;
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
#define MVNETA_TX_L3_IP6 BIT(17)
#define MVNETA_TXD_IP_CSUM BIT(18)
#define MVNETA_TXD_Z_PAD BIT(19)
#define MVNETA_TXD_L_DESC BIT(20)
#define MVNETA_TXD_F_DESC BIT(21)
#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
                             MVNETA_TXD_L_DESC | \
                             MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)

#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
#define MVNETA_RXD_ERR_LEN BIT(18)
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
        u32 command;       /* Options used by HW for packet transmitting.*/
        u16 reserverd1;    /* csum_l4 (for future use) */
        u16 data_size;     /* Data size of transmitted packet in bytes */
        u32 buf_phys_addr; /* Physical addr of transmitted buffer */
        u32 reserved2;     /* hw_cmd - (for future use, PMT) */
        u32 reserved3[4];  /* Reserved - (for future use) */
};

struct mvneta_rx_desc {
        u32 status;        /* Info about received packet */
        u16 reserved1;     /* pnc_info - (for future use, PnC) */
        u16 data_size;     /* Size of received packet in bytes */

        u32 buf_phys_addr; /* Physical address of the buffer */
        u32 reserved2;     /* pnc_flow_id (for future use, PnC) */

        u32 buf_cookie;    /* cookie for access to RX buffer in rx path */
        u16 reserved3;     /* prefetch_cmd, for future use */
        u16 reserved4;     /* csum_l4 - (for future use, PnC) */

        u32 reserved5;     /* pnc_extra PnC (for future use, PnC) */
        u32 reserved6;     /* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
        u16 data_size;     /* Data size of transmitted packet in bytes */
        u16 reserverd1;    /* csum_l4 (for future use) */
        u32 command;       /* Options used by HW for packet transmitting.*/
        u32 reserved2;     /* hw_cmd - (for future use, PMT) */
        u32 buf_phys_addr; /* Physical addr of transmitted buffer */
        u32 reserved3[4];  /* Reserved - (for future use) */
};

struct mvneta_rx_desc {
        u16 data_size;     /* Size of received packet in bytes */
        u16 reserved1;     /* pnc_info - (for future use, PnC) */
        u32 status;        /* Info about received packet */

        u32 reserved2;     /* pnc_flow_id (for future use, PnC) */
        u32 buf_phys_addr; /* Physical address of the buffer */

        u16 reserved4;     /* csum_l4 - (for future use, PnC) */
        u16 reserved3;     /* prefetch_cmd, for future use */
        u32 buf_cookie;    /* cookie for access to RX buffer in rx path */

        u32 reserved5;     /* pnc_extra PnC (for future use, PnC) */
        u32 reserved6;     /* hw_cmd (for future use, PnC and HWF) */
};
#endif

struct mvneta_tx_queue {
        /* Number of this TX queue, in the range 0-7 */
        u8 id;

        /* Number of TX DMA descriptors in the descriptor ring */
        int size;

        /* Number of currently used TX DMA descriptor in the
         * descriptor ring
         */
        int count;
        int tx_stop_threshold;
        int tx_wake_threshold;

        /* Array of transmitted skb */
        struct sk_buff **tx_skb;

        /* Index of last TX DMA descriptor that was inserted */
        int txq_put_index;

        /* Index of the TX DMA descriptor to be cleaned up */
        int txq_get_index;

        u32 done_pkts_coal;

        /* Virtual address of the TX DMA descriptors array */
        struct mvneta_tx_desc *descs;

        /* DMA address of the TX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last TX DMA descriptor */
        int last_desc;

        /* Index of the next TX DMA descriptor to process */
        int next_desc_to_proc;

        /* DMA buffers for TSO headers */
        char *tso_hdrs;

        /* DMA address of TSO headers */
        dma_addr_t tso_hdrs_phys;
};

struct mvneta_rx_queue {
        /* rx queue number, in the range 0-7 */
        u8 id;

        /* num of rx descriptors in the rx descriptor ring */
        int size;

        /* counter of times when mvneta_refill() failed */
        int missed;

        u32 pkts_coal;
        u32 time_coal;

        /* Virtual address of the RX DMA descriptors array */
        struct mvneta_rx_desc *descs;

        /* DMA address of the RX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last RX DMA descriptor */
        int last_desc;

        /* Index of the next RX DMA descriptor to process */
        int next_desc_to_proc;
};

/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number = 1;
static int txq_number = 8;

static int rxq_def;

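/* RX frames up to this size are copied into a freshly allocated skb instead
 * of handing the DMA buffer to the stack, which avoids an unmap/refill cycle
 * for small packets (see mvneta_rx()).
 */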
static int rx_copybreak __read_mostly = 256;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
        writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
        return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
        txq->txq_get_index++;
        if (txq->txq_get_index == txq->size)
                txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
        txq->txq_put_index++;
        if (txq->txq_put_index == txq->size)
                txq->txq_put_index = 0;
}


/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
        int i;
        u32 dummy;

        /* Perform dummy reads from MIB counters */
        for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
                dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}

/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
                                             struct rtnl_link_stats64 *stats)
{
        struct mvneta_port *pp = netdev_priv(dev);
        unsigned int start;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct mvneta_pcpu_stats *cpu_stats;
                u64 rx_packets;
                u64 rx_bytes;
                u64 tx_packets;
                u64 tx_bytes;

                cpu_stats = per_cpu_ptr(pp->stats, cpu);
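                /* Snapshot this CPU's counters, retrying if a writer
                 * updated them while we were reading.
                 */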
                do {
                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        rx_packets = cpu_stats->rx_packets;
                        rx_bytes = cpu_stats->rx_bytes;
                        tx_packets = cpu_stats->tx_packets;
                        tx_bytes = cpu_stats->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

                stats->rx_packets += rx_packets;
                stats->rx_bytes += rx_bytes;
                stats->tx_packets += tx_packets;
                stats->tx_bytes += tx_bytes;
        }

        stats->rx_errors = dev->stats.rx_errors;
        stats->rx_dropped = dev->stats.rx_dropped;

        stats->tx_dropped = dev->stats.tx_dropped;

        return stats;
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
        return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
                MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
                                          struct mvneta_rx_queue *rxq,
                                          int ndescs)
{
        /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
         * be added at once
         */
        while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                            (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
                             MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
                ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
        }

        mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
                                        struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
        return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
                                       struct mvneta_rx_queue *rxq,
                                       int rx_done, int rx_filled)
{
        u32 val;

        if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
                val = rx_done |
                  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
                return;
        }

        /* Only 255 descriptors can be added at once */
        while ((rx_done > 0) || (rx_filled > 0)) {
                if (rx_done <= 0xff) {
                        val = rx_done;
                        rx_done = 0;
                } else {
                        val = 0xff;
                        rx_done -= 0xff;
                }
                if (rx_filled <= 0xff) {
                        val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled = 0;
                } else {
                        val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled -= 0xff;
                }
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
        }
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
        int rx_desc = rxq->next_desc_to_proc;

        rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
        prefetch(rxq->descs + rxq->next_desc_to_proc);
        return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
        val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
                MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}


/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq,
                                  int offset)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

        /* Offset is programmed in units of 8 bytes */
        val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq,
                                     int pend_desc)
{
        u32 val;

        /* Only 255 descriptors can be added at once ; Assume caller
         * process TX descriptors in quanta less than 256
         */
        val = pend_desc;
        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
        int tx_desc = txq->next_desc_to_proc;

        txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
        return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
        if (txq->next_desc_to_proc == 0)
                txq->next_desc_to_proc = txq->last_desc - 1;
        else
                txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq,
                                    int buf_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

        val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
        val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
        int queue;
        u32 q_map;

        /* Enable all initialized TXs. */
        mvneta_mib_counters_clear(pp);
        q_map = 0;
        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];
                if (txq->descs != NULL)
                        q_map |= (1 << queue);
        }
        mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

        /* Enable all initialized RXQs. */
        q_map = 0;
        for (queue = 0; queue < rxq_number; queue++) {
                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
                if (rxq->descs != NULL)
                        q_map |= (1 << queue);
        }

        mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
        u32 val;
        int count;

        /* Stop Rx port activity. Check port Rx activity. */
        val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

        /* Issue stop command for active channels only */
        if (val != 0)
                mvreg_write(pp, MVNETA_RXQ_CMD,
                            val << MVNETA_RXQ_DISABLE_SHIFT);

        /* Wait for all Rx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_RXQ_CMD);
        } while (val & 0xff);

        /* Stop Tx port activity. Check port Tx activity. Issue stop
         * command for active channels only
         */
        val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

        if (val != 0)
                mvreg_write(pp, MVNETA_TXQ_CMD,
                            (val << MVNETA_TXQ_DISABLE_SHIFT));

        /* Wait for all Tx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for TX stopped status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                /* Check TX Command reg that all Txqs are stopped */
                val = mvreg_read(pp, MVNETA_TXQ_CMD);

        } while (val & 0xff);

        /* Double check to verify that TX FIFO is empty */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
                        netdev_warn(pp->dev,
                                    "TX FIFO empty timeout status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_PORT_STATUS);
        } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
                 (val & MVNETA_TX_IN_PRGRS));

        udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
        u32 val;

        /* Enable port */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val |= MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
        u32 val;

        /* Reset the Enable bit in the Serial Control Register */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

        udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);

}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
                val = 0;
        } else {
                memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

/* This method sets defaults to the NETA port:
 * Clears interrupt Cause and Mask registers.
 * Clears all MAC tables.
 * Sets defaults to all registers.
 * Resets RX and TX descriptor rings.
 * Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
        int cpu;
        int queue;
        u32 val;

        /* Clear all Cause registers */
        mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

        /* Mask all interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

        /* Enable MBUS Retry bit16 */
        mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

        /* Set CPU queue access map - all CPUs have access to all RX
         * queues and to all TX queues
         */
        for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
                mvreg_write(pp, MVNETA_CPU_MAP(cpu),
                            (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
                             MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

        /* Reset RX and TX DMAs */
        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
        mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

        /* Disable Legacy WRR, Disable EJP, Release from reset */
        mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
        for (queue = 0; queue < txq_number; queue++) {
                mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
                mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
        }

        mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
        mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

        /* Set Port Acceleration Mode */
        val = MVNETA_ACC_MODE_EXT;
        mvreg_write(pp, MVNETA_ACC_MODE, val);

        /* Update val of portCfg register accordingly with all RxQueue types */
        val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
        mvreg_write(pp, MVNETA_PORT_CONFIG, val);

        val = 0;
        mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
        mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

        /* Build PORT_SDMA_CONFIG_REG */
        val = 0;

        /* Default burst size */
        val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
        val |= MVNETA_DESC_SWAP;
#endif

        /* Assign port SDMA configuration */
        mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

        /* Disable PHY polling in hardware, since we're using the
         * kernel phylib to do this.
         */
        val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
        val &= ~MVNETA_PHY_POLLING_ENABLE;
        mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

        mvneta_set_ucast_table(pp, -1);
        mvneta_set_special_mcast_table(pp, -1);
        mvneta_set_other_mcast_table(pp, -1);

        /* Set port interrupt enable register - default enable all */
        mvreg_write(pp, MVNETA_INTR_ENABLE,
                    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
                     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
        u32 val, size, mtu;
        int queue;

        mtu = max_tx_size * 8;
        if (mtu > MVNETA_TX_MTU_MAX)
                mtu = MVNETA_TX_MTU_MAX;

        /* Set MTU */
        val = mvreg_read(pp, MVNETA_TX_MTU);
        val &= ~MVNETA_TX_MTU_MAX;
        val |= mtu;
        mvreg_write(pp, MVNETA_TX_MTU, val);

        /* TX token size and all TXQs token size must be larger than MTU */
        val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

        size = val & MVNETA_TX_TOKEN_SIZE_MAX;
        if (size < mtu) {
                size = mtu;
                val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
                val |= size;
                mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
        }
        for (queue = 0; queue < txq_number; queue++) {
                val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

                size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
                if (size < mtu) {
                        size = mtu;
                        val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
                        val |= size;
                        mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
                }
        }
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
                                  int queue)
{
        unsigned int unicast_reg;
        unsigned int tbl_offset;
        unsigned int reg_offset;

        /* Locate the Unicast table entry */
        last_nibble = (0xf & last_nibble);

        /* offset from unicast tbl base */
        tbl_offset = (last_nibble / 4) * 4;

        /* offset within the above reg */
        reg_offset = last_nibble % 4;

        unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

        if (queue == -1) {
                /* Clear accepts frame bit at specified unicast DA tbl entry */
                unicast_reg &= ~(0xff << (8 * reg_offset));
        } else {
                unicast_reg &= ~(0xff << (8 * reg_offset));
                unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
        }

        mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
                                int queue)
{
        unsigned int mac_h;
        unsigned int mac_l;

        if (queue != -1) {
                mac_l = (addr[4] << 8) | (addr[5]);
                mac_h = (addr[0] << 24) | (addr[1] << 16) |
                        (addr[2] << 8) | (addr[3] << 0);

                mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
                mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
        }

        /* Accept frames of this address */
        mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq, u32 value)
{
        mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
                    value | MVNETA_RXQ_NON_OCCUPIED(0));
        rxq->pkts_coal = value;
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq, u32 value)
{
        u32 val;
        unsigned long clk_rate;

        clk_rate = clk_get_rate(pp->clk);
        val = (clk_rate / 1000000) * value;

        mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
        rxq->time_coal = value;
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
                                         struct mvneta_tx_queue *txq, u32 value)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

        val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
        val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

        txq->done_pkts_coal = value;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
                                u32 phys_addr, u32 cookie)
{
        rx_desc->buf_cookie = cookie;
        rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq,
                                     int sent_desc)
{
        u32 val;

        /* Only 255 TX descriptors can be updated at once */
        while (sent_desc > 0xff) {
                val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
                mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
                sent_desc = sent_desc - 0xff;
        }

        val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
                                        struct mvneta_tx_queue *txq)
{
        u32 val;
        int sent_desc;

        val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
        sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
                MVNETA_TXQ_SENT_DESC_SHIFT;

        return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq)
{
        int sent_desc;

        /* Get number of sent descriptors */
        sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

        /* Decrement sent descriptors counter */
        if (sent_desc)
                mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

        return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
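/* For example (informative sketch): an IPv4/TCP frame with a 14-byte
 * Ethernet header and a 20-byte IP header (ihl == 5) yields
 * 14 | (5 << MVNETA_TX_IP_HLEN_SHIFT) | MVNETA_TXD_IP_CSUM |
 * MVNETA_TX_L4_CSUM_FULL.
 */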
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
                                int ip_hdr_len, int l4_proto)
{
        u32 command;

        /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
         * G_L4_chk, L4_type; required only for checksum
         * calculation
         */
        command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
        command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

        if (l3_proto == htons(ETH_P_IP))
                command |= MVNETA_TXD_IP_CSUM;
        else
                command |= MVNETA_TX_L3_IP6;

        if (l4_proto == IPPROTO_TCP)
                command |= MVNETA_TX_L4_CSUM_FULL;
        else if (l4_proto == IPPROTO_UDP)
                command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
        else
                command |= MVNETA_TX_L4_CSUM_NOT;

        return command;
}


/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
                            struct mvneta_rx_desc *rx_desc)
{
        u32 status = rx_desc->status;

        if (!mvneta_rxq_desc_is_first_last(status)) {
                netdev_err(pp->dev,
                           "bad rx status %08x (buffer oversize), size=%d\n",
                           status, rx_desc->data_size);
                return;
        }

        switch (status & MVNETA_RXD_ERR_CODE_MASK) {
        case MVNETA_RXD_ERR_CRC:
                netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_OVERRUN:
                netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_LEN:
                netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_RESOURCE:
                netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        }
}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
                           struct sk_buff *skb)
{
        if ((status & MVNETA_RXD_L3_IP4) &&
            (status & MVNETA_RXD_L4_CSUM_OK)) {
                skb->csum = 0;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                return;
        }

        skb->ip_summed = CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from tx_done reg. <cause> must not be null. The return value is always a
 * valid queue for matching the first one found in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
                                                     u32 cause)
{
        int queue = fls(cause) - 1;

        return &pp->txqs[queue];
}

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
                                 struct mvneta_tx_queue *txq, int num)
{
        int i;

        for (i = 0; i < num; i++) {
                struct mvneta_tx_desc *tx_desc = txq->descs +
                        txq->txq_get_index;
                struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

                mvneta_txq_inc_get(txq);

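                /* TSO header descriptors point into the queue's own header
                 * area and were never mapped individually, so they must not
                 * be unmapped here.
                 */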
                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
                        dma_unmap_single(pp->dev->dev.parent,
                                         tx_desc->buf_phys_addr,
                                         tx_desc->data_size, DMA_TO_DEVICE);
                if (!skb)
                        continue;
                dev_kfree_skb_any(skb);
        }
}

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
                            struct mvneta_tx_queue *txq)
{
        struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
        int tx_done;

        tx_done = mvneta_txq_sent_desc_proc(pp, txq);
        if (!tx_done)
                return;

        mvneta_txq_bufs_free(pp, txq, tx_done);

        txq->count -= tx_done;

        if (netif_tx_queue_stopped(nq)) {
                if (txq->count <= txq->tx_wake_threshold)
                        netif_tx_wake_queue(nq);
        }
}

static void *mvneta_frag_alloc(const struct mvneta_port *pp)
{
        if (likely(pp->frag_size <= PAGE_SIZE))
                return netdev_alloc_frag(pp->frag_size);
        else
                return kmalloc(pp->frag_size, GFP_ATOMIC);
}

static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
{
        if (likely(pp->frag_size <= PAGE_SIZE))
                put_page(virt_to_head_page(data));
        else
                kfree(data);
}

/* Refill processing */
static int mvneta_rx_refill(struct mvneta_port *pp,
                            struct mvneta_rx_desc *rx_desc)
{
        dma_addr_t phys_addr;
        void *data;

        data = mvneta_frag_alloc(pp);
        if (!data)
                return -ENOMEM;

        phys_addr = dma_map_single(pp->dev->dev.parent, data,
                                   MVNETA_RX_BUF_SIZE(pp->pkt_size),
                                   DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
                mvneta_frag_free(pp, data);
                return -ENOMEM;
        }

        mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
        return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int ip_hdr_len = 0;
                __be16 l3_proto = vlan_get_protocol(skb);
                u8 l4_proto;

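                /* ip_hdr_len below ends up in 32-bit words: ihl already is,
                 * and the IPv6 header length is converted with the >> 2.
                 */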
                if (l3_proto == htons(ETH_P_IP)) {
                        struct iphdr *ip4h = ip_hdr(skb);

                        /* Calculate IPv4 checksum and L4 checksum */
                        ip_hdr_len = ip4h->ihl;
                        l4_proto = ip4h->protocol;
                } else if (l3_proto == htons(ETH_P_IPV6)) {
                        struct ipv6hdr *ip6h = ipv6_hdr(skb);

                        /* Read l4_protocol from one of IPv6 extra headers */
                        if (skb_network_header_len(skb) > 0)
                                ip_hdr_len = (skb_network_header_len(skb) >> 2);
                        l4_proto = ip6h->nexthdr;
                } else
                        return MVNETA_TX_L4_CSUM_NOT;

                return mvneta_txq_desc_csum(skb_network_offset(skb),
                                            l3_proto, ip_hdr_len, l4_proto);
        }

        return MVNETA_TX_L4_CSUM_NOT;
}

/* Returns rx queue pointer (find last set bit) according to causeRxTx
 * value
 */
static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
                                                u32 cause)
{
        int queue = fls(cause >> 8) - 1;

        return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
                                 struct mvneta_rx_queue *rxq)
{
        int rx_done, i;

        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
        for (i = 0; i < rxq->size; i++) {
                struct mvneta_rx_desc *rx_desc = rxq->descs + i;
                void *data = (void *)rx_desc->buf_cookie;

                mvneta_frag_free(pp, data);
                dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
                                 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
        }

        if (rx_done)
                mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Main rx processing */
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
                     struct mvneta_rx_queue *rxq)
{
        struct net_device *dev = pp->dev;
        int rx_done, rx_filled;
        u32 rcvd_pkts = 0;
        u32 rcvd_bytes = 0;

        /* Get number of received packets */
        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

        if (rx_todo > rx_done)
                rx_todo = rx_done;

        rx_done = 0;
        rx_filled = 0;

        /* Fairness NAPI loop */
        while (rx_done < rx_todo) {
                struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
                struct sk_buff *skb;
                unsigned char *data;
                u32 rx_status;
                int rx_bytes, err;

                rx_done++;
                rx_filled++;
                rx_status = rx_desc->status;
                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
                data = (unsigned char *)rx_desc->buf_cookie;

                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
                err_drop_frame:
                        dev->stats.rx_errors++;
                        mvneta_rx_error(pp, rx_desc);
                        /* leave the descriptor untouched */
                        continue;
                }

                if (rx_bytes <= rx_copybreak) {
                        /* better copy a small frame and not unmap the DMA region */
                        skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
                        if (unlikely(!skb))
                                goto err_drop_frame;

                        dma_sync_single_range_for_cpu(dev->dev.parent,
                                                      rx_desc->buf_phys_addr,
                                                      MVNETA_MH_SIZE + NET_SKB_PAD,
                                                      rx_bytes,
                                                      DMA_FROM_DEVICE);
                        memcpy(skb_put(skb, rx_bytes),
                               data + MVNETA_MH_SIZE + NET_SKB_PAD,
                               rx_bytes);

                        skb->protocol = eth_type_trans(skb, dev);
                        mvneta_rx_csum(pp, rx_status, skb);
                        napi_gro_receive(&pp->napi, skb);

                        rcvd_pkts++;
                        rcvd_bytes += rx_bytes;

                        /* leave the descriptor and buffer untouched */
                        continue;
                }

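                /* Frame is larger than rx_copybreak: hand the DMA buffer
                 * itself to the stack and allocate a replacement in the
                 * refill step below.
                 */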
1498 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1499 if (!skb)
1500 goto err_drop_frame;
1501
1502 dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
Ezequiel Garciaa328f3a2013-12-05 13:35:37 -03001503 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001504
willy tarreaudc4277d2014-01-16 08:20:07 +01001505 rcvd_pkts++;
1506 rcvd_bytes += rx_bytes;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001507
1508 /* Linux processing */
willy tarreau8ec2cd42014-01-16 08:20:16 +01001509 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001510 skb_put(skb, rx_bytes);
1511
1512 skb->protocol = eth_type_trans(skb, dev);
1513
willy tarreau54282132014-01-16 08:20:14 +01001514 mvneta_rx_csum(pp, rx_status, skb);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001515
1516 napi_gro_receive(&pp->napi, skb);
1517
1518 /* Refill processing */
1519 err = mvneta_rx_refill(pp, rx_desc);
1520 if (err) {
willy tarreauf19fadf2014-01-16 08:20:17 +01001521 netdev_err(dev, "Linux processing - Can't refill\n");
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001522 rxq->missed++;
1523 rx_filled--;
1524 }
1525 }
1526
willy tarreaudc4277d2014-01-16 08:20:07 +01001527 if (rcvd_pkts) {
willy tarreau74c41b02014-01-16 08:20:08 +01001528 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1529
1530 u64_stats_update_begin(&stats->syncp);
1531 stats->rx_packets += rcvd_pkts;
1532 stats->rx_bytes += rcvd_bytes;
1533 u64_stats_update_end(&stats->syncp);
willy tarreaudc4277d2014-01-16 08:20:07 +01001534 }
1535
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001536 /* Update rxq management counters */
1537 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);
1538
1539 return rx_done;
1540}
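
/* Note on the copybreak cutoff used above (an illustrative aside, not
 * driver logic): rx_copybreak is a writable module parameter, so the
 * copy-vs-build_skb() threshold can be tuned at runtime through
 * /sys/module/mvneta/parameters/rx_copybreak. Frames of at most that
 * many bytes are memcpy'd into a fresh skb while the DMA buffer stays
 * mapped and immediately reusable; larger frames take the zero-copy
 * build_skb() path and force a refill of the descriptor.
 */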
1541
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03001542static inline void
1543mvneta_tso_put_hdr(struct sk_buff *skb,
1544 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
1545{
1546 struct mvneta_tx_desc *tx_desc;
1547 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1548
1549 txq->tx_skb[txq->txq_put_index] = NULL;
1550 tx_desc = mvneta_txq_next_desc_get(txq);
1551 tx_desc->data_size = hdr_len;
1552 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
1553 tx_desc->command |= MVNETA_TXD_F_DESC;
1554 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
1555 txq->txq_put_index * TSO_HEADER_SIZE;
1556 mvneta_txq_inc_put(txq);
1557}
1558
1559static inline int
1560mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
1561 struct sk_buff *skb, char *data, int size,
1562 bool last_tcp, bool is_last)
1563{
1564 struct mvneta_tx_desc *tx_desc;
1565
1566 tx_desc = mvneta_txq_next_desc_get(txq);
1567 tx_desc->data_size = size;
1568 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
1569 size, DMA_TO_DEVICE);
1570 if (unlikely(dma_mapping_error(dev->dev.parent,
1571 tx_desc->buf_phys_addr))) {
1572 mvneta_txq_desc_put(txq);
1573 return -ENOMEM;
1574 }
1575
1576 tx_desc->command = 0;
1577 txq->tx_skb[txq->txq_put_index] = NULL;
1578
1579 if (last_tcp) {
1580 /* last descriptor in the TCP packet */
1581 tx_desc->command = MVNETA_TXD_L_DESC;
1582
1583 /* last descriptor in SKB */
1584 if (is_last)
1585 txq->tx_skb[txq->txq_put_index] = skb;
1586 }
1587 mvneta_txq_inc_put(txq);
1588 return 0;
1589}
1590
1591static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
1592 struct mvneta_tx_queue *txq)
1593{
1594 int total_len, data_left;
1595 int desc_count = 0;
1596 struct mvneta_port *pp = netdev_priv(dev);
1597 struct tso_t tso;
1598 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1599 int i;
1600
1601 /* Count needed descriptors */
1602 if ((txq->count + tso_count_descs(skb)) >= txq->size)
1603 return 0;
1604
1605	if (skb_headlen(skb) < hdr_len) {
1606		netdev_err(dev, "TSO: skb linear data is shorter than its headers\n");
1607 return 0;
1608 }
1609
1610 /* Initialize the TSO handler, and prepare the first payload */
1611 tso_start(skb, &tso);
1612
1613 total_len = skb->len - hdr_len;
1614 while (total_len > 0) {
1615 char *hdr;
1616
1617 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1618 total_len -= data_left;
1619 desc_count++;
1620
1621 /* prepare packet headers: MAC + IP + TCP */
1622 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
1623 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1624
1625 mvneta_tso_put_hdr(skb, pp, txq);
1626
1627 while (data_left > 0) {
1628 int size;
1629 desc_count++;
1630
1631 size = min_t(int, tso.size, data_left);
1632
1633 if (mvneta_tso_put_data(dev, txq, skb,
1634 tso.data, size,
1635 size == data_left,
1636 total_len == 0))
1637 goto err_release;
1638 data_left -= size;
1639
1640 tso_build_data(skb, &tso, size);
1641 }
1642 }
1643
1644 return desc_count;
1645
1646err_release:
1647 /* Release all used data descriptors; header descriptors must not
1648 * be DMA-unmapped.
1649 */
1650 for (i = desc_count - 1; i >= 0; i--) {
1651 struct mvneta_tx_desc *tx_desc = txq->descs + i;
Ezequiel Garcia2e3173a2014-05-30 13:40:07 -03001652 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03001653 dma_unmap_single(pp->dev->dev.parent,
1654 tx_desc->buf_phys_addr,
1655 tx_desc->data_size,
1656 DMA_TO_DEVICE);
1657 mvneta_txq_desc_put(txq);
1658 }
1659 return 0;
1660}
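
/* Worked example for the TSO path above (a sketch assuming typical
 * values, not taken from a spec): for a GSO skb with hdr_len = 54
 * (Ethernet + IPv4 + TCP), gso_size = 1448 and 2896 bytes of payload,
 * mvneta_tx_tso() emits two segments, each as one F_DESC header
 * descriptor pointing into txq->tso_hdrs plus data descriptors, the
 * last of which carries L_DESC:
 *
 *	seg 0: [F_DESC, 54B hdr][L_DESC, 1448B data]
 *	seg 1: [F_DESC, 54B hdr][L_DESC, 1448B data, tx_skb = skb]
 *
 * Only the very last data descriptor records the skb pointer, so the
 * skb is freed exactly once at tx-done time.
 */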
1661
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001662/* Handle tx fragmentation processing */
1663static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1664 struct mvneta_tx_queue *txq)
1665{
1666 struct mvneta_tx_desc *tx_desc;
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001667 int i, nr_frags = skb_shinfo(skb)->nr_frags;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001668
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001669 for (i = 0; i < nr_frags; i++) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001670 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1671 void *addr = page_address(frag->page.p) + frag->page_offset;
1672
1673 tx_desc = mvneta_txq_next_desc_get(txq);
1674 tx_desc->data_size = frag->size;
1675
1676 tx_desc->buf_phys_addr =
1677 dma_map_single(pp->dev->dev.parent, addr,
1678 tx_desc->data_size, DMA_TO_DEVICE);
1679
1680 if (dma_mapping_error(pp->dev->dev.parent,
1681 tx_desc->buf_phys_addr)) {
1682 mvneta_txq_desc_put(txq);
1683 goto error;
1684 }
1685
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001686 if (i == nr_frags - 1) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001687 /* Last descriptor */
1688 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001689 txq->tx_skb[txq->txq_put_index] = skb;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001690 } else {
1691 /* Descriptor in the middle: Not First, Not Last */
1692 tx_desc->command = 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001693 txq->tx_skb[txq->txq_put_index] = NULL;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001694 }
Ezequiel Garcia3d4ea022014-05-22 20:06:57 -03001695 mvneta_txq_inc_put(txq);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001696 }
1697
1698 return 0;
1699
1700error:
1701 /* Release all descriptors that were used to map fragments of
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01001702 * this packet, as well as the corresponding DMA mappings
1703 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001704 for (i = i - 1; i >= 0; i--) {
1705 tx_desc = txq->descs + i;
1706 dma_unmap_single(pp->dev->dev.parent,
1707 tx_desc->buf_phys_addr,
1708 tx_desc->data_size,
1709 DMA_TO_DEVICE);
1710 mvneta_txq_desc_put(txq);
1711 }
1712
1713 return -ENOMEM;
1714}
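
/* Descriptor layout example for a fragmented skb (illustrative): with
 * a linear head and three fragments, mvneta_tx() emits the first
 * descriptor and this function emits the rest:
 *
 *	desc 0: F_DESC          (linear head, tx_skb = NULL)
 *	desc 1: 0               (frag 0,      tx_skb = NULL)
 *	desc 2: 0               (frag 1,      tx_skb = NULL)
 *	desc 3: L_DESC | Z_PAD  (frag 2,      tx_skb = skb)
 *
 * Recording the skb only on the last descriptor lets tx-done free it
 * once all of its descriptors have been transmitted.
 */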
1715
1716/* Main tx processing */
1717static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1718{
1719 struct mvneta_port *pp = netdev_priv(dev);
Willy Tarreauee40a112013-04-11 23:00:37 +02001720 u16 txq_id = skb_get_queue_mapping(skb);
1721 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001722 struct mvneta_tx_desc *tx_desc;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001723 int frags = 0;
1724 u32 tx_cmd;
1725
1726 if (!netif_running(dev))
1727 goto out;
1728
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03001729 if (skb_is_gso(skb)) {
1730 frags = mvneta_tx_tso(skb, dev, txq);
1731 goto out;
1732 }
1733
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001734 frags = skb_shinfo(skb)->nr_frags + 1;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001735
1736 /* Get a descriptor for the first part of the packet */
1737 tx_desc = mvneta_txq_next_desc_get(txq);
1738
1739 tx_cmd = mvneta_skb_tx_csum(pp, skb);
1740
1741 tx_desc->data_size = skb_headlen(skb);
1742
1743 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1744 tx_desc->data_size,
1745 DMA_TO_DEVICE);
1746 if (unlikely(dma_mapping_error(dev->dev.parent,
1747 tx_desc->buf_phys_addr))) {
1748 mvneta_txq_desc_put(txq);
1749 frags = 0;
1750 goto out;
1751 }
1752
1753 if (frags == 1) {
1754 /* First and Last descriptor */
1755 tx_cmd |= MVNETA_TXD_FLZ_DESC;
1756 tx_desc->command = tx_cmd;
1757 txq->tx_skb[txq->txq_put_index] = skb;
1758 mvneta_txq_inc_put(txq);
1759 } else {
1760 /* First but not Last */
1761 tx_cmd |= MVNETA_TXD_F_DESC;
1762 txq->tx_skb[txq->txq_put_index] = NULL;
1763 mvneta_txq_inc_put(txq);
1764 tx_desc->command = tx_cmd;
1765 /* Continue with other skb fragments */
1766 if (mvneta_tx_frag_process(pp, skb, txq)) {
1767 dma_unmap_single(dev->dev.parent,
1768 tx_desc->buf_phys_addr,
1769 tx_desc->data_size,
1770 DMA_TO_DEVICE);
1771 mvneta_txq_desc_put(txq);
1772 frags = 0;
1773 goto out;
1774 }
1775 }
1776
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001777out:
1778 if (frags > 0) {
willy tarreau74c41b02014-01-16 08:20:08 +01001779 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
Ezequiel Garciae19d2dd2014-05-19 13:59:54 -03001780 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
1781
1782 txq->count += frags;
1783 mvneta_txq_pend_desc_add(pp, txq, frags);
1784
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03001785 if (txq->count >= txq->tx_stop_threshold)
Ezequiel Garciae19d2dd2014-05-19 13:59:54 -03001786 netif_tx_stop_queue(nq);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001787
willy tarreau74c41b02014-01-16 08:20:08 +01001788 u64_stats_update_begin(&stats->syncp);
1789 stats->tx_packets++;
1790 stats->tx_bytes += skb->len;
1791 u64_stats_update_end(&stats->syncp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001792 } else {
1793 dev->stats.tx_dropped++;
1794 dev_kfree_skb_any(skb);
1795 }
1796
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001797 return NETDEV_TX_OK;
1798}
1799
1800
1801/* Free tx resources, when resetting a port */
1802static void mvneta_txq_done_force(struct mvneta_port *pp,
1803 struct mvneta_tx_queue *txq)
1804
1805{
1806 int tx_done = txq->count;
1807
1808 mvneta_txq_bufs_free(pp, txq, tx_done);
1809
1810 /* reset txq */
1811 txq->count = 0;
1812 txq->txq_put_index = 0;
1813 txq->txq_get_index = 0;
1814}
1815
willy tarreau6c498972014-01-16 08:20:12 +01001816/* Handle tx done - called in softirq context. The <cause_tx_done> argument
1817 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
1818 */
Arnaud Ebalard0713a862014-01-16 08:20:18 +01001819static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001820{
1821 struct mvneta_tx_queue *txq;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001822 struct netdev_queue *nq;
1823
willy tarreau6c498972014-01-16 08:20:12 +01001824 while (cause_tx_done) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001825 txq = mvneta_tx_done_policy(pp, cause_tx_done);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001826
1827 nq = netdev_get_tx_queue(pp->dev, txq->id);
1828 __netif_tx_lock(nq, smp_processor_id());
1829
Arnaud Ebalard0713a862014-01-16 08:20:18 +01001830 if (txq->count)
1831 mvneta_txq_done(pp, txq);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001832
1833 __netif_tx_unlock(nq);
1834 cause_tx_done &= ~((1 << txq->id));
1835 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001836}
1837
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01001838/* Compute the CRC-8 of the specified address, using the algorithm
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001839 * defined by the hw spec, which differs from the generic crc8 algorithm
1840 */
1841static int mvneta_addr_crc(unsigned char *addr)
1842{
1843 int crc = 0;
1844 int i;
1845
1846 for (i = 0; i < ETH_ALEN; i++) {
1847 int j;
1848
1849 crc = (crc ^ addr[i]) << 8;
1850 for (j = 7; j >= 0; j--) {
1851 if (crc & (0x100 << j))
1852 crc ^= 0x107 << j;
1853 }
1854 }
1855
1856 return crc;
1857}
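
/* Aside (my reading of the loop above, not from the spec): 0x107 is
 * the generator polynomial x^8 + x^2 + x + 1, applied MSB-first with
 * no init value and no final XOR; that combination, rather than the
 * polynomial itself, is what makes the result differ from the
 * kernel's generic crc8 helpers. The return value is below 0x100 by
 * construction and indexes the 256-entry Other Multicast table.
 */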
1858
1859/* This method controls the net device special MAC multicast support.
1860 * The Special Multicast Table for MAC addresses supports MAC of the form
1861 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1862 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1863 * Table entries in the DA-Filter table. This method sets the
1864 * appropriate Special Multicast Table entry.
1865 */
1866static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
1867 unsigned char last_byte,
1868 int queue)
1869{
1870 unsigned int smc_table_reg;
1871 unsigned int tbl_offset;
1872 unsigned int reg_offset;
1873
1874 /* Register offset from SMC table base */
1875 tbl_offset = (last_byte / 4);
1876 /* Entry offset within the above reg */
1877 reg_offset = last_byte % 4;
1878
1879 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
1880 + tbl_offset * 4));
1881
1882 if (queue == -1)
1883 smc_table_reg &= ~(0xff << (8 * reg_offset));
1884 else {
1885 smc_table_reg &= ~(0xff << (8 * reg_offset));
1886 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1887 }
1888
1889 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
1890 smc_table_reg);
1891}
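
/* Worked example (illustrative): for DA 01:00:5e:00:00:2a, last_byte
 * is 0x2a, so tbl_offset = 0x2a / 4 = 10 (the 11th 32-bit table
 * register) and reg_offset = 2, i.e. bits 23:16 of that register.
 * With queue = 3 the entry byte becomes 0x01 | (3 << 1) = 0x07:
 * accept the frame and steer it to RXQ 3; queue = -1 clears the byte
 * so frames to that group are no longer accepted via this table.
 */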
1892
1893/* This method controls the network device Other MAC multicast support.
1894 * The Other Multicast Table is used for multicast of another type.
1895 * A CRC-8 is used as an index to the Other Multicast Table entries
1896 * in the DA-Filter table.
1897 * The method gets the CRC-8 value from the calling routine and
1898 * sets the appropriate Other Multicast Table entry according to the
1899 * specified CRC-8.
1900 */
1901static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
1902 unsigned char crc8,
1903 int queue)
1904{
1905 unsigned int omc_table_reg;
1906 unsigned int tbl_offset;
1907 unsigned int reg_offset;
1908
1909 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
1910 reg_offset = crc8 % 4; /* Entry offset within the above reg */
1911
1912 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
1913
1914 if (queue == -1) {
1915 /* Clear accepts frame bit at specified Other DA table entry */
1916 omc_table_reg &= ~(0xff << (8 * reg_offset));
1917 } else {
1918 omc_table_reg &= ~(0xff << (8 * reg_offset));
1919 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1920 }
1921
1922 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
1923}
1924
1925/* The network device supports multicast using two tables:
1926 * 1) Special Multicast Table for MAC addresses of the form
1927 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1928 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1929 * Table entries in the DA-Filter table.
1930 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
1931 * is used as an index to the Other Multicast Table entries in the
1932 * DA-Filter table.
1933 */
1934static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
1935 int queue)
1936{
1937 unsigned char crc_result = 0;
1938
1939 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
1940 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
1941 return 0;
1942 }
1943
1944 crc_result = mvneta_addr_crc(p_addr);
1945 if (queue == -1) {
1946 if (pp->mcast_count[crc_result] == 0) {
1947 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
1948 crc_result);
1949 return -EINVAL;
1950 }
1951
1952 pp->mcast_count[crc_result]--;
1953 if (pp->mcast_count[crc_result] != 0) {
1954 netdev_info(pp->dev,
1955 "After delete there are %d valid Mcast for crc8=0x%02x\n",
1956 pp->mcast_count[crc_result], crc_result);
1957 return -EINVAL;
1958 }
1959 } else
1960 pp->mcast_count[crc_result]++;
1961
1962 mvneta_set_other_mcast_addr(pp, crc_result, queue);
1963
1964 return 0;
1965}
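
/* Example of the dispatch above (illustrative): 01:00:5e:00:00:01
 * (IPv4 all-hosts) matches the 5-byte prefix and goes to the Special
 * table, indexed by its last byte 0x01; an IPv6 group address such as
 * 33:33:00:00:00:01 does not match, so its CRC-8 picks an Other
 * Multicast table entry, and mcast_count[] tracks how many addresses
 * currently share that (possibly colliding) entry.
 */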
1966
1967/* Configure the filtering mode of the Ethernet port */
1968static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
1969 int is_promisc)
1970{
1971 u32 port_cfg_reg, val;
1972
1973 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
1974
1975 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
1976
1977 /* Set / Clear UPM bit in port configuration register */
1978 if (is_promisc) {
1979 /* Accept all Unicast addresses */
1980 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
1981 val |= MVNETA_FORCE_UNI;
1982 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
1983 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
1984 } else {
1985 /* Reject all Unicast addresses */
1986 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
1987 val &= ~MVNETA_FORCE_UNI;
1988 }
1989
1990 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
1991 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
1992}
1993
1994/* register unicast and multicast addresses */
1995static void mvneta_set_rx_mode(struct net_device *dev)
1996{
1997 struct mvneta_port *pp = netdev_priv(dev);
1998 struct netdev_hw_addr *ha;
1999
2000 if (dev->flags & IFF_PROMISC) {
2001 /* Accept all: Multicast + Unicast */
2002 mvneta_rx_unicast_promisc_set(pp, 1);
2003 mvneta_set_ucast_table(pp, rxq_def);
2004 mvneta_set_special_mcast_table(pp, rxq_def);
2005 mvneta_set_other_mcast_table(pp, rxq_def);
2006 } else {
2007 /* Accept single Unicast */
2008 mvneta_rx_unicast_promisc_set(pp, 0);
2009 mvneta_set_ucast_table(pp, -1);
2010 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2011
2012 if (dev->flags & IFF_ALLMULTI) {
2013 /* Accept all multicast */
2014 mvneta_set_special_mcast_table(pp, rxq_def);
2015 mvneta_set_other_mcast_table(pp, rxq_def);
2016 } else {
2017 /* Accept only initialized multicast */
2018 mvneta_set_special_mcast_table(pp, -1);
2019 mvneta_set_other_mcast_table(pp, -1);
2020
2021 if (!netdev_mc_empty(dev)) {
2022 netdev_for_each_mc_addr(ha, dev) {
2023 mvneta_mcast_addr_set(pp, ha->addr,
2024 rxq_def);
2025 }
2026 }
2027 }
2028 }
2029}
2030
2031/* Interrupt handling - the callback for request_irq() */
2032static irqreturn_t mvneta_isr(int irq, void *dev_id)
2033{
2034 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2035
2036 /* Mask all interrupts */
2037 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2038
2039 napi_schedule(&pp->napi);
2040
2041 return IRQ_HANDLED;
2042}
2043
2044/* NAPI handler
2045 * Bits 0 - 7 of the causeRxTx register indicate that packets were
2046 * transmitted on the corresponding TXQ (bit 0 is for TX queue 0).
2047 * Bits 8 - 15 of the causeRxTx register indicate that packets were
2048 * received on the corresponding RXQ (bit 8 is for RX queue 0).
2049 * Each CPU has its own causeRxTx register
2050 */
2051static int mvneta_poll(struct napi_struct *napi, int budget)
2052{
2053 int rx_done = 0;
2054 u32 cause_rx_tx;
2055 unsigned long flags;
2056 struct mvneta_port *pp = netdev_priv(napi->dev);
2057
2058 if (!netif_running(pp->dev)) {
2059 napi_complete(napi);
2060 return rx_done;
2061 }
2062
2063 /* Read cause register */
2064 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
willy tarreau71f6d1b2014-01-16 08:20:11 +01002065 (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
2066
2067 /* Release Tx descriptors */
2068 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
Arnaud Ebalard0713a862014-01-16 08:20:18 +01002069 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
willy tarreau71f6d1b2014-01-16 08:20:11 +01002070 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2071 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002072
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002073 /* For the case where the last mvneta_poll did not process all
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002074 * RX packets
2075 */
2076 cause_rx_tx |= pp->cause_rx_tx;
2077 if (rxq_number > 1) {
willy tarreau71f6d1b2014-01-16 08:20:11 +01002078 while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002079 int count;
2080 struct mvneta_rx_queue *rxq;
2081 /* get rx queue number from cause_rx_tx */
2082 rxq = mvneta_rx_policy(pp, cause_rx_tx);
2083 if (!rxq)
2084 break;
2085
2086 /* process the packet in that rx queue */
2087 count = mvneta_rx(pp, budget, rxq);
2088 rx_done += count;
2089 budget -= count;
2090 if (budget > 0) {
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002091				/* clear this rx queue's bit in
2092				 * the cause rx tx register, so
2093				 * that the next iteration will
2094				 * find the next rx queue with
2095				 * received packets
2096				 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002097 cause_rx_tx &= ~((1 << rxq->id) << 8);
2098 }
2099 }
2100 } else {
2101 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
2102 budget -= rx_done;
2103 }
2104
2105 if (budget > 0) {
2106 cause_rx_tx = 0;
2107 napi_complete(napi);
2108 local_irq_save(flags);
2109 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
willy tarreau71f6d1b2014-01-16 08:20:11 +01002110 MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002111 local_irq_restore(flags);
2112 }
2113
2114 pp->cause_rx_tx = cause_rx_tx;
2115 return rx_done;
2116}
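
/* Example decode of the cause register (illustrative): a cause value
 * of 0x00000103 means "TX queues 0 and 1 have work" (bits 0-1) and
 * "RX queue 0 has packets" (bit 8). The TX half goes through
 * mvneta_tx_done_gbe() above, while the RX half drives mvneta_rx()
 * until either all RX bits are serviced or the NAPI budget runs out;
 * leftover bits are parked in pp->cause_rx_tx for the next poll.
 */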
2117
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002118/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2119static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2120 int num)
2121{
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002122 int i;
2123
2124 for (i = 0; i < num; i++) {
willy tarreaua1a65ab2014-01-16 08:20:13 +01002125 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2126 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
2127 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002128 __func__, rxq->id, i, num);
2129 break;
2130 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002131 }
2132
2133 /* Add this number of RX descriptors as non occupied (ready to
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002134 * get packets)
2135 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002136 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2137
2138 return i;
2139}
2140
2141/* Free all packets pending transmit from all TXQs and reset TX port */
2142static void mvneta_tx_reset(struct mvneta_port *pp)
2143{
2144 int queue;
2145
Ezequiel Garcia96728502014-05-22 20:06:59 -03002146	/* free the skbs in the tx ring */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002147 for (queue = 0; queue < txq_number; queue++)
2148 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2149
2150 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2151 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2152}
2153
2154static void mvneta_rx_reset(struct mvneta_port *pp)
2155{
2156 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2157 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2158}
2159
2160/* Rx/Tx queue initialization/cleanup methods */
2161
2162/* Create a specified RX queue */
2163static int mvneta_rxq_init(struct mvneta_port *pp,
2164 struct mvneta_rx_queue *rxq)
2165
2166{
2167 rxq->size = pp->rx_ring_size;
2168
2169 /* Allocate memory for RX descriptors */
2170 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2171 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2172 &rxq->descs_phys, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002173 if (rxq->descs == NULL)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002174 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002175
2176 BUG_ON(rxq->descs !=
2177 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2178
2179 rxq->last_desc = rxq->size - 1;
2180
2181 /* Set Rx descriptors queue starting address */
2182 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2183 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2184
2185 /* Set Offset */
2186 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
2187
2188 /* Set coalescing pkts and time */
2189 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2190 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2191
2192 /* Fill RXQ with buffers from RX pool */
2193 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
2194 mvneta_rxq_bm_disable(pp, rxq);
2195 mvneta_rxq_fill(pp, rxq, rxq->size);
2196
2197 return 0;
2198}
2199
2200/* Cleanup Rx queue */
2201static void mvneta_rxq_deinit(struct mvneta_port *pp,
2202 struct mvneta_rx_queue *rxq)
2203{
2204 mvneta_rxq_drop_pkts(pp, rxq);
2205
2206 if (rxq->descs)
2207 dma_free_coherent(pp->dev->dev.parent,
2208 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2209 rxq->descs,
2210 rxq->descs_phys);
2211
2212 rxq->descs = NULL;
2213 rxq->last_desc = 0;
2214 rxq->next_desc_to_proc = 0;
2215 rxq->descs_phys = 0;
2216}
2217
2218/* Create and initialize a tx queue */
2219static int mvneta_txq_init(struct mvneta_port *pp,
2220 struct mvneta_tx_queue *txq)
2221{
2222 txq->size = pp->tx_ring_size;
2223
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03002224 /* A queue must always have room for at least one skb.
2225 * Therefore, stop the queue when the number of free entries
2226 * reaches the maximum number of descriptors per skb.
2227 */
2228 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2229 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2230
2231
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002232 /* Allocate memory for TX descriptors */
2233 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2234 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2235 &txq->descs_phys, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002236 if (txq->descs == NULL)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002237 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002238
2239 /* Make sure descriptor address is cache line size aligned */
2240 BUG_ON(txq->descs !=
2241 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2242
2243 txq->last_desc = txq->size - 1;
2244
2245 /* Set maximum bandwidth for enabled TXQs */
2246 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2247 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2248
2249 /* Set Tx descriptors queue starting address */
2250 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2251 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2252
2253 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2254 if (txq->tx_skb == NULL) {
2255 dma_free_coherent(pp->dev->dev.parent,
2256 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2257 txq->descs, txq->descs_phys);
2258 return -ENOMEM;
2259 }
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03002260
2261 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2262 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2263 txq->size * TSO_HEADER_SIZE,
2264 &txq->tso_hdrs_phys, GFP_KERNEL);
2265 if (txq->tso_hdrs == NULL) {
2266 kfree(txq->tx_skb);
2267 dma_free_coherent(pp->dev->dev.parent,
2268 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2269 txq->descs, txq->descs_phys);
2270 return -ENOMEM;
2271 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002272 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2273
2274 return 0;
2275}
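
/* Threshold example (a sketch of the hysteresis above): with a ring
 * of MVNETA_MAX_TXD entries and MVNETA_MAX_SKB_DESCS worst-case
 * descriptors per skb, the queue is stopped while it can still absorb
 * one maximally fragmented skb (tx_stop_threshold), and is only woken
 * again once occupancy falls to half of that (tx_wake_threshold),
 * which keeps the queue from bouncing between stopped and started on
 * every completion.
 */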
2276
2277/* Free all the resources allocated for a tx queue */
2278static void mvneta_txq_deinit(struct mvneta_port *pp,
2279 struct mvneta_tx_queue *txq)
2280{
2281 kfree(txq->tx_skb);
2282
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03002283 if (txq->tso_hdrs)
2284 dma_free_coherent(pp->dev->dev.parent,
2285 txq->size * TSO_HEADER_SIZE,
2286 txq->tso_hdrs, txq->tso_hdrs_phys);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002287 if (txq->descs)
2288 dma_free_coherent(pp->dev->dev.parent,
2289 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2290 txq->descs, txq->descs_phys);
2291
2292 txq->descs = NULL;
2293 txq->last_desc = 0;
2294 txq->next_desc_to_proc = 0;
2295 txq->descs_phys = 0;
2296
2297 /* Set minimum bandwidth for disabled TXQs */
2298 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2299 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2300
2301 /* Set Tx descriptors queue starting address and size */
2302 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2303 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2304}
2305
2306/* Cleanup all Tx queues */
2307static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2308{
2309 int queue;
2310
2311 for (queue = 0; queue < txq_number; queue++)
2312 mvneta_txq_deinit(pp, &pp->txqs[queue]);
2313}
2314
2315/* Cleanup all Rx queues */
2316static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2317{
2318 int queue;
2319
2320 for (queue = 0; queue < rxq_number; queue++)
2321 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
2322}
2323
2324
2325/* Init all Rx queues */
2326static int mvneta_setup_rxqs(struct mvneta_port *pp)
2327{
2328 int queue;
2329
2330 for (queue = 0; queue < rxq_number; queue++) {
2331 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
2332 if (err) {
2333 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2334 __func__, queue);
2335 mvneta_cleanup_rxqs(pp);
2336 return err;
2337 }
2338 }
2339
2340 return 0;
2341}
2342
2343/* Init all tx queues */
2344static int mvneta_setup_txqs(struct mvneta_port *pp)
2345{
2346 int queue;
2347
2348 for (queue = 0; queue < txq_number; queue++) {
2349 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2350 if (err) {
2351 netdev_err(pp->dev, "%s: can't create txq=%d\n",
2352 __func__, queue);
2353 mvneta_cleanup_txqs(pp);
2354 return err;
2355 }
2356 }
2357
2358 return 0;
2359}
2360
2361static void mvneta_start_dev(struct mvneta_port *pp)
2362{
2363 mvneta_max_rx_size_set(pp, pp->pkt_size);
2364 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2365
2366 /* start the Rx/Tx activity */
2367 mvneta_port_enable(pp);
2368
2369 /* Enable polling on the port */
2370 napi_enable(&pp->napi);
2371
2372 /* Unmask interrupts */
2373 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
willy tarreau71f6d1b2014-01-16 08:20:11 +01002374 MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002375
2376 phy_start(pp->phy_dev);
2377 netif_tx_start_all_queues(pp->dev);
2378}
2379
2380static void mvneta_stop_dev(struct mvneta_port *pp)
2381{
2382 phy_stop(pp->phy_dev);
2383
2384 napi_disable(&pp->napi);
2385
2386 netif_carrier_off(pp->dev);
2387
2388 mvneta_port_down(pp);
2389 netif_tx_stop_all_queues(pp->dev);
2390
2391 /* Stop the port activity */
2392 mvneta_port_disable(pp);
2393
2394 /* Clear all ethernet port interrupts */
2395 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2396 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2397
2398 /* Mask all ethernet port interrupts */
2399 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2400 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2401 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2402
2403 mvneta_tx_reset(pp);
2404 mvneta_rx_reset(pp);
2405}
2406
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002407/* Return a valid, possibly adjusted MTU, or -EINVAL if it is too small */
2408static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2409{
2410 if (mtu < 68) {
2411 netdev_err(dev, "cannot change mtu to less than 68\n");
2412 return -EINVAL;
2413 }
2414
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002415 /* 9676 == 9700 - 20 and rounding to 8 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002416 if (mtu > 9676) {
2417 netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
2418 mtu = 9676;
2419 }
2420
2421 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2422 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2423 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2424 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2425 }
2426
2427 return mtu;
2428}
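
/* Worked examples (illustrative): mtu = 50 is rejected with -EINVAL
 * (below the 68-byte minimum); mtu = 10000 is clamped to 9676; and an
 * MTU whose MVNETA_RX_PKT_SIZE() is not a multiple of 8 is rounded as
 * logged above. mvneta_change_mtu() maps any negative return to
 * -EINVAL and otherwise adopts the returned value.
 */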
2429
2430/* Change the device mtu */
2431static int mvneta_change_mtu(struct net_device *dev, int mtu)
2432{
2433 struct mvneta_port *pp = netdev_priv(dev);
2434 int ret;
2435
2436 mtu = mvneta_check_mtu_valid(dev, mtu);
2437 if (mtu < 0)
2438 return -EINVAL;
2439
2440 dev->mtu = mtu;
2441
2442 if (!netif_running(dev))
2443 return 0;
2444
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002445 /* The interface is running, so we have to force a
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002446 * reallocation of the queues
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002447 */
2448 mvneta_stop_dev(pp);
2449
2450 mvneta_cleanup_txqs(pp);
2451 mvneta_cleanup_rxqs(pp);
2452
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002453 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
willy tarreau8ec2cd42014-01-16 08:20:16 +01002454 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2455 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002456
2457 ret = mvneta_setup_rxqs(pp);
2458 if (ret) {
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002459 netdev_err(dev, "unable to setup rxqs after MTU change\n");
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002460 return ret;
2461 }
2462
Ezequiel Garciaa92dbd92014-05-22 20:06:58 -03002463 ret = mvneta_setup_txqs(pp);
2464 if (ret) {
2465 netdev_err(dev, "unable to setup txqs after MTU change\n");
2466 return ret;
2467 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002468
2469 mvneta_start_dev(pp);
2470 mvneta_port_up(pp);
2471
2472 return 0;
2473}
2474
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00002475/* Get mac address */
2476static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2477{
2478 u32 mac_addr_l, mac_addr_h;
2479
2480 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
2481 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
2482 addr[0] = (mac_addr_h >> 24) & 0xFF;
2483 addr[1] = (mac_addr_h >> 16) & 0xFF;
2484 addr[2] = (mac_addr_h >> 8) & 0xFF;
2485 addr[3] = mac_addr_h & 0xFF;
2486 addr[4] = (mac_addr_l >> 8) & 0xFF;
2487 addr[5] = mac_addr_l & 0xFF;
2488}
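
/* Register packing example (illustrative): for 00:50:43:12:34:56 the
 * hardware holds MAC_ADDR_HIGH = 0x00504312 (bytes 0-3) and the low
 * 16 bits of MAC_ADDR_LOW = 0x3456 (bytes 4-5); the shifts above
 * unpack them back into addr[0..5] in transmission order.
 */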
2489
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002490/* Handle setting mac address */
2491static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2492{
2493 struct mvneta_port *pp = netdev_priv(dev);
Ezequiel Garciae68de362014-05-22 20:07:00 -03002494 struct sockaddr *sockaddr = addr;
2495 int ret;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002496
Ezequiel Garciae68de362014-05-22 20:07:00 -03002497 ret = eth_prepare_mac_addr_change(dev, addr);
2498 if (ret < 0)
2499 return ret;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002500 /* Remove previous address table entry */
2501 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2502
2503 /* Set new addr in hw */
Ezequiel Garciae68de362014-05-22 20:07:00 -03002504 mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002505
Ezequiel Garciae68de362014-05-22 20:07:00 -03002506 eth_commit_mac_addr_change(dev, addr);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002507 return 0;
2508}
2509
2510static void mvneta_adjust_link(struct net_device *ndev)
2511{
2512 struct mvneta_port *pp = netdev_priv(ndev);
2513 struct phy_device *phydev = pp->phy_dev;
2514 int status_change = 0;
2515
2516 if (phydev->link) {
2517 if ((pp->speed != phydev->speed) ||
2518 (pp->duplex != phydev->duplex)) {
2519 u32 val;
2520
2521 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2522 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2523 MVNETA_GMAC_CONFIG_GMII_SPEED |
Thomas Petazzoni71408602013-09-04 16:21:18 +02002524 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
2525 MVNETA_GMAC_AN_SPEED_EN |
2526 MVNETA_GMAC_AN_DUPLEX_EN);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002527
2528 if (phydev->duplex)
2529 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2530
2531 if (phydev->speed == SPEED_1000)
2532 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
Thomas Petazzoni4d12bc62014-07-08 10:49:43 +02002533 else if (phydev->speed == SPEED_100)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002534 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2535
2536 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2537
2538 pp->duplex = phydev->duplex;
2539 pp->speed = phydev->speed;
2540 }
2541 }
2542
2543 if (phydev->link != pp->link) {
2544 if (!phydev->link) {
2545 pp->duplex = -1;
2546 pp->speed = 0;
2547 }
2548
2549 pp->link = phydev->link;
2550 status_change = 1;
2551 }
2552
2553 if (status_change) {
2554 if (phydev->link) {
2555 u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2556 val |= (MVNETA_GMAC_FORCE_LINK_PASS |
2557 MVNETA_GMAC_FORCE_LINK_DOWN);
2558 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2559 mvneta_port_up(pp);
2560 netdev_info(pp->dev, "link up\n");
2561 } else {
2562 mvneta_port_down(pp);
2563 netdev_info(pp->dev, "link down\n");
2564 }
2565 }
2566}
2567
2568static int mvneta_mdio_probe(struct mvneta_port *pp)
2569{
2570 struct phy_device *phy_dev;
2571
2572 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2573 pp->phy_interface);
2574 if (!phy_dev) {
2575 netdev_err(pp->dev, "could not find the PHY\n");
2576 return -ENODEV;
2577 }
2578
2579 phy_dev->supported &= PHY_GBIT_FEATURES;
2580 phy_dev->advertising = phy_dev->supported;
2581
2582 pp->phy_dev = phy_dev;
2583 pp->link = 0;
2584 pp->duplex = 0;
2585 pp->speed = 0;
2586
2587 return 0;
2588}
2589
2590static void mvneta_mdio_remove(struct mvneta_port *pp)
2591{
2592 phy_disconnect(pp->phy_dev);
2593 pp->phy_dev = NULL;
2594}
2595
2596static int mvneta_open(struct net_device *dev)
2597{
2598 struct mvneta_port *pp = netdev_priv(dev);
2599 int ret;
2600
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002601 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
willy tarreau8ec2cd42014-01-16 08:20:16 +01002602 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2603 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002604
2605 ret = mvneta_setup_rxqs(pp);
2606 if (ret)
2607 return ret;
2608
2609 ret = mvneta_setup_txqs(pp);
2610 if (ret)
2611 goto err_cleanup_rxqs;
2612
2613 /* Connect to port interrupt line */
2614 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
2615 MVNETA_DRIVER_NAME, pp);
2616 if (ret) {
2617 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
2618 goto err_cleanup_txqs;
2619 }
2620
2621	/* By default, the link is down */
2622 netif_carrier_off(pp->dev);
2623
2624 ret = mvneta_mdio_probe(pp);
2625 if (ret < 0) {
2626 netdev_err(dev, "cannot probe MDIO bus\n");
2627 goto err_free_irq;
2628 }
2629
2630 mvneta_start_dev(pp);
2631
2632 return 0;
2633
2634err_free_irq:
2635 free_irq(pp->dev->irq, pp);
2636err_cleanup_txqs:
2637 mvneta_cleanup_txqs(pp);
2638err_cleanup_rxqs:
2639 mvneta_cleanup_rxqs(pp);
2640 return ret;
2641}
2642
2643/* Stop the port, free port interrupt line */
2644static int mvneta_stop(struct net_device *dev)
2645{
2646 struct mvneta_port *pp = netdev_priv(dev);
2647
2648 mvneta_stop_dev(pp);
2649 mvneta_mdio_remove(pp);
2650 free_irq(dev->irq, pp);
2651 mvneta_cleanup_rxqs(pp);
2652 mvneta_cleanup_txqs(pp);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002653
2654 return 0;
2655}
2656
Thomas Petazzoni15f59452013-09-04 16:26:52 +02002657static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2658{
2659 struct mvneta_port *pp = netdev_priv(dev);
2660 int ret;
2661
2662 if (!pp->phy_dev)
2663		return -EOPNOTSUPP;
2664
2665 ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
2666 if (!ret)
2667 mvneta_adjust_link(dev);
2668
2669 return ret;
2670}
2671
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002672/* Ethtool methods */
2673
2674/* Get settings (phy address, speed) for ethtool */
2675int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2676{
2677 struct mvneta_port *pp = netdev_priv(dev);
2678
2679 if (!pp->phy_dev)
2680 return -ENODEV;
2681
2682 return phy_ethtool_gset(pp->phy_dev, cmd);
2683}
2684
2685/* Set settings (phy address, speed) for ethtool */
2686int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2687{
2688 struct mvneta_port *pp = netdev_priv(dev);
2689
2690 if (!pp->phy_dev)
2691 return -ENODEV;
2692
2693 return phy_ethtool_sset(pp->phy_dev, cmd);
2694}
2695
2696/* Set interrupt coalescing for ethtool */
2697static int mvneta_ethtool_set_coalesce(struct net_device *dev,
2698 struct ethtool_coalesce *c)
2699{
2700 struct mvneta_port *pp = netdev_priv(dev);
2701 int queue;
2702
2703 for (queue = 0; queue < rxq_number; queue++) {
2704 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2705 rxq->time_coal = c->rx_coalesce_usecs;
2706 rxq->pkts_coal = c->rx_max_coalesced_frames;
2707 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2708 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2709 }
2710
2711 for (queue = 0; queue < txq_number; queue++) {
2712 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2713 txq->done_pkts_coal = c->tx_max_coalesced_frames;
2714 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2715 }
2716
2717 return 0;
2718}
2719
2720/* Get coalescing for ethtool */
2721static int mvneta_ethtool_get_coalesce(struct net_device *dev,
2722 struct ethtool_coalesce *c)
2723{
2724 struct mvneta_port *pp = netdev_priv(dev);
2725
2726 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
2727 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
2728
2729 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
2730 return 0;
2731}
2732
2733
2734static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
2735 struct ethtool_drvinfo *drvinfo)
2736{
2737 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
2738 sizeof(drvinfo->driver));
2739 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
2740 sizeof(drvinfo->version));
2741 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
2742 sizeof(drvinfo->bus_info));
2743}
2744
2745
2746static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
2747 struct ethtool_ringparam *ring)
2748{
2749 struct mvneta_port *pp = netdev_priv(netdev);
2750
2751 ring->rx_max_pending = MVNETA_MAX_RXD;
2752 ring->tx_max_pending = MVNETA_MAX_TXD;
2753 ring->rx_pending = pp->rx_ring_size;
2754 ring->tx_pending = pp->tx_ring_size;
2755}
2756
2757static int mvneta_ethtool_set_ringparam(struct net_device *dev,
2758 struct ethtool_ringparam *ring)
2759{
2760 struct mvneta_port *pp = netdev_priv(dev);
2761
2762 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
2763 return -EINVAL;
2764 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
2765 ring->rx_pending : MVNETA_MAX_RXD;
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03002766
2767 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
2768 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
2769 if (pp->tx_ring_size != ring->tx_pending)
2770 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2771 pp->tx_ring_size, ring->tx_pending);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002772
2773 if (netif_running(dev)) {
2774 mvneta_stop(dev);
2775 if (mvneta_open(dev)) {
2776 netdev_err(dev,
2777 "error on opening device after ring param change\n");
2778 return -ENOMEM;
2779 }
2780 }
2781
2782 return 0;
2783}
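
/* Example (illustrative): requesting tx_pending = 16 is clamped up to
 * MVNETA_MAX_SKB_DESCS * 2 with a warning, since a smaller ring could
 * never satisfy the stop/wake thresholds set in mvneta_txq_init(); if
 * the interface is running, the queues are then torn down and
 * re-created with the new sizes via mvneta_stop()/mvneta_open().
 */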
2784
2785static const struct net_device_ops mvneta_netdev_ops = {
2786 .ndo_open = mvneta_open,
2787 .ndo_stop = mvneta_stop,
2788 .ndo_start_xmit = mvneta_tx,
2789 .ndo_set_rx_mode = mvneta_set_rx_mode,
2790 .ndo_set_mac_address = mvneta_set_mac_addr,
2791 .ndo_change_mtu = mvneta_change_mtu,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002792 .ndo_get_stats64 = mvneta_get_stats64,
Thomas Petazzoni15f59452013-09-04 16:26:52 +02002793 .ndo_do_ioctl = mvneta_ioctl,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002794};
2795
2796const struct ethtool_ops mvneta_eth_tool_ops = {
2797 .get_link = ethtool_op_get_link,
2798 .get_settings = mvneta_ethtool_get_settings,
2799 .set_settings = mvneta_ethtool_set_settings,
2800 .set_coalesce = mvneta_ethtool_set_coalesce,
2801 .get_coalesce = mvneta_ethtool_get_coalesce,
2802 .get_drvinfo = mvneta_ethtool_get_drvinfo,
2803 .get_ringparam = mvneta_ethtool_get_ringparam,
2804 .set_ringparam = mvneta_ethtool_set_ringparam,
2805};
2806
2807/* Initialize hw */
Ezequiel Garcia96728502014-05-22 20:06:59 -03002808static int mvneta_init(struct device *dev, struct mvneta_port *pp)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002809{
2810 int queue;
2811
2812 /* Disable port */
2813 mvneta_port_disable(pp);
2814
2815 /* Set port default values */
2816 mvneta_defaults_set(pp);
2817
Ezequiel Garcia96728502014-05-22 20:06:59 -03002818 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
2819 GFP_KERNEL);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002820 if (!pp->txqs)
2821 return -ENOMEM;
2822
2823 /* Initialize TX descriptor rings */
2824 for (queue = 0; queue < txq_number; queue++) {
2825 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2826 txq->id = queue;
2827 txq->size = pp->tx_ring_size;
2828 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2829 }
2830
Ezequiel Garcia96728502014-05-22 20:06:59 -03002831 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
2832 GFP_KERNEL);
2833 if (!pp->rxqs)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002834 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002835
2836 /* Create Rx descriptor rings */
2837 for (queue = 0; queue < rxq_number; queue++) {
2838 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2839 rxq->id = queue;
2840 rxq->size = pp->rx_ring_size;
2841 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
2842 rxq->time_coal = MVNETA_RX_COAL_USEC;
2843 }
2844
2845 return 0;
2846}
2847
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002848/* platform glue : initialize decoding windows */
Greg KH03ce7582012-12-21 13:42:15 +00002849static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
2850 const struct mbus_dram_target_info *dram)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002851{
2852 u32 win_enable;
2853 u32 win_protect;
2854 int i;
2855
2856 for (i = 0; i < 6; i++) {
2857 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
2858 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
2859
2860 if (i < 4)
2861 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
2862 }
2863
2864 win_enable = 0x3f;
2865 win_protect = 0;
2866
2867 for (i = 0; i < dram->num_cs; i++) {
2868 const struct mbus_dram_window *cs = dram->cs + i;
2869 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
2870 (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
2871
2872 mvreg_write(pp, MVNETA_WIN_SIZE(i),
2873 (cs->size - 1) & 0xffff0000);
2874
2875 win_enable &= ~(1 << i);
2876 win_protect |= 3 << (2 * i);
2877 }
2878
2879 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
2880}
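
/* Example (illustrative): with a single 1 GB DRAM chip-select at base
 * 0x0, the loop programs window 0 with the CS base, attributes and
 * target ID, sets its size register to 0x3fff0000 ((cs->size - 1)
 * masked to 64 KB granularity), and clears bit 0 of win_enable so
 * that only window 0 is left enabled when the mask is written to
 * MVNETA_BASE_ADDR_ENABLE.
 */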
2881
2882/* Power up the port */
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02002883static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002884{
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02002885 u32 ctrl;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002886
2887 /* MAC Cause register should be cleared */
2888 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2889
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02002890 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002891
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02002892 /* Even though it might look weird, when we're configured in
2893 * SGMII or QSGMII mode, the RGMII bit needs to be set.
2894 */
2895	switch (phy_mode) {
2896 case PHY_INTERFACE_MODE_QSGMII:
2897 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
2898 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
2899 break;
2900 case PHY_INTERFACE_MODE_SGMII:
2901 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
2902 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
2903 break;
2904 case PHY_INTERFACE_MODE_RGMII:
2905 case PHY_INTERFACE_MODE_RGMII_ID:
2906 ctrl |= MVNETA_GMAC2_PORT_RGMII;
2907 break;
2908 default:
2909 return -EINVAL;
2910 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002911
2912 /* Cancel Port Reset */
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02002913 ctrl &= ~MVNETA_GMAC2_PORT_RESET;
2914 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002915
2916 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
2917 MVNETA_GMAC2_PORT_RESET) != 0)
2918 continue;
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02002919
2920 return 0;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002921}
2922
2923/* Device initialization routine */
Greg KH03ce7582012-12-21 13:42:15 +00002924static int mvneta_probe(struct platform_device *pdev)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002925{
2926 const struct mbus_dram_target_info *dram_target_info;
Thomas Petazzonic3f0dd32014-03-27 11:39:29 +01002927 struct resource *res;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002928 struct device_node *dn = pdev->dev.of_node;
2929 struct device_node *phy_node;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002930 struct mvneta_port *pp;
2931 struct net_device *dev;
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00002932 const char *dt_mac_addr;
2933 char hw_mac_addr[ETH_ALEN];
2934 const char *mac_from;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002935 int phy_mode;
2936 int err;
2937
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002938 /* Our multiqueue support is not complete, so for now, only
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002939 * allow the usage of the first RX queue
2940 */
2941 if (rxq_def != 0) {
2942 dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
2943 return -EINVAL;
2944 }
2945
Willy Tarreauee40a112013-04-11 23:00:37 +02002946 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002947 if (!dev)
2948 return -ENOMEM;
2949
2950 dev->irq = irq_of_parse_and_map(dn, 0);
2951 if (dev->irq == 0) {
2952 err = -EINVAL;
2953 goto err_free_netdev;
2954 }
2955
2956 phy_node = of_parse_phandle(dn, "phy", 0);
2957 if (!phy_node) {
Thomas Petazzoni83895be2014-05-16 16:14:06 +02002958 if (!of_phy_is_fixed_link(dn)) {
2959 dev_err(&pdev->dev, "no PHY specified\n");
2960 err = -ENODEV;
2961 goto err_free_irq;
2962 }
2963
2964 err = of_phy_register_fixed_link(dn);
2965 if (err < 0) {
2966 dev_err(&pdev->dev, "cannot register fixed PHY\n");
2967 goto err_free_irq;
2968 }
2969
2970 /* In the case of a fixed PHY, the DT node associated
2971 * to the PHY is the Ethernet MAC DT node.
2972 */
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02002973 phy_node = of_node_get(dn);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002974 }
2975
2976 phy_mode = of_get_phy_mode(dn);
2977 if (phy_mode < 0) {
2978 dev_err(&pdev->dev, "incorrect phy-mode\n");
2979 err = -EINVAL;
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02002980 goto err_put_phy_node;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002981 }
2982
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002983 dev->tx_queue_len = MVNETA_MAX_TXD;
2984 dev->watchdog_timeo = 5 * HZ;
2985 dev->netdev_ops = &mvneta_netdev_ops;
2986
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00002987 dev->ethtool_ops = &mvneta_eth_tool_ops;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002988
2989 pp = netdev_priv(dev);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002990 pp->phy_node = phy_node;
2991 pp->phy_interface = phy_mode;
2992
Thomas Petazzoni189dd622012-11-19 14:15:25 +01002993 pp->clk = devm_clk_get(&pdev->dev, NULL);
2994 if (IS_ERR(pp->clk)) {
2995 err = PTR_ERR(pp->clk);
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02002996 goto err_put_phy_node;
Thomas Petazzoni189dd622012-11-19 14:15:25 +01002997 }
2998
2999 clk_prepare_enable(pp->clk);
3000
Thomas Petazzonic3f0dd32014-03-27 11:39:29 +01003001 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3002 pp->base = devm_ioremap_resource(&pdev->dev, res);
3003 if (IS_ERR(pp->base)) {
3004 err = PTR_ERR(pp->base);
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02003005 goto err_clk;
3006 }
3007
willy tarreau74c41b02014-01-16 08:20:08 +01003008 /* Alloc per-cpu stats */
WANG Cong1c213bd2014-02-13 11:46:28 -08003009 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
willy tarreau74c41b02014-01-16 08:20:08 +01003010 if (!pp->stats) {
3011 err = -ENOMEM;
Thomas Petazzonic3f0dd32014-03-27 11:39:29 +01003012 goto err_clk;
willy tarreau74c41b02014-01-16 08:20:08 +01003013 }
3014
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00003015 dt_mac_addr = of_get_mac_address(dn);
Luka Perkov6c7a9a32013-10-30 00:10:01 +01003016 if (dt_mac_addr) {
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00003017 mac_from = "device tree";
3018 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
3019 } else {
3020 mvneta_get_mac_addr(pp, hw_mac_addr);
3021 if (is_valid_ether_addr(hw_mac_addr)) {
3022 mac_from = "hardware";
3023 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
3024 } else {
3025 mac_from = "random";
3026 eth_hw_addr_random(dev);
3027 }
3028 }
3029
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003030 pp->tx_ring_size = MVNETA_MAX_TXD;
3031 pp->rx_ring_size = MVNETA_MAX_RXD;
3032
3033 pp->dev = dev;
3034 SET_NETDEV_DEV(dev, &pdev->dev);
3035
Ezequiel Garcia96728502014-05-22 20:06:59 -03003036 err = mvneta_init(&pdev->dev, pp);
3037 if (err < 0)
willy tarreau74c41b02014-01-16 08:20:08 +01003038 goto err_free_stats;
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003039
3040 err = mvneta_port_power_up(pp, phy_mode);
3041 if (err < 0) {
3042 dev_err(&pdev->dev, "can't power up port\n");
Ezequiel Garcia96728502014-05-22 20:06:59 -03003043 goto err_free_stats;
Thomas Petazzoni3f1dd4b2014-04-15 15:50:20 +02003044 }
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003045
3046 dram_target_info = mv_mbus_dram_info();
3047 if (dram_target_info)
3048 mvneta_conf_mbus_windows(pp, dram_target_info);
3049
Ezequiel Garcia9fa93792014-05-30 13:40:04 -03003050 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003051
Ezequiel Garcia2adb7192014-05-19 13:59:55 -03003052 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
Ezequiel Garcia01ef26c2014-05-19 13:59:53 -03003053 dev->hw_features |= dev->features;
3054 dev->vlan_features |= dev->features;
willy tarreaub50b72d2013-04-06 08:47:01 +00003055 dev->priv_flags |= IFF_UNICAST_FLT;
Ezequiel Garcia8eef5f92014-05-30 13:40:05 -03003056 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
willy tarreaub50b72d2013-04-06 08:47:01 +00003057
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003058 err = register_netdev(dev);
3059 if (err < 0) {
3060 dev_err(&pdev->dev, "failed to register\n");
Ezequiel Garcia96728502014-05-22 20:06:59 -03003061 goto err_free_stats;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003062 }
3063
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00003064 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
3065 dev->dev_addr);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003066
3067 platform_set_drvdata(pdev, pp->dev);
3068
3069 return 0;
3070
willy tarreau74c41b02014-01-16 08:20:08 +01003071err_free_stats:
3072 free_percpu(pp->stats);
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02003073err_clk:
3074 clk_disable_unprepare(pp->clk);
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02003075err_put_phy_node:
3076 of_node_put(phy_node);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003077err_free_irq:
3078 irq_dispose_mapping(dev->irq);
3079err_free_netdev:
3080 free_netdev(dev);
3081 return err;
3082}
3083
3084/* Device removal routine */
Greg KH03ce7582012-12-21 13:42:15 +00003085static int mvneta_remove(struct platform_device *pdev)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003086{
3087 struct net_device *dev = platform_get_drvdata(pdev);
3088 struct mvneta_port *pp = netdev_priv(dev);
3089
3090 unregister_netdev(dev);
Thomas Petazzoni189dd622012-11-19 14:15:25 +01003091 clk_disable_unprepare(pp->clk);
willy tarreau74c41b02014-01-16 08:20:08 +01003092 free_percpu(pp->stats);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003093 irq_dispose_mapping(dev->irq);
Uwe Kleine-Königc891c242014-08-07 21:58:46 +02003094 of_node_put(pp->phy_node);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003095 free_netdev(dev);
3096
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003097 return 0;
3098}
3099
3100static const struct of_device_id mvneta_match[] = {
3101 { .compatible = "marvell,armada-370-neta" },
3102 { }
3103};
3104MODULE_DEVICE_TABLE(of, mvneta_match);
3105
3106static struct platform_driver mvneta_driver = {
3107 .probe = mvneta_probe,
Greg KH03ce7582012-12-21 13:42:15 +00003108 .remove = mvneta_remove,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03003109 .driver = {
3110 .name = MVNETA_DRIVER_NAME,
3111 .of_match_table = mvneta_match,
3112 },
3113};
3114
3115module_platform_driver(mvneta_driver);
3116
3117MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
3118MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
3119MODULE_LICENSE("GPL");
3120
3121module_param(rxq_number, int, S_IRUGO);
3122module_param(txq_number, int, S_IRUGO);
3123
3124module_param(rxq_def, int, S_IRUGO);
willy tarreauf19fadf2014-01-16 08:20:17 +01003125module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);