/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)	(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC		BIT(1)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)	(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)	((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)	(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)		(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT	19
#define MVNETA_RXQ_BUF_SIZE_MASK	(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)	(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)	(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX	255
#define MVNETA_PORT_RX_RESET		0x1cc0
#define MVNETA_PORT_RX_DMA_RESET	BIT(0)
#define MVNETA_PHY_ADDR			0x2000
#define MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_MBUS_RETRY		0x2010
#define MVNETA_UNIT_INTR_CAUSE		0x2080
#define MVNETA_UNIT_CONTROL		0x20B0
#define MVNETA_PHY_POLLING_ENABLE	BIT(1)
#define MVNETA_WIN_BASE(w)		(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)		(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)		(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE		0x2290
#define MVNETA_PORT_CONFIG		0x2400
#define MVNETA_UNI_PROMISC_MODE		BIT(0)
#define MVNETA_DEF_RXQ(q)		((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q)      | \
					  MVNETA_DEF_RXQ_ARP(q)	 | \
					  MVNETA_DEF_RXQ_TCP(q)	 | \
					  MVNETA_DEF_RXQ_UDP(q)	 | \
					  MVNETA_DEF_RXQ_BPDU(q) | \
					  MVNETA_TX_UNSET_ERR_SUM | \
					  MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND	0x2404
#define MVNETA_MAC_ADDR_LOW		0x2414
#define MVNETA_MAC_ADDR_HIGH		0x2418
#define MVNETA_SDMA_CONFIG		0x241c
#define MVNETA_SDMA_BRST_SIZE_16	4
#define MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define MVNETA_DESC_SWAP		BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS		0x2444
#define MVNETA_TX_IN_PRGRS		BIT(1)
#define MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE	0x247c
#define MVNETA_SGMII_SERDES_CFG		0x24A0
#define MVNETA_SGMII_SERDES_PROTO	0x0cc7
#define MVNETA_TYPE_PRIO		0x24bc
#define MVNETA_FORCE_UNI		BIT(21)
#define MVNETA_TXQ_CMD_1		0x24e4
#define MVNETA_TXQ_CMD			0x2448
#define MVNETA_TXQ_DISABLE_SHIFT	8
#define MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_ACC_MODE			0x2500
#define MVNETA_CPU_MAP(cpu)		(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)	(0x2580 + ((q) << 2))
#define MVNETA_INTR_NEW_CAUSE		0x25a0
#define MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_INTR_NEW_MASK		0x25a4
#define MVNETA_INTR_OLD_CAUSE		0x25a8
#define MVNETA_INTR_OLD_MASK		0x25ac
#define MVNETA_INTR_MISC_CAUSE		0x25b0
#define MVNETA_INTR_MISC_MASK		0x25b4
#define MVNETA_INTR_ENABLE		0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0xff000000
#define MVNETA_RXQ_CMD			0x2680
#define MVNETA_RXQ_DISABLE_SHIFT	8
#define MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)	(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)	(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0		0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE	BIT(0)
#define MVNETA_GMAC_CTRL_2		0x2c08
#define MVNETA_GMAC2_PSC_ENABLE		BIT(3)
#define MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS		0x2c10
#define MVNETA_GMAC_LINK_UP		BIT(0)
#define MVNETA_GMAC_SPEED_1000		BIT(1)
#define MVNETA_GMAC_SPEED_100		BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG	0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVNETA_MIB_COUNTERS_BASE	0x3080
#define MVNETA_MIB_LATE_COLLISION	0x7c
#define MVNETA_DA_FILT_SPEC_MCAST	0x3400
#define MVNETA_DA_FILT_OTH_MCAST	0x3500
#define MVNETA_DA_FILT_UCAST_BASE	0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)	(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)		(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)	(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT	16
#define MVNETA_TXQ_STATUS_REG(q)	(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT	16
#define MVNETA_TXQ_SENT_DESC_MASK	0x3fff0000
#define MVNETA_PORT_TX_RESET		0x3cf0
#define MVNETA_PORT_TX_DMA_RESET	BIT(0)
#define MVNETA_TX_MTU			0x3e0c
#define MVNETA_TX_TOKEN_SIZE		0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX	0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)	(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX	0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		16
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* Timer */
#define MVNETA_TX_DONE_TIMER_PERIOD	10

/* Napi polling weight */
#define MVNETA_RX_POLL_WEIGHT		64

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is zero-filled automatically on the RX
 * side. Because these two bytes sit in front of the Ethernet header,
 * the IP header that follows ends up aligned on a 4-byte boundary:
 * the hardware skips the two bytes on its own.
 */
#define MVNETA_MH_SIZE			2
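
/* Illustration, for a standard untagged frame: the two-byte Marvell
 * header plus the 14-byte Ethernet header ends at offset 2 + 14 = 16,
 * so the IP header that follows starts on a 4-byte boundary with no
 * extra skb_reserve() needed beyond the MH itself.
 */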

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			532

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			     \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)

#define MVNETA_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
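
/* Worked example, assuming the standard Ethernet MTU of 1500:
 *   MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32)
 *                            = ALIGN(1524, 32) = 1536 bytes,
 * and MVNETA_RX_BUF_SIZE() then adds NET_SKB_PAD on top of that.
 */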

struct mvneta_stats {
	struct	u64_stats_sync syncp;
	u64	packets;
	u64	bytes;
};

struct mvneta_port {
	int pkt_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct timer_list tx_done_timer;
	struct net_device *dev;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Flags */
	unsigned long flags;
#define MVNETA_F_TX_DONE_TIMER_BIT	0

	/* Napi weight */
	int weight;

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvneta_stats tx_stats;
	struct mvneta_stats rx_stats;

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and receive DMA descriptors; that layout is
 * therefore dictated by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserved1;		/* csum_l4 (for future use) */
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u16  data_size;		/* Size of received packet in bytes */

	u32  buf_phys_addr;	/* Physical address of the buffer */
	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u16  reserved4;		/* csum_l4 - (for future use, PnC) */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserved1;		/* csum_l4 (for future use) */
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u32  status;		/* Info about received packet */

	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32  buf_phys_addr;	/* Physical address of the buffer */

	u16  reserved4;		/* csum_l4 - (for future use, PnC) */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}

/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;

	memset(stats, 0, sizeof(struct rtnl_link_stats64));

	do {
		start = u64_stats_fetch_begin_bh(&pp->rx_stats.syncp);
		stats->rx_packets = pp->rx_stats.packets;
		stats->rx_bytes = pp->rx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&pp->rx_stats.syncp, start));

	do {
		start = u64_stats_fetch_begin_bh(&pp->tx_stats.syncp);
		stats->tx_packets = pp->tx_stats.packets;
		stats->tx_bytes = pp->tx_stats.bytes;
	} while (u64_stats_fetch_retry_bh(&pp->tx_stats.syncp, start));

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;

	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}
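
/* Note on the two loops above: roughly, the u64_stats_fetch_begin_bh()/
 * u64_stats_fetch_retry_bh() pairs implement a seqcount-style read of
 * the 64-bit counters, retrying the snapshot if a writer (the
 * u64_stats_update_begin()/_end() sections in the RX/TX hot paths
 * below) ran concurrently. On 64-bit machines these helpers compile
 * away since 64-bit loads are already atomic there; see
 * include/linux/u64_stats_sync.h for the exact semantics.
 */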

/* Rx descriptors helper methods */

/* Checks whether the given RX descriptor is both the first and the
 * last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(struct mvneta_rx_desc *desc)
{
	return (desc->status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update the number of processed and newly refilled RX descriptors;
 * called on return from the RX path or from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	/* Offset is in 8-byte units */
	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}


/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta of less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (i.e. sent) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Set or clear the RGMII Enable bit (RGMIIEn) in the port MAC control
 * register
 */
static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	if (enable)
		val |= MVNETA_GMAC2_PORT_RGMII;
	else
		val &= ~MVNETA_GMAC2_PORT_RGMII;

	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
}

/* Config SGMII port */
static void mvneta_port_sgmii_config(struct mvneta_port *pp)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	val |= MVNETA_GMAC2_PSC_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);

	mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}

	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "timeout waiting for RX to stop, rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "timeout waiting for TX to stop, status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout, status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

/* This method sets default values for the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update the port configuration register with the default RX
	 * queue for each traffic type
	 */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* The TX token size and all TXQ token sizes must be larger
	 * than the MTU
	 */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}
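
/* Worked example (sketch, assuming a 250 MHz core clock): a requested
 * delay of value = 100 us is programmed as
 *   val = (250000000 / 1000000) * 100 = 25000,
 * i.e. the RXQ time-coalescing register counts core-clock ticks.
 */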

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}

/* Trigger tx done timer in MVNETA_TX_DONE_TIMER_PERIOD msecs */
static void mvneta_add_tx_done_timer(struct mvneta_port *pp)
{
	if (test_and_set_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags) == 0) {
		pp->tx_done_timer.expires = jiffies +
			msecs_to_jiffies(MVNETA_TX_DONE_TIMER_PERIOD);
		add_timer(&pp->tx_done_timer);
	}
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptor fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == swab16(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
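
/* Minimal usage sketch (hypothetical values): for an untagged IPv4/TCP
 * frame whose L3 header starts right after the 14-byte Ethernet header
 * and whose IP header length is 5 words, on a little-endian CPU:
 *
 *	u32 cmd = mvneta_txq_desc_csum(14, swab16(ETH_P_IP), 5,
 *				       IPPROTO_TCP);
 *
 * yields (14 << MVNETA_TX_L3_OFF_SHIFT) | (5 << MVNETA_TX_IP_HLEN_SHIFT)
 * | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL. mvneta_skb_tx_csum()
 * below derives these arguments from the skb.
 */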

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(rx_desc)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   rx_desc->status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload */
static void mvneta_rx_csum(struct mvneta_port *pp,
			   struct mvneta_rx_desc *rx_desc,
			   struct sk_buff *skb)
{
	if ((rx_desc->status & MVNETA_RXD_L3_IP4) &&
	    (rx_desc->status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to causeTxDone reg */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return (queue < 0 || queue >= txq_number) ? NULL : &pp->txqs[queue];
}
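
/* Example: cause = 0x06 (TXQs 1 and 2 pending) gives fls(0x06) - 1 = 2,
 * so the highest-numbered pending queue is returned first; the caller
 * (mvneta_tx_done_gbe() below) clears that bit and calls again until
 * cause is empty.
 */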

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		mvneta_txq_inc_get(txq);

		if (!skb)
			continue;

		dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
				 tx_desc->data_size, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

/* Handle end of transmission */
static int mvneta_txq_done(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (tx_done == 0)
		return tx_done;
	mvneta_txq_bufs_free(pp, txq, tx_done);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
			netif_tx_wake_queue(nq);
	}

	return tx_done;
}

/* Refill processing */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	dma_addr_t phys_addr;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(pp->dev, pp->pkt_size);
	if (!skb)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);

	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
				skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}

/* Returns rx queue pointer (find last set bit) according to causeRxTx
 * value
 */
static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
						u32 cause)
{
	int queue = fls(cause >> 8) - 1;

	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		struct sk_buff *skb = (struct sk_buff *)rx_desc->buf_cookie;

		dev_kfree_skb_any(skb);
		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
	}

	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Main rx processing */
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
		     struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_done, rx_filled;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;
	rx_filled = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		u32 rx_status;
		int rx_bytes, err;

		prefetch(rx_desc);
		rx_done++;
		rx_filled++;
		rx_status = rx_desc->status;
		skb = (struct sk_buff *)rx_desc->buf_cookie;

		if (!mvneta_rxq_desc_is_first_last(rx_desc) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			mvneta_rx_desc_fill(rx_desc, rx_desc->buf_phys_addr,
					    (u32)skb);
			continue;
		}

		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);

		rx_bytes = rx_desc->data_size -
			(ETH_FCS_LEN + MVNETA_MH_SIZE);
		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_desc, skb);

		napi_gro_receive(&pp->napi, skb);

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc);
		if (err) {
			netdev_err(pp->dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			rx_filled--;
		}
	}

	if (rcvd_pkts) {
		u64_stats_update_begin(&pp->rx_stats.syncp);
		pp->rx_stats.packets += rcvd_pkts;
		pp->rx_stats.bytes += rcvd_bytes;
		u64_stats_update_end(&pp->rx_stats.syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);

	return rx_done;
}

/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;

			txq->tx_skb[txq->txq_put_index] = skb;

			mvneta_txq_inc_put(txq);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;

			txq->tx_skb[txq->txq_put_index] = NULL;
			mvneta_txq_inc_put(txq);
		}
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	struct netdev_queue *nq;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	frags = skb_shinfo(skb)->nr_frags + 1;
	nq = netdev_get_tx_queue(dev, txq_id);

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

	txq->count += frags;
	mvneta_txq_pend_desc_add(pp, txq, frags);

	if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(nq);

out:
	if (frags > 0) {
		u64_stats_update_begin(&pp->tx_stats.syncp);
		pp->tx_stats.packets++;
		pp->tx_stats.bytes += skb->len;
		u64_stats_update_end(&pp->tx_stats.syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	if (txq->count >= MVNETA_TXDONE_COAL_PKTS)
		mvneta_txq_done(pp, txq);

	/* If after calling mvneta_txq_done, count equals
	 * frags, we need to set the timer
	 */
	if (txq->count == frags && frags > 0)
		mvneta_add_tx_done_timer(pp);

	return NETDEV_TX_OK;
}

/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* handle tx done - called from tx done timer callback */
static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
			      int *tx_todo)
{
	struct mvneta_tx_queue *txq;
	u32 tx_done = 0;
	struct netdev_queue *nq;

	*tx_todo = 0;
	while (cause_tx_done != 0) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);
		if (!txq)
			break;

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count) {
			tx_done += mvneta_txq_done(pp, txq);
			*tx_todo += txq->count;
		}

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}

	return tx_done;
}

/* Compute the CRC-8 of the specified address, using an algorithm
 * specific to this hardware (per the HW spec) that differs from the
 * generic CRC-8 algorithm
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}
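
/* Usage sketch: only the low 8 bits of the result are meaningful; they
 * index the 256-entry Other Multicast table and mcast_count[]:
 *
 *	u8 idx = mvneta_addr_crc(addr) & 0xff;
 *
 * The polynomial is x^8 + x^2 + x + 1 (0x107 including the implicit
 * high bit), processed MSB-first over all six address bytes, which is
 * why the result differs from the kernel's generic crc8() output.
 */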

/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the Special
 * Multicast Table appropriate entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}
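
/* Worked example: for 01:00:5e:00:00:35 the last byte is 0x35 = 53, so
 * tbl_offset = 53 / 4 = 13 (the 14th 32-bit register of the table) and
 * reg_offset = 53 % 4 = 1 (the second byte within that register).
 */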

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the Other Multicast Table appropriate entry according to the
 * specified CRC-8.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4; /* Entry offset within the above reg */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}
1745
1746/* The network device supports multicast using two tables:
1747 * 1) Special Multicast Table for MAC addresses of the form
1748 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1749 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1750 * Table entries in the DA-Filter table.
1751 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
1752 * is used as an index to the Other Multicast Table entries in the
1753 * DA-Filter table.
1754 */
1755static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
1756 int queue)
1757{
1758 unsigned char crc_result = 0;
1759
1760 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
1761 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
1762 return 0;
1763 }
1764
1765 crc_result = mvneta_addr_crc(p_addr);
1766 if (queue == -1) {
1767 if (pp->mcast_count[crc_result] == 0) {
1768 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
1769 crc_result);
1770 return -EINVAL;
1771 }
1772
1773 pp->mcast_count[crc_result]--;
1774 if (pp->mcast_count[crc_result] != 0) {
1775 netdev_info(pp->dev,
1776 "After delete there are %d valid Mcast for crc8=0x%02x\n",
1777 pp->mcast_count[crc_result], crc_result);
1778 return -EINVAL;
1779 }
1780 } else
1781 pp->mcast_count[crc_result]++;
1782
1783 mvneta_set_other_mcast_addr(pp, crc_result, queue);
1784
1785 return 0;
1786}
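
/* Editor's illustration (assumed usage, not driver code): Other Multicast
 * entries are reference-counted through pp->mcast_count[], so adding the
 * same hashed address twice and deleting it once leaves the filter entry
 * set. Assumes addr is not of the special 01:00:5e:00:00:XX form, which
 * bypasses the CRC path entirely.
 */
static void __maybe_unused mvneta_mcast_refcount_demo(struct mvneta_port *pp,
						      unsigned char *addr)
{
	mvneta_mcast_addr_set(pp, addr, rxq_def);	/* refcount -> 1 */
	mvneta_mcast_addr_set(pp, addr, rxq_def);	/* refcount -> 2 */
	/* Returns -EINVAL and keeps the entry: one reference remains */
	mvneta_mcast_addr_set(pp, addr, -1);
}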
1787
1788/* Configure the filtering mode of the Ethernet port */
1789static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
1790 int is_promisc)
1791{
1792 u32 port_cfg_reg, val;
1793
1794 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
1795
1796 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
1797
1798 /* Set / Clear UPM bit in port configuration register */
1799 if (is_promisc) {
1800 /* Accept all Unicast addresses */
1801 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
1802 val |= MVNETA_FORCE_UNI;
1803 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
1804 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
1805 } else {
1806 /* Reject all Unicast addresses */
1807 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
1808 val &= ~MVNETA_FORCE_UNI;
1809 }
1810
1811 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
1812 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
1813}
1814
1815/* register unicast and multicast addresses */
1816static void mvneta_set_rx_mode(struct net_device *dev)
1817{
1818 struct mvneta_port *pp = netdev_priv(dev);
1819 struct netdev_hw_addr *ha;
1820
1821 if (dev->flags & IFF_PROMISC) {
1822 /* Accept all: Multicast + Unicast */
1823 mvneta_rx_unicast_promisc_set(pp, 1);
1824 mvneta_set_ucast_table(pp, rxq_def);
1825 mvneta_set_special_mcast_table(pp, rxq_def);
1826 mvneta_set_other_mcast_table(pp, rxq_def);
1827 } else {
1828 /* Accept single Unicast */
1829 mvneta_rx_unicast_promisc_set(pp, 0);
1830 mvneta_set_ucast_table(pp, -1);
1831 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
1832
1833 if (dev->flags & IFF_ALLMULTI) {
1834 /* Accept all multicast */
1835 mvneta_set_special_mcast_table(pp, rxq_def);
1836 mvneta_set_other_mcast_table(pp, rxq_def);
1837 } else {
1838 /* Accept only initialized multicast */
1839 mvneta_set_special_mcast_table(pp, -1);
1840 mvneta_set_other_mcast_table(pp, -1);
1841
1842 if (!netdev_mc_empty(dev)) {
1843 netdev_for_each_mc_addr(ha, dev) {
1844 mvneta_mcast_addr_set(pp, ha->addr,
1845 rxq_def);
1846 }
1847 }
1848 }
1849 }
1850}
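
/* Editor's summary of the filtering policy above (illustration only):
 * IFF_PROMISC accepts all unicast and multicast, IFF_ALLMULTI accepts all
 * multicast on top of the single station address, and otherwise only the
 * station address plus explicitly registered multicast entries pass.
 */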
1851
1852/* Interrupt handling - the callback for request_irq() */
1853static irqreturn_t mvneta_isr(int irq, void *dev_id)
1854{
1855 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
1856
1857 /* Mask all interrupts */
1858 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1859
1860 napi_schedule(&pp->napi);
1861
1862 return IRQ_HANDLED;
1863}
1864
1865/* NAPI handler
1866 * Bits 0-7 of the causeRxTx register indicate that packets were
1867 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
1868 * Bits 8-15 of the causeRxTx register indicate that packets were
1869 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
1870 * Each CPU has its own causeRxTx register.
1871 */
1872static int mvneta_poll(struct napi_struct *napi, int budget)
1873{
1874 int rx_done = 0;
1875 u32 cause_rx_tx;
1876 unsigned long flags;
1877 struct mvneta_port *pp = netdev_priv(napi->dev);
1878
1879 if (!netif_running(pp->dev)) {
1880 napi_complete(napi);
1881 return rx_done;
1882 }
1883
1884 /* Read cause register */
1885 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
1886 MVNETA_RX_INTR_MASK(rxq_number);
1887
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01001888 /* For the case where the last mvneta_poll did not process all
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001889 * RX packets
1890 */
1891 cause_rx_tx |= pp->cause_rx_tx;
1892 if (rxq_number > 1) {
1893 while ((cause_rx_tx != 0) && (budget > 0)) {
1894 int count;
1895 struct mvneta_rx_queue *rxq;
1896 /* get rx queue number from cause_rx_tx */
1897 rxq = mvneta_rx_policy(pp, cause_rx_tx);
1898 if (!rxq)
1899 break;
1900
1901 /* process the packet in that rx queue */
1902 count = mvneta_rx(pp, budget, rxq);
1903 rx_done += count;
1904 budget -= count;
1905 if (budget > 0) {
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01001906 /* clear this rx queue's bit in the
1907 * cause rx tx register, so that the
1908 * next iteration will find the next
1909 * rx queue on which packets were
1910 * received
1911 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001912 cause_rx_tx &= ~((1 << rxq->id) << 8);
1913 }
1914 }
1915 } else {
1916 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
1917 budget -= rx_done;
1918 }
1919
1920 if (budget > 0) {
1921 cause_rx_tx = 0;
1922 napi_complete(napi);
1923 local_irq_save(flags);
1924 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1925 MVNETA_RX_INTR_MASK(rxq_number));
1926 local_irq_restore(flags);
1927 }
1928
1929 pp->cause_rx_tx = cause_rx_tx;
1930 return rx_done;
1931}
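
/* Editor's sketch of the causeRxTx layout described above mvneta_poll():
 * TX queues occupy bits 0-7 and RX queues bits 8-15. The helper names are
 * hypothetical and exist only to spell out the bit arithmetic.
 */
static inline bool __maybe_unused mvneta_cause_txq_pending(u32 cause, int txq)
{
	return cause & (1 << txq);
}

static inline bool __maybe_unused mvneta_cause_rxq_pending(u32 cause, int rxq)
{
	return cause & ((1 << rxq) << 8);
}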
1932
1933/* tx done timer callback */
1934static void mvneta_tx_done_timer_callback(unsigned long data)
1935{
1936 struct net_device *dev = (struct net_device *)data;
1937 struct mvneta_port *pp = netdev_priv(dev);
1938 int tx_done = 0, tx_todo = 0;
1939
1940 if (!netif_running(dev))
1941 return;
1942
1943 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
1944
1945 tx_done = mvneta_tx_done_gbe(pp,
1946 (((1 << txq_number) - 1) &
1947 MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK),
1948 &tx_todo);
1949 if (tx_todo > 0)
1950 mvneta_add_tx_done_timer(pp);
1951}
1952
1953/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
1954static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
1955 int num)
1956{
1957 struct net_device *dev = pp->dev;
1958 int i;
1959
1960 for (i = 0; i < num; i++) {
1961 struct sk_buff *skb;
1962 struct mvneta_rx_desc *rx_desc;
1963 unsigned long phys_addr;
1964
1965 skb = dev_alloc_skb(pp->pkt_size);
1966 if (!skb) {
1967 netdev_err(dev, "%s: rxq %d, %d of %d buffs filled\n",
1968 __func__, rxq->id, i, num);
1969 break;
1970 }
1971
1972 rx_desc = rxq->descs + i;
1973 memset(rx_desc, 0, sizeof(struct mvneta_rx_desc));
1974 phys_addr = dma_map_single(dev->dev.parent, skb->head,
1975 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1976 DMA_FROM_DEVICE);
1977 if (unlikely(dma_mapping_error(dev->dev.parent, phys_addr))) {
1978 dev_kfree_skb(skb);
1979 break;
1980 }
1981
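/* the skb pointer is stored in the descriptor as a cookie so the rx path can retrieve it later */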
1982 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)skb);
1983 }
1984
1985 /* Add this number of RX descriptors as non occupied (ready to
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01001986 * get packets)
1987 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03001988 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
1989
1990 return i;
1991}
1992
1993/* Free all packets pending transmit from all TXQs and reset TX port */
1994static void mvneta_tx_reset(struct mvneta_port *pp)
1995{
1996 int queue;
1997
1998 /* free the skbs in the hal tx ring */
1999 for (queue = 0; queue < txq_number; queue++)
2000 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2001
2002 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2003 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2004}
2005
2006static void mvneta_rx_reset(struct mvneta_port *pp)
2007{
2008 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2009 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2010}
2011
2012/* Rx/Tx queue initialization/cleanup methods */
2013
2014/* Create a specified RX queue */
2015static int mvneta_rxq_init(struct mvneta_port *pp,
2016 struct mvneta_rx_queue *rxq)
2018{
2019 rxq->size = pp->rx_ring_size;
2020
2021 /* Allocate memory for RX descriptors */
2022 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2023 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2024 &rxq->descs_phys, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002025 if (rxq->descs == NULL)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002026 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002027
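/* Make sure descriptor address is cache line size aligned */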
2028 BUG_ON(rxq->descs !=
2029 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2030
2031 rxq->last_desc = rxq->size - 1;
2032
2033 /* Set Rx descriptors queue starting address */
2034 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2035 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2036
2037 /* Set Offset */
2038 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
2039
2040 /* Set coalescing pkts and time */
2041 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2042 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2043
2044 /* Fill RXQ with buffers from RX pool */
2045 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
2046 mvneta_rxq_bm_disable(pp, rxq);
2047 mvneta_rxq_fill(pp, rxq, rxq->size);
2048
2049 return 0;
2050}
2051
2052/* Cleanup Rx queue */
2053static void mvneta_rxq_deinit(struct mvneta_port *pp,
2054 struct mvneta_rx_queue *rxq)
2055{
2056 mvneta_rxq_drop_pkts(pp, rxq);
2057
2058 if (rxq->descs)
2059 dma_free_coherent(pp->dev->dev.parent,
2060 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2061 rxq->descs,
2062 rxq->descs_phys);
2063
2064 rxq->descs = NULL;
2065 rxq->last_desc = 0;
2066 rxq->next_desc_to_proc = 0;
2067 rxq->descs_phys = 0;
2068}
2069
2070/* Create and initialize a tx queue */
2071static int mvneta_txq_init(struct mvneta_port *pp,
2072 struct mvneta_tx_queue *txq)
2073{
2074 txq->size = pp->tx_ring_size;
2075
2076 /* Allocate memory for TX descriptors */
2077 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2078 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2079 &txq->descs_phys, GFP_KERNEL);
Joe Perchesd0320f72013-03-14 13:07:21 +00002080 if (txq->descs == NULL)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002081 return -ENOMEM;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002082
2083 /* Make sure descriptor address is cache line size aligned */
2084 BUG_ON(txq->descs !=
2085 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2086
2087 txq->last_desc = txq->size - 1;
2088
2089 /* Set maximum bandwidth for enabled TXQs */
2090 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2091 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2092
2093 /* Set Tx descriptors queue starting address */
2094 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2095 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2096
2097 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2098 if (txq->tx_skb == NULL) {
2099 dma_free_coherent(pp->dev->dev.parent,
2100 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2101 txq->descs, txq->descs_phys);
2102 return -ENOMEM;
2103 }
2104 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2105
2106 return 0;
2107}
2108
2109/* Free the resources allocated by mvneta_txq_init() and reset the queue */
2110static void mvneta_txq_deinit(struct mvneta_port *pp,
2111 struct mvneta_tx_queue *txq)
2112{
2113 kfree(txq->tx_skb);
2114
2115 if (txq->descs)
2116 dma_free_coherent(pp->dev->dev.parent,
2117 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2118 txq->descs, txq->descs_phys);
2119
2120 txq->descs = NULL;
2121 txq->last_desc = 0;
2122 txq->next_desc_to_proc = 0;
2123 txq->descs_phys = 0;
2124
2125 /* Set minimum bandwidth for disabled TXQs */
2126 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2127 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2128
2129 /* Set Tx descriptors queue starting address and size */
2130 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2131 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2132}
2133
2134/* Cleanup all Tx queues */
2135static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2136{
2137 int queue;
2138
2139 for (queue = 0; queue < txq_number; queue++)
2140 mvneta_txq_deinit(pp, &pp->txqs[queue]);
2141}
2142
2143/* Cleanup all Rx queues */
2144static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2145{
2146 int queue;
2147
2148 for (queue = 0; queue < rxq_number; queue++)
2149 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
2150}
2151
2152
2153/* Init all Rx queues */
2154static int mvneta_setup_rxqs(struct mvneta_port *pp)
2155{
2156 int queue;
2157
2158 for (queue = 0; queue < rxq_number; queue++) {
2159 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
2160 if (err) {
2161 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2162 __func__, queue);
2163 mvneta_cleanup_rxqs(pp);
2164 return err;
2165 }
2166 }
2167
2168 return 0;
2169}
2170
2171/* Init all tx queues */
2172static int mvneta_setup_txqs(struct mvneta_port *pp)
2173{
2174 int queue;
2175
2176 for (queue = 0; queue < txq_number; queue++) {
2177 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2178 if (err) {
2179 netdev_err(pp->dev, "%s: can't create txq=%d\n",
2180 __func__, queue);
2181 mvneta_cleanup_txqs(pp);
2182 return err;
2183 }
2184 }
2185
2186 return 0;
2187}
2188
2189static void mvneta_start_dev(struct mvneta_port *pp)
2190{
2191 mvneta_max_rx_size_set(pp, pp->pkt_size);
2192 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2193
2194 /* start the Rx/Tx activity */
2195 mvneta_port_enable(pp);
2196
2197 /* Enable polling on the port */
2198 napi_enable(&pp->napi);
2199
2200 /* Unmask interrupts */
2201 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2202 MVNETA_RX_INTR_MASK(rxq_number));
2203
2204 phy_start(pp->phy_dev);
2205 netif_tx_start_all_queues(pp->dev);
2206}
2207
2208static void mvneta_stop_dev(struct mvneta_port *pp)
2209{
2210 phy_stop(pp->phy_dev);
2211
2212 napi_disable(&pp->napi);
2213
2214 netif_carrier_off(pp->dev);
2215
2216 mvneta_port_down(pp);
2217 netif_tx_stop_all_queues(pp->dev);
2218
2219 /* Stop the port activity */
2220 mvneta_port_disable(pp);
2221
2222 /* Clear all ethernet port interrupts */
2223 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2224 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2225
2226 /* Mask all ethernet port interrupts */
2227 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2228 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2229 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2230
2231 mvneta_tx_reset(pp);
2232 mvneta_rx_reset(pp);
2233}
2234
2235/* tx timeout callback - display a message and stop/start the network device */
2236static void mvneta_tx_timeout(struct net_device *dev)
2237{
2238 struct mvneta_port *pp = netdev_priv(dev);
2239
2240 netdev_info(dev, "tx timeout\n");
2241 mvneta_stop_dev(pp);
2242 mvneta_start_dev(pp);
2243}
2244
2245/* Validate the MTU: return the (possibly adjusted) value, or -EINVAL */
2246static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2247{
2248 if (mtu < 68) {
2249 netdev_err(dev, "cannot change mtu to less than 68\n");
2250 return -EINVAL;
2251 }
2252
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002253 /* 9676 == 9700 - 20 and rounding to 8 */
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002254 if (mtu > 9676) {
2255 netdev_info(dev, "Illegal MTU value %d, rounding to 9676\n", mtu);
2256 mtu = 9676;
2257 }
2258
2259 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2260 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2261 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2262 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2263 }
2264
2265 return mtu;
2266}
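
/* Editor's worked example (illustration only; the helper name is
 * hypothetical): a request for a 9999-byte MTU is first clamped to 9676 by
 * the check above, and may then be rounded again so that the resulting RX
 * packet size stays a multiple of 8.
 */
static int __maybe_unused mvneta_mtu_clamp_demo(struct net_device *dev)
{
	return mvneta_check_mtu_valid(dev, 9999);
}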
2267
2268/* Change the device mtu */
2269static int mvneta_change_mtu(struct net_device *dev, int mtu)
2270{
2271 struct mvneta_port *pp = netdev_priv(dev);
2272 int ret;
2273
2274 mtu = mvneta_check_mtu_valid(dev, mtu);
2275 if (mtu < 0)
2276 return -EINVAL;
2277
2278 dev->mtu = mtu;
2279
2280 if (!netif_running(dev))
2281 return 0;
2282
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002283 /* The interface is running, so we have to force a
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002284 * reallocation of the RXQs
2285 */
2286 mvneta_stop_dev(pp);
2287
2288 mvneta_cleanup_txqs(pp);
2289 mvneta_cleanup_rxqs(pp);
2290
2291 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2292
2293 ret = mvneta_setup_rxqs(pp);
2294 if (ret) {
2295 netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
2296 return ret;
2297 }
2298
2299 ret = mvneta_setup_txqs(pp);
if (ret) {
netdev_err(pp->dev, "unable to setup txqs after MTU change\n");
return ret;
}
2300
2301 mvneta_start_dev(pp);
2302 mvneta_port_up(pp);
2303
2304 return 0;
2305}
2306
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00002307/* Get mac address */
2308static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2309{
2310 u32 mac_addr_l, mac_addr_h;
2311
2312 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
2313 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
2314 addr[0] = (mac_addr_h >> 24) & 0xFF;
2315 addr[1] = (mac_addr_h >> 16) & 0xFF;
2316 addr[2] = (mac_addr_h >> 8) & 0xFF;
2317 addr[3] = mac_addr_h & 0xFF;
2318 addr[4] = (mac_addr_l >> 8) & 0xFF;
2319 addr[5] = mac_addr_l & 0xFF;
2320}
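
/* Editor's sketch of the inverse packing implied by the unpacking above:
 * the high register carries bytes 0-3 of the MAC, the low register bytes
 * 4-5. The helper name is hypothetical; the driver's real setter,
 * mvneta_mac_addr_set(), lives earlier in this file.
 */
static void __maybe_unused mvneta_mac_addr_pack(const unsigned char *addr,
						u32 *mac_h, u32 *mac_l)
{
	*mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	*mac_l = (addr[4] << 8) | addr[5];
}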
2321
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002322/* Handle setting mac address */
2323static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2324{
2325 struct mvneta_port *pp = netdev_priv(dev);
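/* addr is a struct sockaddr: skip the two-byte sa_family field to reach the MAC bytes in sa_data */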
2326 u8 *mac = addr + 2;
2327 int i;
2328
2329 if (netif_running(dev))
2330 return -EBUSY;
2331
2332 /* Remove previous address table entry */
2333 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2334
2335 /* Set new addr in hw */
2336 mvneta_mac_addr_set(pp, mac, rxq_def);
2337
2338 /* Set addr in the device */
2339 for (i = 0; i < ETH_ALEN; i++)
2340 dev->dev_addr[i] = mac[i];
2341
2342 return 0;
2343}
2344
2345static void mvneta_adjust_link(struct net_device *ndev)
2346{
2347 struct mvneta_port *pp = netdev_priv(ndev);
2348 struct phy_device *phydev = pp->phy_dev;
2349 int status_change = 0;
2350
2351 if (phydev->link) {
2352 if ((pp->speed != phydev->speed) ||
2353 (pp->duplex != phydev->duplex)) {
2354 u32 val;
2355
2356 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2357 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2358 MVNETA_GMAC_CONFIG_GMII_SPEED |
Thomas Petazzoni71408602013-09-04 16:21:18 +02002359 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
2360 MVNETA_GMAC_AN_SPEED_EN |
2361 MVNETA_GMAC_AN_DUPLEX_EN);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002362
2363 if (phydev->duplex)
2364 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2365
2366 if (phydev->speed == SPEED_1000)
2367 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2368 else
2369 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2370
2371 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2372
2373 pp->duplex = phydev->duplex;
2374 pp->speed = phydev->speed;
2375 }
2376 }
2377
2378 if (phydev->link != pp->link) {
2379 if (!phydev->link) {
2380 pp->duplex = -1;
2381 pp->speed = 0;
2382 }
2383
2384 pp->link = phydev->link;
2385 status_change = 1;
2386 }
2387
2388 if (status_change) {
2389 if (phydev->link) {
2390 u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2391 val |= (MVNETA_GMAC_FORCE_LINK_PASS |
2392 MVNETA_GMAC_FORCE_LINK_DOWN);
2393 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2394 mvneta_port_up(pp);
2395 netdev_info(pp->dev, "link up\n");
2396 } else {
2397 mvneta_port_down(pp);
2398 netdev_info(pp->dev, "link down\n");
2399 }
2400 }
2401}
2402
2403static int mvneta_mdio_probe(struct mvneta_port *pp)
2404{
2405 struct phy_device *phy_dev;
2406
2407 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2408 pp->phy_interface);
2409 if (!phy_dev) {
2410 netdev_err(pp->dev, "could not find the PHY\n");
2411 return -ENODEV;
2412 }
2413
2414 phy_dev->supported &= PHY_GBIT_FEATURES;
2415 phy_dev->advertising = phy_dev->supported;
2416
2417 pp->phy_dev = phy_dev;
2418 pp->link = 0;
2419 pp->duplex = 0;
2420 pp->speed = 0;
2421
2422 return 0;
2423}
2424
2425static void mvneta_mdio_remove(struct mvneta_port *pp)
2426{
2427 phy_disconnect(pp->phy_dev);
2428 pp->phy_dev = NULL;
2429}
2430
2431static int mvneta_open(struct net_device *dev)
2432{
2433 struct mvneta_port *pp = netdev_priv(dev);
2434 int ret;
2435
2436 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2437
2438 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2439
2440 ret = mvneta_setup_rxqs(pp);
2441 if (ret)
2442 return ret;
2443
2444 ret = mvneta_setup_txqs(pp);
2445 if (ret)
2446 goto err_cleanup_rxqs;
2447
2448 /* Connect to port interrupt line */
2449 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
2450 MVNETA_DRIVER_NAME, pp);
2451 if (ret) {
2452 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
2453 goto err_cleanup_txqs;
2454 }
2455
2456 /* By default, the link is down */
2457 netif_carrier_off(pp->dev);
2458
2459 ret = mvneta_mdio_probe(pp);
2460 if (ret < 0) {
2461 netdev_err(dev, "cannot probe MDIO bus\n");
2462 goto err_free_irq;
2463 }
2464
2465 mvneta_start_dev(pp);
2466
2467 return 0;
2468
2469err_free_irq:
2470 free_irq(pp->dev->irq, pp);
2471err_cleanup_txqs:
2472 mvneta_cleanup_txqs(pp);
2473err_cleanup_rxqs:
2474 mvneta_cleanup_rxqs(pp);
2475 return ret;
2476}
2477
2478/* Stop the port, free port interrupt line */
2479static int mvneta_stop(struct net_device *dev)
2480{
2481 struct mvneta_port *pp = netdev_priv(dev);
2482
2483 mvneta_stop_dev(pp);
2484 mvneta_mdio_remove(pp);
2485 free_irq(dev->irq, pp);
2486 mvneta_cleanup_rxqs(pp);
2487 mvneta_cleanup_txqs(pp);
2488 del_timer(&pp->tx_done_timer);
2489 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
2490
2491 return 0;
2492}
2493
Thomas Petazzoni15f59452013-09-04 16:26:52 +02002494static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2495{
2496 struct mvneta_port *pp = netdev_priv(dev);
2497 int ret;
2498
2499 if (!pp->phy_dev)
2500 return -ENOTSUPP;
2501
2502 ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
2503 if (!ret)
2504 mvneta_adjust_link(dev);
2505
2506 return ret;
2507}
2508
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002509/* Ethtool methods */
2510
2511/* Get settings (phy address, speed) for ethtool */
2512int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2513{
2514 struct mvneta_port *pp = netdev_priv(dev);
2515
2516 if (!pp->phy_dev)
2517 return -ENODEV;
2518
2519 return phy_ethtool_gset(pp->phy_dev, cmd);
2520}
2521
2522/* Set settings (phy address, speed) for ethtool */
2523int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2524{
2525 struct mvneta_port *pp = netdev_priv(dev);
2526
2527 if (!pp->phy_dev)
2528 return -ENODEV;
2529
2530 return phy_ethtool_sset(pp->phy_dev, cmd);
2531}
2532
2533/* Set interrupt coalescing for ethtool */
2534static int mvneta_ethtool_set_coalesce(struct net_device *dev,
2535 struct ethtool_coalesce *c)
2536{
2537 struct mvneta_port *pp = netdev_priv(dev);
2538 int queue;
2539
2540 for (queue = 0; queue < rxq_number; queue++) {
2541 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2542 rxq->time_coal = c->rx_coalesce_usecs;
2543 rxq->pkts_coal = c->rx_max_coalesced_frames;
2544 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2545 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2546 }
2547
2548 for (queue = 0; queue < txq_number; queue++) {
2549 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2550 txq->done_pkts_coal = c->tx_max_coalesced_frames;
2551 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2552 }
2553
2554 return 0;
2555}
2556
2557/* Get interrupt coalescing for ethtool */
2558static int mvneta_ethtool_get_coalesce(struct net_device *dev,
2559 struct ethtool_coalesce *c)
2560{
2561 struct mvneta_port *pp = netdev_priv(dev);
2562
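/* every queue is programmed with the same values (see the setter above), so queue 0 is representative */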
2563 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
2564 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
2565
2566 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
2567 return 0;
2568}
2569
2570
2571static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
2572 struct ethtool_drvinfo *drvinfo)
2573{
2574 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
2575 sizeof(drvinfo->driver));
2576 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
2577 sizeof(drvinfo->version));
2578 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
2579 sizeof(drvinfo->bus_info));
2580}
2581
2582
2583static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
2584 struct ethtool_ringparam *ring)
2585{
2586 struct mvneta_port *pp = netdev_priv(netdev);
2587
2588 ring->rx_max_pending = MVNETA_MAX_RXD;
2589 ring->tx_max_pending = MVNETA_MAX_TXD;
2590 ring->rx_pending = pp->rx_ring_size;
2591 ring->tx_pending = pp->tx_ring_size;
2592}
2593
2594static int mvneta_ethtool_set_ringparam(struct net_device *dev,
2595 struct ethtool_ringparam *ring)
2596{
2597 struct mvneta_port *pp = netdev_priv(dev);
2598
2599 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
2600 return -EINVAL;
2601 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
2602 ring->rx_pending : MVNETA_MAX_RXD;
2603 pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
2604 ring->tx_pending : MVNETA_MAX_TXD;
2605
2606 if (netif_running(dev)) {
2607 mvneta_stop(dev);
2608 if (mvneta_open(dev)) {
2609 netdev_err(dev,
2610 "error on opening device after ring param change\n");
2611 return -ENOMEM;
2612 }
2613 }
2614
2615 return 0;
2616}
2617
2618static const struct net_device_ops mvneta_netdev_ops = {
2619 .ndo_open = mvneta_open,
2620 .ndo_stop = mvneta_stop,
2621 .ndo_start_xmit = mvneta_tx,
2622 .ndo_set_rx_mode = mvneta_set_rx_mode,
2623 .ndo_set_mac_address = mvneta_set_mac_addr,
2624 .ndo_change_mtu = mvneta_change_mtu,
2625 .ndo_tx_timeout = mvneta_tx_timeout,
2626 .ndo_get_stats64 = mvneta_get_stats64,
Thomas Petazzoni15f59452013-09-04 16:26:52 +02002627 .ndo_do_ioctl = mvneta_ioctl,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002628};
2629
2630const struct ethtool_ops mvneta_eth_tool_ops = {
2631 .get_link = ethtool_op_get_link,
2632 .get_settings = mvneta_ethtool_get_settings,
2633 .set_settings = mvneta_ethtool_set_settings,
2634 .set_coalesce = mvneta_ethtool_set_coalesce,
2635 .get_coalesce = mvneta_ethtool_get_coalesce,
2636 .get_drvinfo = mvneta_ethtool_get_drvinfo,
2637 .get_ringparam = mvneta_ethtool_get_ringparam,
2638 .set_ringparam = mvneta_ethtool_set_ringparam,
2639};
2640
2641/* Initialize hw */
Greg KH03ce7582012-12-21 13:42:15 +00002642static int mvneta_init(struct mvneta_port *pp, int phy_addr)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002643{
2644 int queue;
2645
2646 /* Disable port */
2647 mvneta_port_disable(pp);
2648
2649 /* Set port default values */
2650 mvneta_defaults_set(pp);
2651
2652 pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
2653 GFP_KERNEL);
2654 if (!pp->txqs)
2655 return -ENOMEM;
2656
2657 /* Initialize TX descriptor rings */
2658 for (queue = 0; queue < txq_number; queue++) {
2659 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2660 txq->id = queue;
2661 txq->size = pp->tx_ring_size;
2662 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2663 }
2664
2665 pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
2666 GFP_KERNEL);
2667 if (!pp->rxqs) {
2668 kfree(pp->txqs);
2669 return -ENOMEM;
2670 }
2671
2672 /* Create Rx descriptor rings */
2673 for (queue = 0; queue < rxq_number; queue++) {
2674 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2675 rxq->id = queue;
2676 rxq->size = pp->rx_ring_size;
2677 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
2678 rxq->time_coal = MVNETA_RX_COAL_USEC;
2679 }
2680
2681 return 0;
2682}
2683
Thomas Petazzoni70eeaf92012-11-19 14:40:02 +01002684static void mvneta_deinit(struct mvneta_port *pp)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002685{
2686 kfree(pp->txqs);
2687 kfree(pp->rxqs);
2688}
2689
2690/* Platform glue: initialize decoding windows */
Greg KH03ce7582012-12-21 13:42:15 +00002691static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
2692 const struct mbus_dram_target_info *dram)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002693{
2694 u32 win_enable;
2695 u32 win_protect;
2696 int i;
2697
2698 for (i = 0; i < 6; i++) {
2699 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
2700 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
2701
2702 if (i < 4)
2703 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
2704 }
2705
2706 win_enable = 0x3f;
2707 win_protect = 0;
2708
2709 for (i = 0; i < dram->num_cs; i++) {
2710 const struct mbus_dram_window *cs = dram->cs + i;
2711 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
2712 (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
2713
2714 mvreg_write(pp, MVNETA_WIN_SIZE(i),
2715 (cs->size - 1) & 0xffff0000);
2716
2717 win_enable &= ~(1 << i);
2718 win_protect |= 3 << (2 * i);
2719 }
2720
2721 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
2722}
2723
2724/* Power up the port */
Greg KH03ce7582012-12-21 13:42:15 +00002725static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002726{
2727 u32 val;
2728
2729 /* MAC Cause register should be cleared */
2730 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2731
2732 if (phy_mode == PHY_INTERFACE_MODE_SGMII)
2733 mvneta_port_sgmii_config(pp);
2734
2735 mvneta_gmac_rgmii_set(pp, 1);
2736
2737 /* Cancel Port Reset */
2738 val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2739 val &= ~MVNETA_GMAC2_PORT_RESET;
2740 mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
2741
2742 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
2743 MVNETA_GMAC2_PORT_RESET) != 0)
2744 continue;
2745}
2746
2747/* Device initialization routine */
Greg KH03ce7582012-12-21 13:42:15 +00002748static int mvneta_probe(struct platform_device *pdev)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002749{
2750 const struct mbus_dram_target_info *dram_target_info;
2751 struct device_node *dn = pdev->dev.of_node;
2752 struct device_node *phy_node;
Thomas Petazzoni189dd622012-11-19 14:15:25 +01002753 u32 phy_addr;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002754 struct mvneta_port *pp;
2755 struct net_device *dev;
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00002756 const char *dt_mac_addr;
2757 char hw_mac_addr[ETH_ALEN];
2758 const char *mac_from;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002759 int phy_mode;
2760 int err;
2761
Thomas Petazzoni6a20c172012-11-19 11:41:25 +01002762 /* Our multiqueue support is not complete, so for now, only
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002763 * allow the usage of the first RX queue
2764 */
2765 if (rxq_def != 0) {
2766 dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
2767 return -EINVAL;
2768 }
2769
Willy Tarreauee40a112013-04-11 23:00:37 +02002770 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002771 if (!dev)
2772 return -ENOMEM;
2773
2774 dev->irq = irq_of_parse_and_map(dn, 0);
2775 if (dev->irq == 0) {
2776 err = -EINVAL;
2777 goto err_free_netdev;
2778 }
2779
2780 phy_node = of_parse_phandle(dn, "phy", 0);
2781 if (!phy_node) {
2782 dev_err(&pdev->dev, "no associated PHY\n");
2783 err = -ENODEV;
2784 goto err_free_irq;
2785 }
2786
2787 phy_mode = of_get_phy_mode(dn);
2788 if (phy_mode < 0) {
2789 dev_err(&pdev->dev, "incorrect phy-mode\n");
2790 err = -EINVAL;
2791 goto err_free_irq;
2792 }
2793
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002794 dev->tx_queue_len = MVNETA_MAX_TXD;
2795 dev->watchdog_timeo = 5 * HZ;
2796 dev->netdev_ops = &mvneta_netdev_ops;
2797
2798 SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
2799
2800 pp = netdev_priv(dev);
2801
John Stultz827da442013-10-07 15:51:58 -07002802 u64_stats_init(&pp->tx_stats.syncp);
2803 u64_stats_init(&pp->rx_stats.syncp);
2804
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002805 pp->weight = MVNETA_RX_POLL_WEIGHT;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002806 pp->phy_node = phy_node;
2807 pp->phy_interface = phy_mode;
2808
Thomas Petazzoni189dd622012-11-19 14:15:25 +01002809 pp->clk = devm_clk_get(&pdev->dev, NULL);
2810 if (IS_ERR(pp->clk)) {
2811 err = PTR_ERR(pp->clk);
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02002812 goto err_free_irq;
Thomas Petazzoni189dd622012-11-19 14:15:25 +01002813 }
2814
2815 clk_prepare_enable(pp->clk);
2816
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02002817 pp->base = of_iomap(dn, 0);
2818 if (pp->base == NULL) {
2819 err = -ENOMEM;
2820 goto err_clk;
2821 }
2822
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00002823 dt_mac_addr = of_get_mac_address(dn);
Luka Perkov6c7a9a32013-10-30 00:10:01 +01002824 if (dt_mac_addr) {
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00002825 mac_from = "device tree";
2826 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
2827 } else {
2828 mvneta_get_mac_addr(pp, hw_mac_addr);
2829 if (is_valid_ether_addr(hw_mac_addr)) {
2830 mac_from = "hardware";
2831 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
2832 } else {
2833 mac_from = "random";
2834 eth_hw_addr_random(dev);
2835 }
2836 }
2837
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002838 pp->tx_done_timer.data = (unsigned long)dev;
Arnaud Patard \(Rtp\)aded0952013-07-29 21:56:47 +02002839 pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
2840 init_timer(&pp->tx_done_timer);
2841 clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002842
2843 pp->tx_ring_size = MVNETA_MAX_TXD;
2844 pp->rx_ring_size = MVNETA_MAX_RXD;
2845
2846 pp->dev = dev;
2847 SET_NETDEV_DEV(dev, &pdev->dev);
2848
2849 err = mvneta_init(pp, phy_addr);
2850 if (err < 0) {
2851 dev_err(&pdev->dev, "can't init eth hal\n");
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02002852 goto err_unmap;
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002853 }
2854 mvneta_port_power_up(pp, phy_mode);
2855
2856 dram_target_info = mv_mbus_dram_info();
2857 if (dram_target_info)
2858 mvneta_conf_mbus_windows(pp, dram_target_info);
2859
2860 netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
2861
willy tarreaub50b72d2013-04-06 08:47:01 +00002862 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2863 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2864 dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2865 dev->priv_flags |= IFF_UNICAST_FLT;
2866
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002867 err = register_netdev(dev);
2868 if (err < 0) {
2869 dev_err(&pdev->dev, "failed to register\n");
2870 goto err_deinit;
2871 }
2872
Thomas Petazzoni8cc3e432013-06-04 04:52:23 +00002873 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
2874 dev->dev_addr);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002875
2876 platform_set_drvdata(pdev, pp->dev);
2877
2878 return 0;
2879
2880err_deinit:
2881 mvneta_deinit(pp);
2882err_unmap:
2883 iounmap(pp->base);
Arnaud Patard \(Rtp\)5445eaf2013-07-29 21:56:48 +02002884err_clk:
2885 clk_disable_unprepare(pp->clk);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002886err_free_irq:
2887 irq_dispose_mapping(dev->irq);
2888err_free_netdev:
2889 free_netdev(dev);
2890 return err;
2891}
2892
2893/* Device removal routine */
Greg KH03ce7582012-12-21 13:42:15 +00002894static int mvneta_remove(struct platform_device *pdev)
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002895{
2896 struct net_device *dev = platform_get_drvdata(pdev);
2897 struct mvneta_port *pp = netdev_priv(dev);
2898
2899 unregister_netdev(dev);
2900 mvneta_deinit(pp);
Thomas Petazzoni189dd622012-11-19 14:15:25 +01002901 clk_disable_unprepare(pp->clk);
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002902 iounmap(pp->base);
2903 irq_dispose_mapping(dev->irq);
2904 free_netdev(dev);
2905
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002906 return 0;
2907}
2908
2909static const struct of_device_id mvneta_match[] = {
2910 { .compatible = "marvell,armada-370-neta" },
2911 { }
2912};
2913MODULE_DEVICE_TABLE(of, mvneta_match);
2914
2915static struct platform_driver mvneta_driver = {
2916 .probe = mvneta_probe,
Greg KH03ce7582012-12-21 13:42:15 +00002917 .remove = mvneta_remove,
Thomas Petazzonic5aff182012-08-17 14:04:28 +03002918 .driver = {
2919 .name = MVNETA_DRIVER_NAME,
2920 .of_match_table = mvneta_match,
2921 },
2922};
2923
2924module_platform_driver(mvneta_driver);
2925
2926MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
2927MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
2928MODULE_LICENSE("GPL");
2929
2930module_param(rxq_number, int, S_IRUGO);
2931module_param(txq_number, int, S_IRUGO);
2932
2933module_param(rxq_def, int, S_IRUGO);