/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

36/* RX Fifo Registers */
37#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
38#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
39#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
40#define MVPP2_RX_FIFO_INIT_REG 0x64
41
42/* RX DMA Top Registers */
43#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
44#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
45#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
46#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
47#define MVPP2_POOL_BUF_SIZE_OFFSET 5
48#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
49#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
50#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
51#define MVPP2_RXQ_POOL_SHORT_OFFS 20
52#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000
53#define MVPP2_RXQ_POOL_LONG_OFFS 24
54#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000
55#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
56#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
57#define MVPP2_RXQ_DISABLE_MASK BIT(31)
58
59/* Parser Registers */
60#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
61#define MVPP2_PRS_PORT_LU_MAX 0xf
62#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
63#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
64#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
65#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
66#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
67#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
68#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
69#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
70#define MVPP2_PRS_TCAM_IDX_REG 0x1100
71#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
72#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
73#define MVPP2_PRS_SRAM_IDX_REG 0x1200
74#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
75#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
76#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
77
78/* Classifier Registers */
79#define MVPP2_CLS_MODE_REG 0x1800
80#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
81#define MVPP2_CLS_PORT_WAY_REG 0x1810
82#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
83#define MVPP2_CLS_LKP_INDEX_REG 0x1814
84#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
85#define MVPP2_CLS_LKP_TBL_REG 0x1818
86#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
87#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
88#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
89#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
90#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
91#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
92#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
93#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
94#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
95#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
96#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
97#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
98
99/* Descriptor Manager Top Registers */
100#define MVPP2_RXQ_NUM_REG 0x2040
101#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
102#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
103#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
104#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
105#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
106#define MVPP2_RXQ_NUM_NEW_OFFSET 16
107#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
108#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
109#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
110#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
111#define MVPP2_RXQ_THRESH_REG 0x204c
112#define MVPP2_OCCUPIED_THRESH_OFFSET 0
113#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
114#define MVPP2_RXQ_INDEX_REG 0x2050
115#define MVPP2_TXQ_NUM_REG 0x2080
116#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
117#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
118#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
119#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
120#define MVPP2_TXQ_THRESH_REG 0x2094
121#define MVPP2_TRANSMITTED_THRESH_OFFSET 16
122#define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000
123#define MVPP2_TXQ_INDEX_REG 0x2098
124#define MVPP2_TXQ_PREF_BUF_REG 0x209c
125#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
126#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
127#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
128#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
129#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
130#define MVPP2_TXQ_PENDING_REG 0x20a0
131#define MVPP2_TXQ_PENDING_MASK 0x3fff
132#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
133#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
134#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
135#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
136#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
137#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
138#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
139#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
140#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
141#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
142#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
143#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
144#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
145#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
146#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
147#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
148
149/* MBUS bridge registers */
150#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
151#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
152#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
153#define MVPP2_BASE_ADDR_ENABLE 0x4060
154
155/* Interrupt Cause and Mask registers */
156#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD		0xfffff0
#define MVPP2_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))
159#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
160#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
161#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
162#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
163#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
164#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
165#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
166#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
167#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
168#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
169#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
170#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
171#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
172#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
173#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
174#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
175#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
176#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
177
178/* Buffer Manager registers */
179#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
180#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
181#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
182#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
183#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
184#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
185#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
186#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
187#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
188#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
189#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
190#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
191#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
192#define MVPP2_BM_START_MASK BIT(0)
193#define MVPP2_BM_STOP_MASK BIT(1)
194#define MVPP2_BM_STATE_MASK BIT(4)
195#define MVPP2_BM_LOW_THRESH_OFFS 8
196#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
197#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
198 MVPP2_BM_LOW_THRESH_OFFS)
199#define MVPP2_BM_HIGH_THRESH_OFFS 16
200#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
201#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
202 MVPP2_BM_HIGH_THRESH_OFFS)
203#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
204#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
205#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
206#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
207#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
208#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
209#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
210#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
211#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
212#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
213#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
214#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
215#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
216#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
217#define MVPP2_BM_VIRT_RLS_REG 0x64c0
218#define MVPP2_BM_MC_RLS_REG 0x64c4
219#define MVPP2_BM_MC_ID_MASK 0xfff
220#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12)
221
222/* TX Scheduler registers */
223#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
224#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
225#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
226#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
227#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
228#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
229#define MVPP2_TXP_SCHED_MTU_REG 0x801c
230#define MVPP2_TXP_MTU_MAX 0x7FFFF
231#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
232#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
233#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
234#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
235#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
236#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
237#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
238#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
239#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
240#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
241#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
242#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
243#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
244#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
245
246/* TX general registers */
247#define MVPP2_TX_SNOOP_REG 0x8800
248#define MVPP2_TX_PORT_FLUSH_REG 0x8810
249#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
250
251/* LMS registers */
252#define MVPP2_SRC_ADDR_MIDDLE 0x24
253#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27

259/* Per-port registers */
260#define MVPP2_GMAC_CTRL_0_REG 0x0
261#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
262#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
263#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
264#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
265#define MVPP2_GMAC_CTRL_1_REG 0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
268#define MVPP2_GMAC_PCS_LB_EN_BIT 6
269#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
270#define MVPP2_GMAC_SA_LOW_OFFS 7
271#define MVPP2_GMAC_CTRL_2_REG 0x8
272#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
273#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
274#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
275#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
276#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
277#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
278#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
279#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
280#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
281#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
284#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
285#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
286#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
287#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
288#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
289 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
290
291#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
292
293/* Descriptor ring Macros */
294#define MVPP2_QUEUE_NEXT_DESC(q, index) \
295 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
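/* E.g. with a 256-entry ring (last_desc = 255), index 254 advances to
 * 255 and index 255 wraps back to 0.
 */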
296
297/* Various constants */
298
299/* Coalescing */
300#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS		1000000UL
#define MVPP2_RX_COAL_PKTS			32
303#define MVPP2_RX_COAL_USEC 100
304
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically keep the IP header aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
313#define MVPP2_MH_SIZE 2
314#define MVPP2_ETH_TYPE_LEN 2
315#define MVPP2_PPPOE_HDR_SIZE 8
316#define MVPP2_VLAN_TAG_LEN 4
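/* Worked example of the alignment property described above: for an
 * untagged frame placed at a 4-byte-aligned buffer address, the
 * MVPP2_MH_SIZE (2) Marvell header bytes plus the 14-byte Ethernet
 * header put the IP header at offset 2 + 14 = 16, i.e. on a 4-byte
 * boundary, without any extra software padding.
 */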
317
318/* Lbtd 802.3 type */
319#define MVPP2_IP_LBDT_TYPE 0xfffa
320
#define MVPP2_TX_CSUM_MAX_SIZE		9800
322
323/* Timeout constants */
324#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
325#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
326
327#define MVPP2_TX_MTU_MAX 0x7ffff
328
329/* Maximum number of T-CONTs of PON port */
330#define MVPP2_MAX_TCONT 16
331
332/* Maximum number of supported ports */
333#define MVPP2_MAX_PORTS 4
334
335/* Maximum number of TXQs used by single port */
336#define MVPP2_MAX_TXQ 8
337
338/* Maximum number of RXQs used by single port */
339#define MVPP2_MAX_RXQ 8
340
/* Default number of RXQs in use */
342#define MVPP2_DEFAULT_RXQ 4
343
344/* Total number of RXQs available to all ports */
345#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
346
347/* Max number of Rx descriptors */
348#define MVPP2_MAX_RXD 128
349
350/* Max number of Tx descriptors */
351#define MVPP2_MAX_TXD 1024
352
353/* Amount of Tx descriptors that can be reserved at once by CPU */
354#define MVPP2_CPU_DESC_CHUNK 64
355
356/* Max number of Tx descriptors in each aggregated queue */
357#define MVPP2_AGGR_TXQ_SIZE 256
358
359/* Descriptor aligned size */
360#define MVPP2_DESC_ALIGNED_SIZE 32
361
362/* Descriptor alignment mask */
363#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
364
365/* RX FIFO constants */
366#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
367#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
368#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
369
370/* RX buffer constants */
371#define MVPP2_SKB_SHINFO_SIZE \
372 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
373
#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

378#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
379#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
380#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
381 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
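/* Rough sizing example (illustrative only, the exact numbers depend on
 * cache_line_size(), NET_SKB_PAD and struct skb_shared_info): for a
 * 1500-byte MTU and 64-byte cache lines, MVPP2_RX_PKT_SIZE(1500) =
 * ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536, and MVPP2_RX_BUF_SIZE() /
 * MVPP2_RX_TOTAL_SIZE() add NET_SKB_PAD and the shared-info area on
 * top of that.
 */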
382
383#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
384
385/* IPv6 max L3 address size */
386#define MVPP2_MAX_L3_ADDR_SIZE 16
387
388/* Port flags */
389#define MVPP2_F_LOOPBACK BIT(0)
390
391/* Marvell tag types */
392enum mvpp2_tag_type {
393 MVPP2_TAG_TYPE_NONE = 0,
394 MVPP2_TAG_TYPE_MH = 1,
395 MVPP2_TAG_TYPE_DSA = 2,
396 MVPP2_TAG_TYPE_EDSA = 3,
397 MVPP2_TAG_TYPE_VLAN = 4,
398 MVPP2_TAG_TYPE_LAST = 5
399};
400
401/* Parser constants */
402#define MVPP2_PRS_TCAM_SRAM_SIZE 256
403#define MVPP2_PRS_TCAM_WORDS 6
404#define MVPP2_PRS_SRAM_WORDS 4
405#define MVPP2_PRS_FLOW_ID_SIZE 64
406#define MVPP2_PRS_FLOW_ID_MASK 0x3f
407#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
408#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
409#define MVPP2_PRS_IPV4_HEAD 0x40
410#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
411#define MVPP2_PRS_IPV4_MC 0xe0
412#define MVPP2_PRS_IPV4_MC_MASK 0xf0
413#define MVPP2_PRS_IPV4_BC_MASK 0xff
414#define MVPP2_PRS_IPV4_IHL 0x5
415#define MVPP2_PRS_IPV4_IHL_MASK 0xf
416#define MVPP2_PRS_IPV6_MC 0xff
417#define MVPP2_PRS_IPV6_MC_MASK 0xff
418#define MVPP2_PRS_IPV6_HOP_MASK 0xff
419#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
420#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
421#define MVPP2_PRS_DBL_VLANS_MAX 100
422
423/* Tcam structure:
424 * - lookup ID - 4 bits
425 * - port ID - 1 byte
426 * - additional information - 1 byte
427 * - header data - 8 bytes
428 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
429 */
430#define MVPP2_PRS_AI_BITS 8
431#define MVPP2_PRS_PORT_MASK 0xff
432#define MVPP2_PRS_LU_MASK 0xf
433#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
434 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
435#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
436 (((offs) * 2) - ((offs) % 2) + 2)
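/* The two macros above interleave data and enable bytes in the TCAM
 * entry: MVPP2_PRS_TCAM_DATA_BYTE() maps offsets 0,1,2,3,4,5 to bytes
 * 0,1,4,5,8,9 while MVPP2_PRS_TCAM_DATA_BYTE_EN() maps them to bytes
 * 2,3,6,7,10,11, so each 16-bit data half-word is followed by its
 * 16-bit enable (mask) half-word in the entry's byte array.
 */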
437#define MVPP2_PRS_TCAM_AI_BYTE 16
438#define MVPP2_PRS_TCAM_PORT_BYTE 17
439#define MVPP2_PRS_TCAM_LU_BYTE 20
440#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
441#define MVPP2_PRS_TCAM_INV_WORD 5
442/* Tcam entries ID */
443#define MVPP2_PE_DROP_ALL 0
444#define MVPP2_PE_FIRST_FREE_TID 1
445#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
446#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
447#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
448#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
449#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
450#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
451#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
452#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
453#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
454#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
455#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
456#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
457#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
458#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
459#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
460#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
461#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
462#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
463#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
464#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
465#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
466#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
467#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
468#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
469#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
470
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
474#define MVPP2_PRS_SRAM_RI_OFFS 0
475#define MVPP2_PRS_SRAM_RI_WORD 0
476#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
477#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
478#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
479#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
480#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
481#define MVPP2_PRS_SRAM_UDF_OFFS 73
482#define MVPP2_PRS_SRAM_UDF_BITS 8
483#define MVPP2_PRS_SRAM_UDF_MASK 0xff
484#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
485#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
486#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
487#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
488#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
489#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
490#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
491#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
492#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
493#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
494#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
495#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
496#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
497#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
498#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
499#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
500#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
501#define MVPP2_PRS_SRAM_AI_OFFS 90
502#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
503#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
504#define MVPP2_PRS_SRAM_AI_MASK 0xff
505#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
506#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
507#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
508#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
509
510/* Sram result info bits assignment */
511#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
512#define MVPP2_PRS_RI_DSA_MASK 0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
516#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
517#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
518#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
519#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
523#define MVPP2_PRS_RI_L2_BCAST BIT(10)
524#define MVPP2_PRS_RI_PPPOE_MASK 0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
528#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
529#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
530#define MVPP2_PRS_RI_L3_IP6 BIT(14)
531#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
532#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
536#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
537#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
538#define MVPP2_PRS_RI_UDF3_MASK 0x300000
539#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
540#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
541#define MVPP2_PRS_RI_L4_TCP BIT(22)
542#define MVPP2_PRS_RI_L4_UDP BIT(23)
543#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
544#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
545#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
546#define MVPP2_PRS_RI_DROP_MASK 0x80000000
547
548/* Sram additional info bits assignment */
549#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
550#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
551#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
552#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
553#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
554#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
555#define MVPP2_PRS_SINGLE_VLAN_AI 0
556#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
557
558/* DSA/EDSA type */
559#define MVPP2_PRS_TAGGED true
560#define MVPP2_PRS_UNTAGGED false
561#define MVPP2_PRS_EDSA true
562#define MVPP2_PRS_DSA false
563
564/* MAC entries, shadow udf */
565enum mvpp2_prs_udf {
566 MVPP2_PRS_UDF_MAC_DEF,
567 MVPP2_PRS_UDF_MAC_RANGE,
568 MVPP2_PRS_UDF_L2_DEF,
569 MVPP2_PRS_UDF_L2_DEF_COPY,
570 MVPP2_PRS_UDF_L2_USER,
571};
572
573/* Lookup ID */
574enum mvpp2_prs_lookup {
575 MVPP2_PRS_LU_MH,
576 MVPP2_PRS_LU_MAC,
577 MVPP2_PRS_LU_DSA,
578 MVPP2_PRS_LU_VLAN,
579 MVPP2_PRS_LU_L2,
580 MVPP2_PRS_LU_PPPOE,
581 MVPP2_PRS_LU_IP4,
582 MVPP2_PRS_LU_IP6,
583 MVPP2_PRS_LU_FLOWS,
584 MVPP2_PRS_LU_LAST,
585};
586
587/* L3 cast enum */
588enum mvpp2_prs_l3_cast {
589 MVPP2_PRS_L3_UNI_CAST,
590 MVPP2_PRS_L3_MULTI_CAST,
591 MVPP2_PRS_L3_BROAD_CAST
592};
593
594/* Classifier constants */
595#define MVPP2_CLS_FLOWS_TBL_SIZE 512
596#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
597#define MVPP2_CLS_LKP_TBL_SIZE 64
598
599/* BM constants */
600#define MVPP2_BM_POOLS_NUM 8
601#define MVPP2_BM_LONG_BUF_NUM 1024
602#define MVPP2_BM_SHORT_BUF_NUM 2048
603#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
604#define MVPP2_BM_POOL_PTR_ALIGN 128
605#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
606#define MVPP2_BM_SWF_SHORT_POOL 3
607
608/* BM cookie (32 bits) definition */
609#define MVPP2_BM_COOKIE_POOL_OFFS 8
610#define MVPP2_BM_COOKIE_CPU_OFFS 24
611
/* BM short pool packet size
 * This value ensures that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
616#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
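/* Illustrative only: MVPP2_BM_SHORT_PKT_SIZE expands to
 * 512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE, i.e. whatever is left of
 * the 512 bytes once the skb headroom and shared-info area are carved
 * out; the exact value depends on NET_SKB_PAD and the size of struct
 * skb_shared_info on the target platform.
 */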
617
618enum mvpp2_bm_type {
619 MVPP2_BM_FREE,
620 MVPP2_BM_SWF_LONG,
621 MVPP2_BM_SWF_SHORT
622};
623
624/* Definitions */
625
626/* Shared Packet Processor resources */
627struct mvpp2 {
628 /* Shared registers' base addresses */
629 void __iomem *base;
630 void __iomem *lms_base;
631
632 /* Common clocks */
633 struct clk *pp_clk;
634 struct clk *gop_clk;
635
636 /* List of pointers to port structures */
637 struct mvpp2_port **port_list;
638
639 /* Aggregated TXQs */
640 struct mvpp2_tx_queue *aggr_txqs;
641
642 /* BM pools */
643 struct mvpp2_bm_pool *bm_pools;
644
645 /* PRS shadow table */
646 struct mvpp2_prs_shadow *prs_shadow;
647 /* PRS auxiliary table for double vlan entries control */
648 bool *prs_double_vlans;
649
650 /* Tclk value */
651 u32 tclk;
652};
653
654struct mvpp2_pcpu_stats {
655 struct u64_stats_sync syncp;
656 u64 rx_packets;
657 u64 rx_bytes;
658 u64 tx_packets;
659 u64 tx_bytes;
660};
661
/* Per-CPU port control */
663struct mvpp2_port_pcpu {
664 struct hrtimer tx_done_timer;
665 bool timer_scheduled;
666 /* Tasklet for egress finalization */
667 struct tasklet_struct tx_done_tasklet;
668};
669
struct mvpp2_port {
671 u8 id;
672
673 int irq;
674
675 struct mvpp2 *priv;
676
677 /* Per-port registers' base address */
678 void __iomem *base;
679
680 struct mvpp2_rx_queue **rxqs;
681 struct mvpp2_tx_queue **txqs;
682 struct net_device *dev;
683
684 int pkt_size;
685
686 u32 pending_cause_rx;
687 struct napi_struct napi;
688
	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
693 unsigned long flags;
694
695 u16 tx_ring_size;
696 u16 rx_ring_size;
697 struct mvpp2_pcpu_stats __percpu *stats;
698
	phy_interface_t phy_interface;
700 struct device_node *phy_node;
701 unsigned int link;
702 unsigned int duplex;
703 unsigned int speed;
704
705 struct mvpp2_bm_pool *pool_long;
706 struct mvpp2_bm_pool *pool_short;
707
708 /* Index of first port's physical RXQ */
709 u8 first_rxq;
710};
711
712/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
713 * layout of the transmit and reception DMA descriptors, and their
714 * layout is therefore defined by the hardware design
715 */
716
717#define MVPP2_TXD_L3_OFF_SHIFT 0
718#define MVPP2_TXD_IP_HLEN_SHIFT 8
719#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
720#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
721#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
722#define MVPP2_TXD_PADDING_DISABLE BIT(23)
723#define MVPP2_TXD_L4_UDP BIT(24)
724#define MVPP2_TXD_L3_IP6 BIT(26)
725#define MVPP2_TXD_L_DESC BIT(28)
726#define MVPP2_TXD_F_DESC BIT(29)
727
728#define MVPP2_RXD_ERR_SUMMARY BIT(15)
729#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
730#define MVPP2_RXD_ERR_CRC 0x0
731#define MVPP2_RXD_ERR_OVERRUN BIT(13)
732#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
733#define MVPP2_RXD_BM_POOL_ID_OFFS 16
734#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
735#define MVPP2_RXD_HWF_SYNC BIT(21)
736#define MVPP2_RXD_L4_CSUM_OK BIT(22)
737#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
738#define MVPP2_RXD_L4_TCP BIT(25)
739#define MVPP2_RXD_L4_UDP BIT(26)
740#define MVPP2_RXD_L3_IP4 BIT(28)
741#define MVPP2_RXD_L3_IP6 BIT(30)
742#define MVPP2_RXD_BUF_HDR BIT(31)
743
744struct mvpp2_tx_desc {
745 u32 command; /* Options used by HW for packet transmitting.*/
746 u8 packet_offset; /* the offset from the buffer beginning */
747 u8 phys_txq; /* destination queue ID */
748 u16 data_size; /* data size of transmitted packet in bytes */
749 u32 buf_phys_addr; /* physical addr of transmitted buffer */
750 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
751 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
752 u32 reserved2; /* reserved (for future use) */
753};
754
755struct mvpp2_rx_desc {
756 u32 status; /* info about received packet */
757 u16 reserved1; /* parser_info (for future use, PnC) */
758 u16 data_size; /* size of received packet in bytes */
759 u32 buf_phys_addr; /* physical address of the buffer */
760 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
761 u16 reserved2; /* gem_port_id (for future use, PON) */
762 u16 reserved3; /* csum_l4 (for future use, PnC) */
763 u8 reserved4; /* bm_qset (for future use, BM) */
764 u8 reserved5;
765 u16 reserved6; /* classify_info (for future use, PnC) */
766 u32 reserved7; /* flow_id (for future use, PnC) */
767 u32 reserved8;
768};
769
struct mvpp2_txq_pcpu_buf {
771 /* Transmitted SKB */
772 struct sk_buff *skb;
773
774 /* Physical address of transmitted buffer */
775 dma_addr_t phys;
776
777 /* Size transmitted */
778 size_t size;
779};
780
/* Per-CPU Tx queue control */
782struct mvpp2_txq_pcpu {
783 int cpu;
784
785 /* Number of Tx DMA descriptors in the descriptor ring */
786 int size;
787
788 /* Number of currently used Tx DMA descriptor in the
789 * descriptor ring
790 */
791 int count;
792
793 /* Number of Tx DMA descriptors reserved for each CPU */
794 int reserved_num;
795
	/* Info about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
800 int txq_put_index;
801
802 /* Index of the TX DMA descriptor to be cleaned up */
803 int txq_get_index;
804};
805
806struct mvpp2_tx_queue {
807 /* Physical number of this Tx queue */
808 u8 id;
809
810 /* Logical number of this Tx queue */
811 u8 log_id;
812
813 /* Number of Tx DMA descriptors in the descriptor ring */
814 int size;
815
816 /* Number of currently used Tx DMA descriptor in the descriptor ring */
817 int count;
818
819 /* Per-CPU control of physical Tx queues */
820 struct mvpp2_txq_pcpu __percpu *pcpu;
821
	u32 done_pkts_coal;
823
	/* Virtual address of the Tx DMA descriptors array */
825 struct mvpp2_tx_desc *descs;
826
827 /* DMA address of the Tx DMA descriptors array */
828 dma_addr_t descs_phys;
829
830 /* Index of the last Tx DMA descriptor */
831 int last_desc;
832
833 /* Index of the next Tx DMA descriptor to process */
834 int next_desc_to_proc;
835};
836
837struct mvpp2_rx_queue {
838 /* RX queue number, in the range 0-31 for physical RXQs */
839 u8 id;
840
841 /* Num of rx descriptors in the rx descriptor ring */
842 int size;
843
844 u32 pkts_coal;
845 u32 time_coal;
846
847 /* Virtual address of the RX DMA descriptors array */
848 struct mvpp2_rx_desc *descs;
849
850 /* DMA address of the RX DMA descriptors array */
851 dma_addr_t descs_phys;
852
853 /* Index of the last RX DMA descriptor */
854 int last_desc;
855
856 /* Index of the next RX DMA descriptor to process */
857 int next_desc_to_proc;
858
859 /* ID of port to which physical RXQ is mapped */
860 int port;
861
862 /* Port's logic RXQ number to which physical RXQ is mapped */
863 int logic_rxq;
864};
865
866union mvpp2_prs_tcam_entry {
867 u32 word[MVPP2_PRS_TCAM_WORDS];
868 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
869};
870
871union mvpp2_prs_sram_entry {
872 u32 word[MVPP2_PRS_SRAM_WORDS];
873 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
874};
875
876struct mvpp2_prs_entry {
877 u32 index;
878 union mvpp2_prs_tcam_entry tcam;
879 union mvpp2_prs_sram_entry sram;
880};
881
882struct mvpp2_prs_shadow {
883 bool valid;
884 bool finish;
885
886 /* Lookup ID */
887 int lu;
888
889 /* User defined offset */
890 int udf;
891
892 /* Result info */
893 u32 ri;
894 u32 ri_mask;
895};
896
897struct mvpp2_cls_flow_entry {
898 u32 index;
899 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
900};
901
902struct mvpp2_cls_lookup_entry {
903 u32 lkpid;
904 u32 way;
905 u32 data;
906};
907
908struct mvpp2_bm_pool {
909 /* Pool number in the range 0-7 */
910 int id;
911 enum mvpp2_bm_type type;
912
913 /* Buffer Pointers Pool External (BPPE) size */
914 int size;
915 /* Number of buffers for this pool */
916 int buf_num;
917 /* Pool buffer size */
918 int buf_size;
919 /* Packet size */
920 int pkt_size;
921
922 /* BPPE virtual base address */
923 u32 *virt_addr;
924 /* BPPE physical base address */
925 dma_addr_t phys_addr;
926
927 /* Ports using BM pool */
928 u32 port_map;
};
930
931struct mvpp2_buff_hdr {
932 u32 next_buff_phys_addr;
933 u32 next_buff_virt_addr;
934 u16 byte_count;
935 u16 info;
936 u8 reserved1; /* bm_qset (for future use, BM) */
937};
938
939/* Buffer header info bits */
940#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
941#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
942#define MVPP2_B_HDR_INFO_LAST_OFFS 12
943#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
944#define MVPP2_B_HDR_INFO_IS_LAST(info) \
945 ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
946
/* Static declarations */
948
949/* Number of RXQs used by single port */
950static int rxq_number = MVPP2_DEFAULT_RXQ;
951/* Number of TXQs used by single port */
952static int txq_number = MVPP2_MAX_TXQ;
953
954#define MVPP2_DRIVER_NAME "mvpp2"
955#define MVPP2_DRIVER_VERSION "1.0"
956
957/* Utility/helper methods */
958
959static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
960{
961 writel(data, priv->base + offset);
962}
963
964static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
965{
966 return readl(priv->base + offset);
967}
968
969static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
970{
971 txq_pcpu->txq_get_index++;
972 if (txq_pcpu->txq_get_index == txq_pcpu->size)
973 txq_pcpu->txq_get_index = 0;
974}
975
static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = tx_desc->data_size;
	tx_buf->phys = tx_desc->buf_phys_addr + tx_desc->packet_offset;
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}
989
990/* Get number of physical egress port */
991static inline int mvpp2_egress_port(struct mvpp2_port *port)
992{
993 return MVPP2_MAX_TCONT + port->id;
994}
995
996/* Get number of physical TXQ */
997static inline int mvpp2_txq_phys(int port, int txq)
998{
999 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1000}
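/* Example: with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8, Ethernet
 * port 0 is egress (physical) port 16, and its logical TXQ 0 is
 * physical TXQ (16 + 0) * 8 + 0 = 128; the numbering in effect
 * reserves the first MVPP2_MAX_TCONT physical ports for PON T-CONTs.
 */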
1001
1002/* Parser configuration routines */
1003
1004/* Update parser tcam and sram hw entries */
1005static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1006{
1007 int i;
1008
1009 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1010 return -EINVAL;
1011
1012 /* Clear entry invalidation bit */
1013 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1014
1015 /* Write tcam index - indirect access */
1016 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1017 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1018 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1019
1020 /* Write sram index - indirect access */
1021 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1022 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1023 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1024
1025 return 0;
1026}
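/* Typical usage in this driver: fill a struct mvpp2_prs_entry (on the
 * stack or kzalloc'ed), pick pe->index, program the match and result
 * through the mvpp2_prs_tcam_*() / mvpp2_prs_sram_*() helpers below,
 * then commit the entry to hardware with mvpp2_prs_hw_write().
 */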
1027
1028/* Read tcam entry from hw */
1029static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1030{
1031 int i;
1032
1033 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1034 return -EINVAL;
1035
1036 /* Write tcam index - indirect access */
1037 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1038
1039 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1040 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1041 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1042 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1043
1044 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1045 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1046
1047 /* Write sram index - indirect access */
1048 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1049 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1050 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1051
1052 return 0;
1053}
1054
1055/* Invalidate tcam hw entry */
1056static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1057{
1058 /* Write index - indirect access */
1059 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1060 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1061 MVPP2_PRS_TCAM_INV_MASK);
1062}
1063
1064/* Enable shadow table entry and set its lookup ID */
1065static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1066{
1067 priv->prs_shadow[index].valid = true;
1068 priv->prs_shadow[index].lu = lu;
1069}
1070
1071/* Update ri fields in shadow table entry */
1072static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1073 unsigned int ri, unsigned int ri_mask)
1074{
1075 priv->prs_shadow[index].ri_mask = ri_mask;
1076 priv->prs_shadow[index].ri = ri;
1077}
1078
1079/* Update lookup field in tcam sw entry */
1080static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1081{
1082 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1083
1084 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1085 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1086}
1087
1088/* Update mask for single port in tcam sw entry */
1089static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1090 unsigned int port, bool add)
1091{
1092 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1093
1094 if (add)
1095 pe->tcam.byte[enable_off] &= ~(1 << port);
1096 else
1097 pe->tcam.byte[enable_off] |= 1 << port;
1098}
1099
1100/* Update port map in tcam sw entry */
1101static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1102 unsigned int ports)
1103{
1104 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1105 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1106
1107 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1108 pe->tcam.byte[enable_off] &= ~port_mask;
1109 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1110}
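/* The port enable byte holds the port bitmap inverted: a cleared bit
 * corresponds to a port that takes part in this TCAM lookup, which is
 * why mvpp2_prs_tcam_port_set() clears the bit to add a port and the
 * getter below re-inverts the byte to recover the port map.
 */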
1111
1112/* Obtain port map from tcam sw entry */
1113static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1114{
1115 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1116
1117 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1118}
1119
1120/* Set byte of data and its enable bits in tcam sw entry */
1121static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1122 unsigned int offs, unsigned char byte,
1123 unsigned char enable)
1124{
1125 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1126 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1127}
1128
1129/* Get byte of data and its enable bits from tcam sw entry */
1130static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1131 unsigned int offs, unsigned char *byte,
1132 unsigned char *enable)
1133{
1134 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1135 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1136}
1137
1138/* Compare tcam data bytes with a pattern */
1139static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1140 u16 data)
1141{
1142 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1143 u16 tcam_data;
1144
	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1146 if (tcam_data != data)
1147 return false;
1148 return true;
1149}
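/* The comparison above treats tcam.byte[off] as the low byte, while
 * mvpp2_prs_match_etype() stores an ethertype with its high byte
 * first; hence mvpp2_prs_vlan_find() below passes the TPID as
 * swab16(tpid) when matching VLAN entries.
 */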
1150
1151/* Update ai bits in tcam sw entry */
1152static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1153 unsigned int bits, unsigned int enable)
1154{
1155 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1156
1157 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1158
1159 if (!(enable & BIT(i)))
1160 continue;
1161
1162 if (bits & BIT(i))
1163 pe->tcam.byte[ai_idx] |= 1 << i;
1164 else
1165 pe->tcam.byte[ai_idx] &= ~(1 << i);
1166 }
1167
1168 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1169}
1170
1171/* Get ai bits from tcam sw entry */
1172static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1173{
1174 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1175}
1176
1177/* Set ethertype in tcam sw entry */
1178static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1179 unsigned short ethertype)
1180{
1181 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1182 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1183}
1184
1185/* Set bits in sram sw entry */
1186static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1187 int val)
1188{
1189 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1190}
1191
1192/* Clear bits in sram sw entry */
1193static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1194 int val)
1195{
1196 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1197}
1198
1199/* Update ri bits in sram sw entry */
1200static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1201 unsigned int bits, unsigned int mask)
1202{
1203 unsigned int i;
1204
1205 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1206 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1207
1208 if (!(mask & BIT(i)))
1209 continue;
1210
1211 if (bits & BIT(i))
1212 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1213 else
1214 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1215
1216 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1217 }
1218}
1219
1220/* Obtain ri bits from sram sw entry */
1221static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1222{
1223 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1224}
1225
1226/* Update ai bits in sram sw entry */
1227static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1228 unsigned int bits, unsigned int mask)
1229{
1230 unsigned int i;
1231 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1232
1233 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1234
1235 if (!(mask & BIT(i)))
1236 continue;
1237
1238 if (bits & BIT(i))
1239 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1240 else
1241 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1242
1243 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1244 }
1245}
1246
1247/* Read ai bits from sram sw entry */
1248static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1249{
1250 u8 bits;
1251 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1252 int ai_en_off = ai_off + 1;
1253 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1254
1255 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1256 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1257
1258 return bits;
1259}
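/* MVPP2_PRS_SRAM_AI_OFFS (90) is not byte aligned, so the 8 AI bits
 * straddle two bytes of the SRAM entry; the getter above stitches the
 * high part of byte 11 and the low part of byte 12 back together.
 */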
1260
/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
1264static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1265 unsigned int lu)
1266{
1267 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1268
1269 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1270 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1271 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1272}
1273
1274/* In the sram sw entry set sign and value of the next lookup offset
1275 * and the offset value generated to the classifier
1276 */
1277static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1278 unsigned int op)
1279{
1280 /* Set sign */
1281 if (shift < 0) {
1282 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1283 shift = 0 - shift;
1284 } else {
1285 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1286 }
1287
1288 /* Set value */
1289 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1290 (unsigned char)shift;
1291
1292 /* Reset and set operation */
1293 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1294 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1295 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1296
1297 /* Set base offset as current */
1298 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1299}
1300
1301/* In the sram sw entry set sign and value of the user defined offset
1302 * generated to the classifier
1303 */
1304static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1305 unsigned int type, int offset,
1306 unsigned int op)
1307{
1308 /* Set sign */
1309 if (offset < 0) {
1310 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1311 offset = 0 - offset;
1312 } else {
1313 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1314 }
1315
1316 /* Set value */
1317 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1318 MVPP2_PRS_SRAM_UDF_MASK);
1319 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1320 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1321 MVPP2_PRS_SRAM_UDF_BITS)] &=
1322 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1323 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1324 MVPP2_PRS_SRAM_UDF_BITS)] |=
1325 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1326
1327 /* Set offset type */
1328 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1329 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1330 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1331
1332 /* Set offset operation */
1333 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1334 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1335 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1336
1337 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1338 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1339 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1340 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1341
1342 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1343 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1344 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1345
1346 /* Set base offset as current */
1347 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1348}
1349
1350/* Find parser flow entry */
1351static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1352{
1353 struct mvpp2_prs_entry *pe;
1354 int tid;
1355
1356 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1357 if (!pe)
1358 return NULL;
1359 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1360
	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
1362 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1363 u8 bits;
1364
1365 if (!priv->prs_shadow[tid].valid ||
1366 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1367 continue;
1368
1369 pe->index = tid;
1370 mvpp2_prs_hw_read(priv, pe);
1371 bits = mvpp2_prs_sram_ai_get(pe);
1372
		/* Sram stores classification lookup ID in AI bits [5:0] */
1374 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1375 return pe;
1376 }
1377 kfree(pe);
1378
1379 return NULL;
1380}
1381
1382/* Return first free tcam index, seeking from start to end */
1383static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1384 unsigned char end)
1385{
1386 int tid;
1387
1388 if (start > end)
1389 swap(start, end);
1390
1391 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1392 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1393
1394 for (tid = start; tid <= end; tid++) {
1395 if (!priv->prs_shadow[tid].valid)
1396 return tid;
1397 }
1398
1399 return -EINVAL;
1400}
1401
1402/* Enable/disable dropping all mac da's */
1403static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1404{
1405 struct mvpp2_prs_entry pe;
1406
1407 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1408 /* Entry exist - update port only */
1409 pe.index = MVPP2_PE_DROP_ALL;
1410 mvpp2_prs_hw_read(priv, &pe);
1411 } else {
1412 /* Entry doesn't exist - create new */
1413 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1414 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1415 pe.index = MVPP2_PE_DROP_ALL;
1416
1417 /* Non-promiscuous mode for all ports - DROP unknown packets */
1418 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1419 MVPP2_PRS_RI_DROP_MASK);
1420
1421 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1422 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1423
1424 /* Update shadow table */
1425 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1426
1427 /* Mask all ports */
1428 mvpp2_prs_tcam_port_map_set(&pe, 0);
1429 }
1430
1431 /* Update port mask */
1432 mvpp2_prs_tcam_port_set(&pe, port, add);
1433
1434 mvpp2_prs_hw_write(priv, &pe);
1435}
1436
1437/* Set port to promiscuous mode */
1438static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1439{
1440 struct mvpp2_prs_entry pe;
1441
	/* Promiscuous mode - Accept unknown packets */

1444 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1445 /* Entry exist - update port only */
1446 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1447 mvpp2_prs_hw_read(priv, &pe);
1448 } else {
1449 /* Entry doesn't exist - create new */
1450 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1451 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1452 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1453
1454 /* Continue - set next lookup */
1455 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1456
1457 /* Set result info bits */
1458 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1459 MVPP2_PRS_RI_L2_CAST_MASK);
1460
1461 /* Shift to ethertype */
1462 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1463 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1464
1465 /* Mask all ports */
1466 mvpp2_prs_tcam_port_map_set(&pe, 0);
1467
1468 /* Update shadow table */
1469 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1470 }
1471
1472 /* Update port mask */
1473 mvpp2_prs_tcam_port_set(&pe, port, add);
1474
1475 mvpp2_prs_hw_write(priv, &pe);
1476}
1477
1478/* Accept multicast */
1479static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1480 bool add)
1481{
1482 struct mvpp2_prs_entry pe;
1483 unsigned char da_mc;
1484
1485 /* Ethernet multicast address first byte is
1486 * 0x01 for IPv4 and 0x33 for IPv6
1487 */
1488 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1489
1490 if (priv->prs_shadow[index].valid) {
1491 /* Entry exist - update port only */
1492 pe.index = index;
1493 mvpp2_prs_hw_read(priv, &pe);
1494 } else {
1495 /* Entry doesn't exist - create new */
1496 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1497 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1498 pe.index = index;
1499
1500 /* Continue - set next lookup */
1501 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1502
1503 /* Set result info bits */
1504 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1505 MVPP2_PRS_RI_L2_CAST_MASK);
1506
1507 /* Update tcam entry data first byte */
1508 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1509
1510 /* Shift to ethertype */
1511 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1512 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1513
1514 /* Mask all ports */
1515 mvpp2_prs_tcam_port_map_set(&pe, 0);
1516
1517 /* Update shadow table */
1518 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1519 }
1520
1521 /* Update port mask */
1522 mvpp2_prs_tcam_port_set(&pe, port, add);
1523
1524 mvpp2_prs_hw_write(priv, &pe);
1525}
1526
1527/* Set entry for dsa packets */
1528static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1529 bool tagged, bool extend)
1530{
1531 struct mvpp2_prs_entry pe;
1532 int tid, shift;
1533
1534 if (extend) {
1535 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1536 shift = 8;
1537 } else {
1538 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1539 shift = 4;
1540 }
1541
1542 if (priv->prs_shadow[tid].valid) {
1543 /* Entry exist - update port only */
1544 pe.index = tid;
1545 mvpp2_prs_hw_read(priv, &pe);
1546 } else {
1547 /* Entry doesn't exist - create new */
1548 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1549 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1550 pe.index = tid;
1551
		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
1553 mvpp2_prs_sram_shift_set(&pe, shift,
1554 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1555
1556 /* Update shadow table */
1557 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1558
1559 if (tagged) {
1560 /* Set tagged bit in DSA tag */
1561 mvpp2_prs_tcam_data_byte_set(&pe, 0,
1562 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1563 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1564 /* Clear all ai bits for next iteration */
1565 mvpp2_prs_sram_ai_update(&pe, 0,
1566 MVPP2_PRS_SRAM_AI_MASK);
1567 /* If packet is tagged continue check vlans */
1568 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1569 } else {
1570 /* Set result info bits to 'no vlans' */
1571 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1572 MVPP2_PRS_RI_VLAN_MASK);
1573 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1574 }
1575
1576 /* Mask all ports */
1577 mvpp2_prs_tcam_port_map_set(&pe, 0);
1578 }
1579
1580 /* Update port mask */
1581 mvpp2_prs_tcam_port_set(&pe, port, add);
1582
1583 mvpp2_prs_hw_write(priv, &pe);
1584}
1585
1586/* Set entry for dsa ethertype */
1587static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1588 bool add, bool tagged, bool extend)
1589{
1590 struct mvpp2_prs_entry pe;
1591 int tid, shift, port_mask;
1592
1593 if (extend) {
1594 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1595 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1596 port_mask = 0;
1597 shift = 8;
1598 } else {
1599 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1600 MVPP2_PE_ETYPE_DSA_UNTAGGED;
1601 port_mask = MVPP2_PRS_PORT_MASK;
1602 shift = 4;
1603 }
1604
1605 if (priv->prs_shadow[tid].valid) {
1606 /* Entry exist - update port only */
1607 pe.index = tid;
1608 mvpp2_prs_hw_read(priv, &pe);
1609 } else {
1610 /* Entry doesn't exist - create new */
1611 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1612 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1613 pe.index = tid;
1614
1615 /* Set ethertype */
1616 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
1617 mvpp2_prs_match_etype(&pe, 2, 0);
1618
1619 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
1620 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 bytes reserved + tag */
1622 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
1623 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1624
1625 /* Update shadow table */
1626 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1627
1628 if (tagged) {
1629 /* Set tagged bit in DSA tag */
1630 mvpp2_prs_tcam_data_byte_set(&pe,
1631 MVPP2_ETH_TYPE_LEN + 2 + 3,
1632 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1633 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1634 /* Clear all ai bits for next iteration */
1635 mvpp2_prs_sram_ai_update(&pe, 0,
1636 MVPP2_PRS_SRAM_AI_MASK);
1637			/* If packet is tagged, continue checking vlans */
1638 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1639 } else {
1640 /* Set result info bits to 'no vlans' */
1641 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1642 MVPP2_PRS_RI_VLAN_MASK);
1643 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1644 }
1645 /* Mask/unmask all ports, depending on dsa type */
1646 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
1647 }
1648
1649 /* Update port mask */
1650 mvpp2_prs_tcam_port_set(&pe, port, add);
1651
1652 mvpp2_prs_hw_write(priv, &pe);
1653}
1654
1655/* Search for existing single/triple vlan entry */
1656static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
1657 unsigned short tpid, int ai)
1658{
1659 struct mvpp2_prs_entry *pe;
1660 int tid;
1661
1662 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1663 if (!pe)
1664 return NULL;
1665 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1666
1667	/* Go through all entries with MVPP2_PRS_LU_VLAN */
1668 for (tid = MVPP2_PE_FIRST_FREE_TID;
1669 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1670 unsigned int ri_bits, ai_bits;
1671 bool match;
1672
1673 if (!priv->prs_shadow[tid].valid ||
1674 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1675 continue;
1676
1677 pe->index = tid;
1678
1679 mvpp2_prs_hw_read(priv, pe);
1680 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
1681 if (!match)
1682 continue;
1683
1684 /* Get vlan type */
1685 ri_bits = mvpp2_prs_sram_ri_get(pe);
1686 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1687
1688 /* Get current ai value from tcam */
1689 ai_bits = mvpp2_prs_tcam_ai_get(pe);
1690 /* Clear double vlan bit */
1691 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
1692
1693 if (ai != ai_bits)
1694 continue;
1695
1696 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1697 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1698 return pe;
1699 }
1700 kfree(pe);
1701
1702 return NULL;
1703}
1704
1705/* Add/update single/triple vlan entry */
1706static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1707 unsigned int port_map)
1708{
1709 struct mvpp2_prs_entry *pe;
1710 int tid_aux, tid;
Sudip Mukherjee43737472014-11-01 16:59:34 +05301711 int ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001712
1713 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1714
1715 if (!pe) {
1716 /* Create new tcam entry */
1717 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
1718 MVPP2_PE_FIRST_FREE_TID);
1719 if (tid < 0)
1720 return tid;
1721
1722 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1723 if (!pe)
1724 return -ENOMEM;
1725
1726 /* Get last double vlan tid */
1727 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
1728 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
1729 unsigned int ri_bits;
1730
1731 if (!priv->prs_shadow[tid_aux].valid ||
1732 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1733 continue;
1734
1735 pe->index = tid_aux;
1736 mvpp2_prs_hw_read(priv, pe);
1737 ri_bits = mvpp2_prs_sram_ri_get(pe);
1738 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
1739 MVPP2_PRS_RI_VLAN_DOUBLE)
1740 break;
1741 }
1742
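		/* The new single/triple VLAN entry must end up at a higher TCAM
		 * index than every double VLAN entry, on the assumption that
		 * lower indices win when several entries match, so double-tagged
		 * packets keep hitting the more specific double VLAN entries.
		 */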
Sudip Mukherjee43737472014-11-01 16:59:34 +05301743 if (tid <= tid_aux) {
1744 ret = -EINVAL;
1745 goto error;
1746 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03001747
1748		memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1749 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1750 pe->index = tid;
1751
1752 mvpp2_prs_match_etype(pe, 0, tpid);
1753
1754 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
1755 /* Shift 4 bytes - skip 1 vlan tag */
1756 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
1757 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1758 /* Clear all ai bits for next iteration */
1759 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1760
1761 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
1762 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
1763 MVPP2_PRS_RI_VLAN_MASK);
1764 } else {
1765 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
1766 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
1767 MVPP2_PRS_RI_VLAN_MASK);
1768 }
1769 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
1770
1771 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1772 }
1773 /* Update ports' mask */
1774 mvpp2_prs_tcam_port_map_set(pe, port_map);
1775
1776 mvpp2_prs_hw_write(priv, pe);
1777
Sudip Mukherjee43737472014-11-01 16:59:34 +05301778error:
Marcin Wojtas3f518502014-07-10 16:52:13 -03001779 kfree(pe);
1780
Sudip Mukherjee43737472014-11-01 16:59:34 +05301781 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001782}
1783
1784/* Get first free double vlan ai number */
1785static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
1786{
1787 int i;
1788
1789 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
1790 if (!priv->prs_double_vlans[i])
1791 return i;
1792 }
1793
1794 return -EINVAL;
1795}
1796
1797/* Search for existing double vlan entry */
1798static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
1799 unsigned short tpid1,
1800 unsigned short tpid2)
1801{
1802 struct mvpp2_prs_entry *pe;
1803 int tid;
1804
1805 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1806 if (!pe)
1807 return NULL;
1808 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1809
1810	/* Go through all entries with MVPP2_PRS_LU_VLAN */
1811 for (tid = MVPP2_PE_FIRST_FREE_TID;
1812 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1813 unsigned int ri_mask;
1814 bool match;
1815
1816 if (!priv->prs_shadow[tid].valid ||
1817 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1818 continue;
1819
1820 pe->index = tid;
1821 mvpp2_prs_hw_read(priv, pe);
1822
1823 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
1824 && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
1825
1826 if (!match)
1827 continue;
1828
1829 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
1830 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
1831 return pe;
1832 }
1833 kfree(pe);
1834
1835 return NULL;
1836}
1837
1838/* Add or update double vlan entry */
1839static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1840 unsigned short tpid2,
1841 unsigned int port_map)
1842{
1843 struct mvpp2_prs_entry *pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05301844 int tid_aux, tid, ai, ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001845
1846 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
1847
1848 if (!pe) {
1849 /* Create new tcam entry */
1850 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1851 MVPP2_PE_LAST_FREE_TID);
1852 if (tid < 0)
1853 return tid;
1854
1855 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1856 if (!pe)
1857 return -ENOMEM;
1858
1859 /* Set ai value for new double vlan entry */
1860 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
Sudip Mukherjee43737472014-11-01 16:59:34 +05301861 if (ai < 0) {
1862 ret = ai;
1863 goto error;
1864 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03001865
1866 /* Get first single/triple vlan tid */
1867 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
1868 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
1869 unsigned int ri_bits;
1870
1871 if (!priv->prs_shadow[tid_aux].valid ||
1872 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1873 continue;
1874
1875 pe->index = tid_aux;
1876 mvpp2_prs_hw_read(priv, pe);
1877 ri_bits = mvpp2_prs_sram_ri_get(pe);
1878 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1879 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1880 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1881 break;
1882 }
1883
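		/* Conversely, a double VLAN entry must sit at a lower TCAM index
		 * than every single/triple VLAN entry so that it is matched
		 * first.
		 */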
Sudip Mukherjee43737472014-11-01 16:59:34 +05301884 if (tid >= tid_aux) {
1885 ret = -ERANGE;
1886 goto error;
1887 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03001888
1889 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1890 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1891 pe->index = tid;
1892
1893 priv->prs_double_vlans[ai] = true;
1894
1895 mvpp2_prs_match_etype(pe, 0, tpid1);
1896 mvpp2_prs_match_etype(pe, 4, tpid2);
1897
1898 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
1899 /* Shift 8 bytes - skip 2 vlan tags */
1900 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
1901 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1902 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1903 MVPP2_PRS_RI_VLAN_MASK);
1904 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
1905 MVPP2_PRS_SRAM_AI_MASK);
1906
1907 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
1908 }
1909
1910 /* Update ports' mask */
1911 mvpp2_prs_tcam_port_map_set(pe, port_map);
1912 mvpp2_prs_hw_write(priv, pe);
1913
Sudip Mukherjee43737472014-11-01 16:59:34 +05301914error:
Marcin Wojtas3f518502014-07-10 16:52:13 -03001915 kfree(pe);
Sudip Mukherjee43737472014-11-01 16:59:34 +05301916 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001917}
1918
1919/* IPv4 header parsing for fragmentation and L4 offset */
1920static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
1921 unsigned int ri, unsigned int ri_mask)
1922{
1923 struct mvpp2_prs_entry pe;
1924 int tid;
1925
1926 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
1927 (proto != IPPROTO_IGMP))
1928 return -EINVAL;
1929
1930 /* Fragmented packet */
1931 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1932 MVPP2_PE_LAST_FREE_TID);
1933 if (tid < 0)
1934 return tid;
1935
1936 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1937 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1938 pe.index = tid;
1939
1940 /* Set next lu to IPv4 */
1941 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1942 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
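	/* The parser window already starts 4 bytes into the IPv4 header at
	 * this stage, so the 12-byte shift above lands on the destination
	 * address and the L4 offset below (iphdr size - 4) points just past
	 * a 20-byte option-less header.
	 */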
1943 /* Set L4 offset */
1944 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1945 sizeof(struct iphdr) - 4,
1946 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1947 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
1948 MVPP2_PRS_IPV4_DIP_AI_BIT);
1949 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
1950 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
1951
1952 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
1953 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
1954 /* Unmask all ports */
1955 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1956
1957 /* Update shadow table and hw entry */
1958 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1959 mvpp2_prs_hw_write(priv, &pe);
1960
1961 /* Not fragmented packet */
1962 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1963 MVPP2_PE_LAST_FREE_TID);
1964 if (tid < 0)
1965 return tid;
1966
1967 pe.index = tid;
1968 /* Clear ri before updating */
1969 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1970 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
1971 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
1972
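	/* Bytes 2-3 of the TCAM window map to the IPv4 flags and fragment
	 * offset field here; requiring them to be zero selects only
	 * non-fragmented packets.
	 */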
1973 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
1974 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
1975
1976 /* Update shadow table and hw entry */
1977 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1978 mvpp2_prs_hw_write(priv, &pe);
1979
1980 return 0;
1981}
1982
1983/* IPv4 L3 multicast or broadcast */
1984static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
1985{
1986 struct mvpp2_prs_entry pe;
1987 int mask, tid;
1988
1989 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1990 MVPP2_PE_LAST_FREE_TID);
1991 if (tid < 0)
1992 return tid;
1993
1994 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1995 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
1996 pe.index = tid;
1997
1998 switch (l3_cast) {
1999 case MVPP2_PRS_L3_MULTI_CAST:
2000 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2001 MVPP2_PRS_IPV4_MC_MASK);
2002 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2003 MVPP2_PRS_RI_L3_ADDR_MASK);
2004 break;
2005 case MVPP2_PRS_L3_BROAD_CAST:
2006 mask = MVPP2_PRS_IPV4_BC_MASK;
2007 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2008 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2009 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2010 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2011 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2012 MVPP2_PRS_RI_L3_ADDR_MASK);
2013 break;
2014 default:
2015 return -EINVAL;
2016 }
2017
2018 /* Finished: go to flowid generation */
2019 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2020 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2021
2022 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2023 MVPP2_PRS_IPV4_DIP_AI_BIT);
2024 /* Unmask all ports */
2025 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2026
2027 /* Update shadow table and hw entry */
2028 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2029 mvpp2_prs_hw_write(priv, &pe);
2030
2031 return 0;
2032}
2033
2034/* Set entries for protocols over IPv6 */
2035static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2036 unsigned int ri, unsigned int ri_mask)
2037{
2038 struct mvpp2_prs_entry pe;
2039 int tid;
2040
2041 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2042 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2043 return -EINVAL;
2044
2045 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2046 MVPP2_PE_LAST_FREE_TID);
2047 if (tid < 0)
2048 return tid;
2049
2050 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2051 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2052 pe.index = tid;
2053
2054 /* Finished: go to flowid generation */
2055 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2056 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2057 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2058 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2059 sizeof(struct ipv6hdr) - 6,
2060 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2061
2062 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2063 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2064 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2065 /* Unmask all ports */
2066 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2067
2068 /* Write HW */
2069 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2070 mvpp2_prs_hw_write(priv, &pe);
2071
2072 return 0;
2073}
2074
2075/* IPv6 L3 multicast entry */
2076static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2077{
2078 struct mvpp2_prs_entry pe;
2079 int tid;
2080
2081 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2082 return -EINVAL;
2083
2084 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2085 MVPP2_PE_LAST_FREE_TID);
2086 if (tid < 0)
2087 return tid;
2088
2089 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2090 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2091 pe.index = tid;
2092
2093	/* Go back to IPv6 lookup to parse the next header */
2094 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2095 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2096 MVPP2_PRS_RI_L3_ADDR_MASK);
2097 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2098 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2099 /* Shift back to IPv6 NH */
2100 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2101
2102 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2103 MVPP2_PRS_IPV6_MC_MASK);
2104 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2105 /* Unmask all ports */
2106 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2107
2108 /* Update shadow table and hw entry */
2109 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2110 mvpp2_prs_hw_write(priv, &pe);
2111
2112 return 0;
2113}
2114
2115/* Parser per-port initialization */
2116static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2117 int lu_max, int offset)
2118{
2119 u32 val;
2120
2121 /* Set lookup ID */
2122 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2123 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2124 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2125 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2126
2127 /* Set maximum number of loops for packet received from port */
2128 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2129 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2130 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2131 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2132
2133 /* Set initial offset for packet header extraction for the first
2134 * searching loop
2135 */
2136 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2137 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2138 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2139 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2140}
2141
2142/* Default flow entries initialization for all ports */
2143static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2144{
2145 struct mvpp2_prs_entry pe;
2146 int port;
2147
2148 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2149 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2150 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2151 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2152
2153 /* Mask all ports */
2154 mvpp2_prs_tcam_port_map_set(&pe, 0);
2155
2156		/* Set flow ID */
2157 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2158 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2159
2160 /* Update shadow table and hw entry */
2161 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2162 mvpp2_prs_hw_write(priv, &pe);
2163 }
2164}
2165
2166/* Set default entry for Marvell Header field */
2167static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2168{
2169 struct mvpp2_prs_entry pe;
2170
2171 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2172
2173 pe.index = MVPP2_PE_MH_DEFAULT;
2174 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2175 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2176 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2177 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2178
2179 /* Unmask all ports */
2180 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2181
2182 /* Update shadow table and hw entry */
2183 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2184 mvpp2_prs_hw_write(priv, &pe);
2185}
2186
2187/* Set default entries (placeholders) for promiscuous, non-promiscuous and
2188 * multicast MAC addresses
2189 */
2190static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2191{
2192 struct mvpp2_prs_entry pe;
2193
2194 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2195
2196 /* Non-promiscuous mode for all ports - DROP unknown packets */
2197 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2198 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2199
2200 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2201 MVPP2_PRS_RI_DROP_MASK);
2202 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2203 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2204
2205 /* Unmask all ports */
2206 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2207
2208 /* Update shadow table and hw entry */
2209 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2210 mvpp2_prs_hw_write(priv, &pe);
2211
2212	/* placeholders only - no ports */
2213 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2214 mvpp2_prs_mac_promisc_set(priv, 0, false);
2215 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2216 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2217}
2218
2219/* Set default entries for various types of dsa packets */
2220static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2221{
2222 struct mvpp2_prs_entry pe;
2223
2224	/* Untagged EDSA entry - placeholder */
2225 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2226 MVPP2_PRS_EDSA);
2227
2228	/* Tagged EDSA entry - placeholder */
2229 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2230
2231	/* Untagged DSA entry - placeholder */
2232 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2233 MVPP2_PRS_DSA);
2234
2235	/* Tagged DSA entry - placeholder */
2236 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2237
2238	/* Untagged EDSA ethertype entry - placeholder */
2239 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2240 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2241
2242	/* Tagged EDSA ethertype entry - placeholder */
2243 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2244 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2245
2246	/* Untagged DSA ethertype entry */
2247 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2248 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2249
2250 /* Tagged DSA ethertype entry */
2251 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2252 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2253
2254 /* Set default entry, in case DSA or EDSA tag not found */
2255 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2256 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2257 pe.index = MVPP2_PE_DSA_DEFAULT;
2258 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2259
2260 /* Shift 0 bytes */
2261 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2262 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2263
2264 /* Clear all sram ai bits for next iteration */
2265 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2266
2267 /* Unmask all ports */
2268 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2269
2270 mvpp2_prs_hw_write(priv, &pe);
2271}
2272
2273/* Match basic ethertypes */
2274static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2275{
2276 struct mvpp2_prs_entry pe;
2277 int tid;
2278
2279 /* Ethertype: PPPoE */
2280 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2281 MVPP2_PE_LAST_FREE_TID);
2282 if (tid < 0)
2283 return tid;
2284
2285 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2286 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2287 pe.index = tid;
2288
2289 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2290
2291 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2292 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2293 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2294 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2295 MVPP2_PRS_RI_PPPOE_MASK);
2296
2297 /* Update shadow table and hw entry */
2298 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2299 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2300 priv->prs_shadow[pe.index].finish = false;
2301 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2302 MVPP2_PRS_RI_PPPOE_MASK);
2303 mvpp2_prs_hw_write(priv, &pe);
2304
2305 /* Ethertype: ARP */
2306 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2307 MVPP2_PE_LAST_FREE_TID);
2308 if (tid < 0)
2309 return tid;
2310
2311 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2312 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2313 pe.index = tid;
2314
2315 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2316
2317	/* Generate flow in the next iteration */
2318 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2319 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2320 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2321 MVPP2_PRS_RI_L3_PROTO_MASK);
2322 /* Set L3 offset */
2323 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2324 MVPP2_ETH_TYPE_LEN,
2325 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2326
2327 /* Update shadow table and hw entry */
2328 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2329 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2330 priv->prs_shadow[pe.index].finish = true;
2331 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2332 MVPP2_PRS_RI_L3_PROTO_MASK);
2333 mvpp2_prs_hw_write(priv, &pe);
2334
2335 /* Ethertype: LBTD */
2336 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2337 MVPP2_PE_LAST_FREE_TID);
2338 if (tid < 0)
2339 return tid;
2340
2341 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2342 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2343 pe.index = tid;
2344
2345 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2346
2347	/* Generate flow in the next iteration */
2348 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2349 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2350 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2351 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2352 MVPP2_PRS_RI_CPU_CODE_MASK |
2353 MVPP2_PRS_RI_UDF3_MASK);
2354 /* Set L3 offset */
2355 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2356 MVPP2_ETH_TYPE_LEN,
2357 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2358
2359 /* Update shadow table and hw entry */
2360 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2361 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2362 priv->prs_shadow[pe.index].finish = true;
2363 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2364 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2365 MVPP2_PRS_RI_CPU_CODE_MASK |
2366 MVPP2_PRS_RI_UDF3_MASK);
2367 mvpp2_prs_hw_write(priv, &pe);
2368
2369 /* Ethertype: IPv4 without options */
2370 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2371 MVPP2_PE_LAST_FREE_TID);
2372 if (tid < 0)
2373 return tid;
2374
2375 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2376 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2377 pe.index = tid;
2378
2379 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2380 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2381 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2382 MVPP2_PRS_IPV4_HEAD_MASK |
2383 MVPP2_PRS_IPV4_IHL_MASK);
2384
2385 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2386 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2387 MVPP2_PRS_RI_L3_PROTO_MASK);
2388 /* Skip eth_type + 4 bytes of IP header */
2389 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2390 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2391 /* Set L3 offset */
2392 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2393 MVPP2_ETH_TYPE_LEN,
2394 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2395
2396 /* Update shadow table and hw entry */
2397 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2398 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2399 priv->prs_shadow[pe.index].finish = false;
2400 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2401 MVPP2_PRS_RI_L3_PROTO_MASK);
2402 mvpp2_prs_hw_write(priv, &pe);
2403
2404 /* Ethertype: IPv4 with options */
2405 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2406 MVPP2_PE_LAST_FREE_TID);
2407 if (tid < 0)
2408 return tid;
2409
2410 pe.index = tid;
2411
2412 /* Clear tcam data before updating */
2413 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2414 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2415
2416 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2417 MVPP2_PRS_IPV4_HEAD,
2418 MVPP2_PRS_IPV4_HEAD_MASK);
2419
2420 /* Clear ri before updating */
2421 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2422 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2423 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2424 MVPP2_PRS_RI_L3_PROTO_MASK);
2425
2426 /* Update shadow table and hw entry */
2427 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2428 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2429 priv->prs_shadow[pe.index].finish = false;
2430 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2431 MVPP2_PRS_RI_L3_PROTO_MASK);
2432 mvpp2_prs_hw_write(priv, &pe);
2433
2434 /* Ethertype: IPv6 without options */
2435 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2436 MVPP2_PE_LAST_FREE_TID);
2437 if (tid < 0)
2438 return tid;
2439
2440 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2441 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2442 pe.index = tid;
2443
2444 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2445
2446 /* Skip DIP of IPV6 header */
2447 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2448 MVPP2_MAX_L3_ADDR_SIZE,
2449 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2450 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2451 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2452 MVPP2_PRS_RI_L3_PROTO_MASK);
2453 /* Set L3 offset */
2454 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2455 MVPP2_ETH_TYPE_LEN,
2456 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2457
2458 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2459 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2460 priv->prs_shadow[pe.index].finish = false;
2461 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2462 MVPP2_PRS_RI_L3_PROTO_MASK);
2463 mvpp2_prs_hw_write(priv, &pe);
2464
2465 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2466 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2467 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2468 pe.index = MVPP2_PE_ETH_TYPE_UN;
2469
2470 /* Unmask all ports */
2471 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2472
2473	/* Generate flow in the next iteration */
2474 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2475 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2476 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2477 MVPP2_PRS_RI_L3_PROTO_MASK);
2478	/* Set L3 offset even if it's unknown L3 */
2479 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2480 MVPP2_ETH_TYPE_LEN,
2481 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2482
2483 /* Update shadow table and hw entry */
2484 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2485 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2486 priv->prs_shadow[pe.index].finish = true;
2487 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2488 MVPP2_PRS_RI_L3_PROTO_MASK);
2489 mvpp2_prs_hw_write(priv, &pe);
2490
2491 return 0;
2492}
2493
2494/* Configure vlan entries and detect up to 2 successive VLAN tags.
2495 * Possible options:
2496 * 0x8100, 0x88A8
2497 * 0x8100, 0x8100
2498 * 0x8100
2499 * 0x88A8
2500 */
2501static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2502{
2503 struct mvpp2_prs_entry pe;
2504 int err;
2505
2506	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, MVPP2_PRS_DBL_VLANS_MAX,
2507					       sizeof(bool),
2508 GFP_KERNEL);
2509 if (!priv->prs_double_vlans)
2510 return -ENOMEM;
2511
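	/* Install double VLAN entries before the single VLAN ones so they
	 * take the lower (higher priority) TCAM indices.
	 */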
2512 /* Double VLAN: 0x8100, 0x88A8 */
2513 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2514 MVPP2_PRS_PORT_MASK);
2515 if (err)
2516 return err;
2517
2518 /* Double VLAN: 0x8100, 0x8100 */
2519 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2520 MVPP2_PRS_PORT_MASK);
2521 if (err)
2522 return err;
2523
2524 /* Single VLAN: 0x88a8 */
2525 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2526 MVPP2_PRS_PORT_MASK);
2527 if (err)
2528 return err;
2529
2530 /* Single VLAN: 0x8100 */
2531 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2532 MVPP2_PRS_PORT_MASK);
2533 if (err)
2534 return err;
2535
2536 /* Set default double vlan entry */
2537 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2538 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2539 pe.index = MVPP2_PE_VLAN_DBL;
2540
2541 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2542 /* Clear ai for next iterations */
2543 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2544 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2545 MVPP2_PRS_RI_VLAN_MASK);
2546
2547 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2548 MVPP2_PRS_DBL_VLAN_AI_BIT);
2549 /* Unmask all ports */
2550 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2551
2552 /* Update shadow table and hw entry */
2553 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2554 mvpp2_prs_hw_write(priv, &pe);
2555
2556 /* Set default vlan none entry */
2557 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2558 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2559 pe.index = MVPP2_PE_VLAN_NONE;
2560
2561 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2562 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2563 MVPP2_PRS_RI_VLAN_MASK);
2564
2565 /* Unmask all ports */
2566 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2567
2568 /* Update shadow table and hw entry */
2569 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2570 mvpp2_prs_hw_write(priv, &pe);
2571
2572 return 0;
2573}
2574
2575/* Set entries for PPPoE ethertype */
2576static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2577{
2578 struct mvpp2_prs_entry pe;
2579 int tid;
2580
2581 /* IPv4 over PPPoE with options */
2582 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2583 MVPP2_PE_LAST_FREE_TID);
2584 if (tid < 0)
2585 return tid;
2586
2587 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2588 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2589 pe.index = tid;
2590
2591 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2592
2593 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2594 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2595 MVPP2_PRS_RI_L3_PROTO_MASK);
2596 /* Skip eth_type + 4 bytes of IP header */
2597 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2598 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2599 /* Set L3 offset */
2600 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2601 MVPP2_ETH_TYPE_LEN,
2602 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2603
2604 /* Update shadow table and hw entry */
2605 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2606 mvpp2_prs_hw_write(priv, &pe);
2607
2608 /* IPv4 over PPPoE without options */
2609 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2610 MVPP2_PE_LAST_FREE_TID);
2611 if (tid < 0)
2612 return tid;
2613
2614 pe.index = tid;
2615
2616 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2617 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2618 MVPP2_PRS_IPV4_HEAD_MASK |
2619 MVPP2_PRS_IPV4_IHL_MASK);
2620
2621 /* Clear ri before updating */
2622 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2623 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2624 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2625 MVPP2_PRS_RI_L3_PROTO_MASK);
2626
2627 /* Update shadow table and hw entry */
2628 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2629 mvpp2_prs_hw_write(priv, &pe);
2630
2631 /* IPv6 over PPPoE */
2632 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2633 MVPP2_PE_LAST_FREE_TID);
2634 if (tid < 0)
2635 return tid;
2636
2637 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2638 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2639 pe.index = tid;
2640
2641 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2642
2643 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2644 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2645 MVPP2_PRS_RI_L3_PROTO_MASK);
2646 /* Skip eth_type + 4 bytes of IPv6 header */
2647 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2648 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2649 /* Set L3 offset */
2650 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2651 MVPP2_ETH_TYPE_LEN,
2652 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2653
2654 /* Update shadow table and hw entry */
2655 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2656 mvpp2_prs_hw_write(priv, &pe);
2657
2658 /* Non-IP over PPPoE */
2659 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2660 MVPP2_PE_LAST_FREE_TID);
2661 if (tid < 0)
2662 return tid;
2663
2664 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2665 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2666 pe.index = tid;
2667
2668 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2669 MVPP2_PRS_RI_L3_PROTO_MASK);
2670
2671 /* Finished: go to flowid generation */
2672 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2673 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2674 /* Set L3 offset even if it's unknown L3 */
2675 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2676 MVPP2_ETH_TYPE_LEN,
2677 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2678
2679 /* Update shadow table and hw entry */
2680 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2681 mvpp2_prs_hw_write(priv, &pe);
2682
2683 return 0;
2684}
2685
2686/* Initialize entries for IPv4 */
2687static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2688{
2689 struct mvpp2_prs_entry pe;
2690 int err;
2691
2692 /* Set entries for TCP, UDP and IGMP over IPv4 */
2693 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2694 MVPP2_PRS_RI_L4_PROTO_MASK);
2695 if (err)
2696 return err;
2697
2698 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2699 MVPP2_PRS_RI_L4_PROTO_MASK);
2700 if (err)
2701 return err;
2702
2703 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2704 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2705 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2706 MVPP2_PRS_RI_CPU_CODE_MASK |
2707 MVPP2_PRS_RI_UDF3_MASK);
2708 if (err)
2709 return err;
2710
2711 /* IPv4 Broadcast */
2712 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2713 if (err)
2714 return err;
2715
2716 /* IPv4 Multicast */
2717 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2718 if (err)
2719 return err;
2720
2721 /* Default IPv4 entry for unknown protocols */
2722 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2723 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2724 pe.index = MVPP2_PE_IP4_PROTO_UN;
2725
2726 /* Set next lu to IPv4 */
2727 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2728 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2729 /* Set L4 offset */
2730 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2731 sizeof(struct iphdr) - 4,
2732 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2733 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2734 MVPP2_PRS_IPV4_DIP_AI_BIT);
2735 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2736 MVPP2_PRS_RI_L4_PROTO_MASK);
2737
2738 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2739 /* Unmask all ports */
2740 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2741
2742 /* Update shadow table and hw entry */
2743 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2744 mvpp2_prs_hw_write(priv, &pe);
2745
2746 /* Default IPv4 entry for unicast address */
2747 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2748 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2749 pe.index = MVPP2_PE_IP4_ADDR_UN;
2750
2751 /* Finished: go to flowid generation */
2752 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2753 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2754 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2755 MVPP2_PRS_RI_L3_ADDR_MASK);
2756
2757 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2758 MVPP2_PRS_IPV4_DIP_AI_BIT);
2759 /* Unmask all ports */
2760 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2761
2762 /* Update shadow table and hw entry */
2763 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2764 mvpp2_prs_hw_write(priv, &pe);
2765
2766 return 0;
2767}
2768
2769/* Initialize entries for IPv6 */
2770static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
2771{
2772 struct mvpp2_prs_entry pe;
2773 int tid, err;
2774
2775 /* Set entries for TCP, UDP and ICMP over IPv6 */
2776 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
2777 MVPP2_PRS_RI_L4_TCP,
2778 MVPP2_PRS_RI_L4_PROTO_MASK);
2779 if (err)
2780 return err;
2781
2782 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
2783 MVPP2_PRS_RI_L4_UDP,
2784 MVPP2_PRS_RI_L4_PROTO_MASK);
2785 if (err)
2786 return err;
2787
2788 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
2789 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2790 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2791 MVPP2_PRS_RI_CPU_CODE_MASK |
2792 MVPP2_PRS_RI_UDF3_MASK);
2793 if (err)
2794 return err;
2795
2796	/* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */
2797 /* Result Info: UDF7=1, DS lite */
2798 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
2799 MVPP2_PRS_RI_UDF7_IP6_LITE,
2800 MVPP2_PRS_RI_UDF7_MASK);
2801 if (err)
2802 return err;
2803
2804 /* IPv6 multicast */
2805 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2806 if (err)
2807 return err;
2808
2809 /* Entry for checking hop limit */
2810 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2811 MVPP2_PE_LAST_FREE_TID);
2812 if (tid < 0)
2813 return tid;
2814
2815 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2816 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2817 pe.index = tid;
2818
2819 /* Finished: go to flowid generation */
2820 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2821 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2822 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
2823 MVPP2_PRS_RI_DROP_MASK,
2824 MVPP2_PRS_RI_L3_PROTO_MASK |
2825 MVPP2_PRS_RI_DROP_MASK);
2826
2827 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
2828 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2829 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2830
2831 /* Update shadow table and hw entry */
2832 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2833 mvpp2_prs_hw_write(priv, &pe);
2834
2835 /* Default IPv6 entry for unknown protocols */
2836 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2837 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2838 pe.index = MVPP2_PE_IP6_PROTO_UN;
2839
2840 /* Finished: go to flowid generation */
2841 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2842 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2843 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2844 MVPP2_PRS_RI_L4_PROTO_MASK);
2845	/* Set L4 offset relative to our current position */
2846 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2847 sizeof(struct ipv6hdr) - 4,
2848 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2849
2850 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2851 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2852 /* Unmask all ports */
2853 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2854
2855 /* Update shadow table and hw entry */
2856 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2857 mvpp2_prs_hw_write(priv, &pe);
2858
2859 /* Default IPv6 entry for unknown ext protocols */
2860 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2861 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2862 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
2863
2864 /* Finished: go to flowid generation */
2865 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2866 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2867 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2868 MVPP2_PRS_RI_L4_PROTO_MASK);
2869
2870 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
2871 MVPP2_PRS_IPV6_EXT_AI_BIT);
2872 /* Unmask all ports */
2873 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2874
2875 /* Update shadow table and hw entry */
2876 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2877 mvpp2_prs_hw_write(priv, &pe);
2878
2879 /* Default IPv6 entry for unicast address */
2880 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2881 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2882 pe.index = MVPP2_PE_IP6_ADDR_UN;
2883
2884 /* Finished: go to IPv6 again */
2885 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2886 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2887 MVPP2_PRS_RI_L3_ADDR_MASK);
2888 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2889 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2890 /* Shift back to IPV6 NH */
2891 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2892
2893 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2894 /* Unmask all ports */
2895 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2896
2897 /* Update shadow table and hw entry */
2898 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2899 mvpp2_prs_hw_write(priv, &pe);
2900
2901 return 0;
2902}
2903
2904/* Parser default initialization */
2905static int mvpp2_prs_default_init(struct platform_device *pdev,
2906 struct mvpp2 *priv)
2907{
2908 int err, index, i;
2909
2910 /* Enable tcam table */
2911 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2912
2913 /* Clear all tcam and sram entries */
2914 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2915 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2916 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2917 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2918
2919 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2920 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2921 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2922 }
2923
2924 /* Invalidate all tcam entries */
2925 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2926 mvpp2_prs_hw_inv(priv, index);
2927
2928 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2929 sizeof(struct mvpp2_prs_shadow),
2930 GFP_KERNEL);
2931 if (!priv->prs_shadow)
2932 return -ENOMEM;
2933
2934 /* Always start from lookup = 0 */
2935 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2936 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2937 MVPP2_PRS_PORT_LU_MAX, 0);
2938
2939 mvpp2_prs_def_flow_init(priv);
2940
2941 mvpp2_prs_mh_init(priv);
2942
2943 mvpp2_prs_mac_init(priv);
2944
2945 mvpp2_prs_dsa_init(priv);
2946
2947 err = mvpp2_prs_etype_init(priv);
2948 if (err)
2949 return err;
2950
2951 err = mvpp2_prs_vlan_init(pdev, priv);
2952 if (err)
2953 return err;
2954
2955 err = mvpp2_prs_pppoe_init(priv);
2956 if (err)
2957 return err;
2958
2959 err = mvpp2_prs_ip6_init(priv);
2960 if (err)
2961 return err;
2962
2963 err = mvpp2_prs_ip4_init(priv);
2964 if (err)
2965 return err;
2966
2967 return 0;
2968}
2969
2970/* Compare MAC DA with tcam entry data */
2971static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2972 const u8 *da, unsigned char *mask)
2973{
2974 unsigned char tcam_byte, tcam_mask;
2975 int index;
2976
2977 for (index = 0; index < ETH_ALEN; index++) {
2978 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2979 if (tcam_mask != mask[index])
2980 return false;
2981
2982 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2983 return false;
2984 }
2985
2986 return true;
2987}
2988
2989/* Find tcam entry with matched pair <MAC DA, port> */
2990static struct mvpp2_prs_entry *
2991mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2992 unsigned char *mask, int udf_type)
2993{
2994 struct mvpp2_prs_entry *pe;
2995 int tid;
2996
2997 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2998 if (!pe)
2999 return NULL;
3000 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3001
3002	/* Go through all entries with MVPP2_PRS_LU_MAC */
3003 for (tid = MVPP2_PE_FIRST_FREE_TID;
3004 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3005 unsigned int entry_pmap;
3006
3007 if (!priv->prs_shadow[tid].valid ||
3008 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3009 (priv->prs_shadow[tid].udf != udf_type))
3010 continue;
3011
3012 pe->index = tid;
3013 mvpp2_prs_hw_read(priv, pe);
3014 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3015
3016 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3017 entry_pmap == pmap)
3018 return pe;
3019 }
3020 kfree(pe);
3021
3022 return NULL;
3023}
3024
3025/* Update parser's mac da entry */
3026static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3027 const u8 *da, bool add)
3028{
3029 struct mvpp2_prs_entry *pe;
3030 unsigned int pmap, len, ri;
3031 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3032 int tid;
3033
3034	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
3035 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3036 MVPP2_PRS_UDF_MAC_DEF);
3037
3038 /* No such entry */
3039 if (!pe) {
3040 if (!add)
3041 return 0;
3042
3043 /* Create new TCAM entry */
3044		/* Find first range mac entry */
3045 for (tid = MVPP2_PE_FIRST_FREE_TID;
3046 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3047 if (priv->prs_shadow[tid].valid &&
3048 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3049 (priv->prs_shadow[tid].udf ==
3050 MVPP2_PRS_UDF_MAC_RANGE))
3051 break;
3052
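		/* Keep exact-match DA entries below the first ranged MAC entry
		 * so they are looked up ahead of the ranges.
		 */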
3053		/* Go through all the entries from first to last */
3054 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3055 tid - 1);
3056 if (tid < 0)
3057 return tid;
3058
3059 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3060 if (!pe)
Amitoj Kaur Chawlac2bb7bc2016-02-04 19:25:26 +05303061 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003062 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3063 pe->index = tid;
3064
3065 /* Mask all ports */
3066 mvpp2_prs_tcam_port_map_set(pe, 0);
3067 }
3068
3069 /* Update port mask */
3070 mvpp2_prs_tcam_port_set(pe, port, add);
3071
3072 /* Invalidate the entry if no ports are left enabled */
3073 pmap = mvpp2_prs_tcam_port_map_get(pe);
3074 if (pmap == 0) {
3075 if (add) {
3076 kfree(pe);
Amitoj Kaur Chawlac2bb7bc2016-02-04 19:25:26 +05303077 return -EINVAL;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003078 }
3079 mvpp2_prs_hw_inv(priv, pe->index);
3080 priv->prs_shadow[pe->index].valid = false;
3081 kfree(pe);
3082 return 0;
3083 }
3084
3085 /* Continue - set next lookup */
3086 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3087
3088 /* Set match on DA */
3089 len = ETH_ALEN;
3090 while (len--)
3091 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3092
3093 /* Set result info bits */
3094 if (is_broadcast_ether_addr(da))
3095 ri = MVPP2_PRS_RI_L2_BCAST;
3096 else if (is_multicast_ether_addr(da))
3097 ri = MVPP2_PRS_RI_L2_MCAST;
3098 else
3099 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3100
3101 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3102 MVPP2_PRS_RI_MAC_ME_MASK);
3103 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3104 MVPP2_PRS_RI_MAC_ME_MASK);
3105
3106 /* Shift to ethertype */
3107 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3108 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3109
3110 /* Update shadow table and hw entry */
3111 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3112 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3113 mvpp2_prs_hw_write(priv, pe);
3114
3115 kfree(pe);
3116
3117 return 0;
3118}
3119
3120static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3121{
3122 struct mvpp2_port *port = netdev_priv(dev);
3123 int err;
3124
3125 /* Remove old parser entry */
3126 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3127 false);
3128 if (err)
3129 return err;
3130
3131 /* Add new parser entry */
3132 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3133 if (err)
3134 return err;
3135
3136 /* Set addr in the device */
3137 ether_addr_copy(dev->dev_addr, da);
3138
3139 return 0;
3140}
3141
3142/* Delete all of the port's simple (non-range) multicast entries */
3143static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3144{
3145 struct mvpp2_prs_entry pe;
3146 int index, tid;
3147
3148 for (tid = MVPP2_PE_FIRST_FREE_TID;
3149 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3150 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3151
3152 if (!priv->prs_shadow[tid].valid ||
3153 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3154 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3155 continue;
3156
3157 /* Only simple mac entries */
3158 pe.index = tid;
3159 mvpp2_prs_hw_read(priv, &pe);
3160
3161 /* Read mac addr from entry */
3162 for (index = 0; index < ETH_ALEN; index++)
3163 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3164 &da_mask[index]);
3165
3166 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3167 /* Delete this entry */
3168 mvpp2_prs_mac_da_accept(priv, port, da, false);
3169 }
3170}
3171
3172static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3173{
3174 switch (type) {
3175 case MVPP2_TAG_TYPE_EDSA:
3176 /* Add port to EDSA entries */
3177 mvpp2_prs_dsa_tag_set(priv, port, true,
3178 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3179 mvpp2_prs_dsa_tag_set(priv, port, true,
3180 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3181 /* Remove port from DSA entries */
3182 mvpp2_prs_dsa_tag_set(priv, port, false,
3183 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3184 mvpp2_prs_dsa_tag_set(priv, port, false,
3185 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3186 break;
3187
3188 case MVPP2_TAG_TYPE_DSA:
3189 /* Add port to DSA entries */
3190 mvpp2_prs_dsa_tag_set(priv, port, true,
3191 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3192 mvpp2_prs_dsa_tag_set(priv, port, true,
3193 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3194 /* Remove port from EDSA entries */
3195 mvpp2_prs_dsa_tag_set(priv, port, false,
3196 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3197 mvpp2_prs_dsa_tag_set(priv, port, false,
3198 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3199 break;
3200
3201 case MVPP2_TAG_TYPE_MH:
3202 case MVPP2_TAG_TYPE_NONE:
3203		/* Remove port from EDSA and DSA entries */
3204 mvpp2_prs_dsa_tag_set(priv, port, false,
3205 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3206 mvpp2_prs_dsa_tag_set(priv, port, false,
3207 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3208 mvpp2_prs_dsa_tag_set(priv, port, false,
3209 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3210 mvpp2_prs_dsa_tag_set(priv, port, false,
3211 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3212 break;
3213
3214 default:
3215 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3216 return -EINVAL;
3217 }
3218
3219 return 0;
3220}
3221
3222/* Set prs flow for the port */
3223static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3224{
3225 struct mvpp2_prs_entry *pe;
3226 int tid;
3227
3228 pe = mvpp2_prs_flow_find(port->priv, port->id);
3229
3230	/* Such an entry doesn't exist */
3231 if (!pe) {
3232		/* Go through all the entries from last to first */
3233 tid = mvpp2_prs_tcam_first_free(port->priv,
3234 MVPP2_PE_LAST_FREE_TID,
3235 MVPP2_PE_FIRST_FREE_TID);
3236 if (tid < 0)
3237 return tid;
3238
3239 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3240 if (!pe)
3241 return -ENOMEM;
3242
3243 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3244 pe->index = tid;
3245
3246		/* Set flow ID */
3247 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3248 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3249
3250 /* Update shadow table */
3251 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3252 }
3253
3254 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3255 mvpp2_prs_hw_write(port->priv, pe);
3256 kfree(pe);
3257
3258 return 0;
3259}
3260
3261/* Classifier configuration routines */
3262
3263/* Update classification flow table registers */
3264static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3265 struct mvpp2_cls_flow_entry *fe)
3266{
3267 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3268 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3269 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3270 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3271}
3272
3273/* Update classification lookup table register */
3274static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3275 struct mvpp2_cls_lookup_entry *le)
3276{
3277 u32 val;
3278
3279 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3280 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3281 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3282}
3283
3284/* Classifier default initialization */
3285static void mvpp2_cls_init(struct mvpp2 *priv)
3286{
3287 struct mvpp2_cls_lookup_entry le;
3288 struct mvpp2_cls_flow_entry fe;
3289 int index;
3290
3291 /* Enable classifier */
3292 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3293
3294 /* Clear classifier flow table */
Arnd Bergmanne8f967c2016-11-24 17:28:12 +01003295 memset(&fe.data, 0, sizeof(fe.data));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003296 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3297 fe.index = index;
3298 mvpp2_cls_flow_write(priv, &fe);
3299 }
3300
3301 /* Clear classifier lookup table */
3302 le.data = 0;
3303 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3304 le.lkpid = index;
3305 le.way = 0;
3306 mvpp2_cls_lookup_write(priv, &le);
3307
3308 le.way = 1;
3309 mvpp2_cls_lookup_write(priv, &le);
3310 }
3311}
3312
3313static void mvpp2_cls_port_config(struct mvpp2_port *port)
3314{
3315 struct mvpp2_cls_lookup_entry le;
3316 u32 val;
3317
3318 /* Set way for the port */
3319 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3320 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3321 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3322
3323 /* Pick the entry to be accessed in lookup ID decoding table
3324 * according to the way and lkpid.
3325 */
3326 le.lkpid = port->id;
3327 le.way = 0;
3328 le.data = 0;
3329
3330 /* Set initial CPU queue for receiving packets */
3331 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3332 le.data |= port->first_rxq;
3333
3334 /* Disable classification engines */
3335 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3336
3337 /* Update lookup ID table entry */
3338 mvpp2_cls_lookup_write(port->priv, &le);
3339}
3340
3341/* Set CPU queue number for oversize packets */
3342static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3343{
3344 u32 val;
3345
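	/* The oversize RX queue number is split between two registers: the
	 * low bits go into the per-port oversize register, the remaining
	 * high bits into the SWFWD_P2HQ register.
	 */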
3346 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3347 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3348
3349 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3350 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3351
3352 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3353 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3354 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3355}
3356
3357/* Buffer Manager configuration routines */
3358
3359/* Create pool */
3360static int mvpp2_bm_pool_create(struct platform_device *pdev,
3361 struct mvpp2 *priv,
3362 struct mvpp2_bm_pool *bm_pool, int size)
3363{
3364 int size_bytes;
3365 u32 val;
3366
3367 size_bytes = sizeof(u32) * size;
3368 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
3369 &bm_pool->phys_addr,
3370 GFP_KERNEL);
3371 if (!bm_pool->virt_addr)
3372 return -ENOMEM;
3373
3374 if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
3375 dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
3376 bm_pool->phys_addr);
3377 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3378 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3379 return -ENOMEM;
3380 }
3381
3382 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
3383 bm_pool->phys_addr);
3384 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3385
3386 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3387 val |= MVPP2_BM_START_MASK;
3388 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3389
3390 bm_pool->type = MVPP2_BM_FREE;
3391 bm_pool->size = size;
3392 bm_pool->pkt_size = 0;
3393 bm_pool->buf_num = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003394
3395 return 0;
3396}
3397
3398/* Set pool buffer size */
3399static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3400 struct mvpp2_bm_pool *bm_pool,
3401 int buf_size)
3402{
3403 u32 val;
3404
3405 bm_pool->buf_size = buf_size;
3406
3407 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3408 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3409}
3410
Ezequiel Garcia7861f122014-07-21 13:48:14 -03003411/* Free all buffers from the pool */
Marcin Wojtas4229d502015-12-03 15:20:50 +01003412static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3413 struct mvpp2_bm_pool *bm_pool)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003414{
3415 int i;
3416
Ezequiel Garcia7861f122014-07-21 13:48:14 -03003417 for (i = 0; i < bm_pool->buf_num; i++) {
Marcin Wojtas4229d502015-12-03 15:20:50 +01003418 dma_addr_t buf_phys_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003419 u32 vaddr;
3420
Joe Perchesdbedd442015-03-06 20:49:12 -08003421 /* Get buffer virtual address (indirect access) */
Marcin Wojtas4229d502015-12-03 15:20:50 +01003422 buf_phys_addr = mvpp2_read(priv,
3423 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003424 vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
Marcin Wojtas4229d502015-12-03 15:20:50 +01003425
3426 dma_unmap_single(dev, buf_phys_addr,
3427 bm_pool->buf_size, DMA_FROM_DEVICE);
3428
Marcin Wojtas3f518502014-07-10 16:52:13 -03003429 if (!vaddr)
3430 break;
3431 dev_kfree_skb_any((struct sk_buff *)vaddr);
3432 }
3433
3434 /* Update BM driver with number of buffers removed from pool */
3435 bm_pool->buf_num -= i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003436}
3437
3438/* Cleanup pool */
3439static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3440 struct mvpp2 *priv,
3441 struct mvpp2_bm_pool *bm_pool)
3442{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003443 u32 val;
3444
Marcin Wojtas4229d502015-12-03 15:20:50 +01003445 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03003446 if (bm_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03003447 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3448 return 0;
3449 }
3450
3451 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3452 val |= MVPP2_BM_STOP_MASK;
3453 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3454
3455 dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
3456 bm_pool->virt_addr,
3457 bm_pool->phys_addr);
3458 return 0;
3459}
3460
3461static int mvpp2_bm_pools_init(struct platform_device *pdev,
3462 struct mvpp2 *priv)
3463{
3464 int i, err, size;
3465 struct mvpp2_bm_pool *bm_pool;
3466
3467 /* Create all pools with maximum size */
3468 size = MVPP2_BM_POOL_SIZE_MAX;
3469 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3470 bm_pool = &priv->bm_pools[i];
3471 bm_pool->id = i;
3472 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3473 if (err)
3474 goto err_unroll_pools;
3475 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3476 }
3477 return 0;
3478
3479err_unroll_pools:
3480 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3481 for (i = i - 1; i >= 0; i--)
3482 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3483 return err;
3484}
3485
3486static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3487{
3488 int i, err;
3489
3490 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3491 /* Mask BM all interrupts */
3492 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3493 /* Clear BM cause register */
3494 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3495 }
3496
3497 /* Allocate and initialize BM pools */
3498 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3499 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
3500 if (!priv->bm_pools)
3501 return -ENOMEM;
3502
3503 err = mvpp2_bm_pools_init(pdev, priv);
3504 if (err < 0)
3505 return err;
3506 return 0;
3507}
3508
3509/* Attach long pool to rxq */
3510static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3511 int lrxq, int long_pool)
3512{
3513 u32 val;
3514 int prxq;
3515
3516 /* Get queue physical ID */
3517 prxq = port->rxqs[lrxq]->id;
3518
3519 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3520 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3521 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
3522 MVPP2_RXQ_POOL_LONG_MASK);
3523
3524 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3525}
3526
3527/* Attach short pool to rxq */
3528static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3529 int lrxq, int short_pool)
3530{
3531 u32 val;
3532 int prxq;
3533
3534 /* Get queue physical ID */
3535 prxq = port->rxqs[lrxq]->id;
3536
3537 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3538 val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3539 val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
3540 MVPP2_RXQ_POOL_SHORT_MASK);
3541
3542 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3543}
3544
3545/* Allocate skb for BM pool */
3546static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
3547 struct mvpp2_bm_pool *bm_pool,
3548 dma_addr_t *buf_phys_addr,
3549 gfp_t gfp_mask)
3550{
3551 struct sk_buff *skb;
3552 dma_addr_t phys_addr;
3553
3554 skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
3555 if (!skb)
3556 return NULL;
3557
3558 phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
3559 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3560 DMA_FROM_DEVICE);
3561 if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
3562 dev_kfree_skb_any(skb);
3563 return NULL;
3564 }
3565 *buf_phys_addr = phys_addr;
3566
3567 return skb;
3568}
3569
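/* A BM "cookie" is a software-defined u32 carried alongside each buffer:
 * the pool id lives at MVPP2_BM_COOKIE_POOL_OFFS and the receiving CPU at
 * MVPP2_BM_COOKIE_CPU_OFFS (see mvpp2_bm_cookie_build()), so the refill
 * path can hand a buffer back to the pool it was taken from.
 */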
3570/* Set pool number in a BM cookie */
3571static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3572{
3573 u32 bm;
3574
3575 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3576 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3577
3578 return bm;
3579}
3580
3581/* Get pool number from a BM cookie */
3582static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
3583{
3584 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
3585}
3586
3587/* Release buffer to BM */
3588static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
3589 u32 buf_phys_addr, u32 buf_virt_addr)
3590{
3591 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
3592 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
3593}
3594
3595/* Release multicast buffer */
3596static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
3597 u32 buf_phys_addr, u32 buf_virt_addr,
3598 int mc_id)
3599{
3600 u32 val = 0;
3601
3602 val |= (mc_id & MVPP2_BM_MC_ID_MASK);
3603 mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
3604
3605 mvpp2_bm_pool_put(port, pool,
3606 buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
3607 buf_virt_addr);
3608}
3609
3610/* Refill BM pool */
3611static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
3612 u32 phys_addr, u32 cookie)
3613{
3614 int pool = mvpp2_bm_cookie_pool_get(bm);
3615
3616 mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
3617}
3618
3619/* Allocate buffers for the pool */
3620static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3621 struct mvpp2_bm_pool *bm_pool, int buf_num)
3622{
3623 struct sk_buff *skb;
3624 int i, buf_size, total_size;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003625 dma_addr_t phys_addr;
3626
3627 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3628 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3629
3630 if (buf_num < 0 ||
3631 (buf_num + bm_pool->buf_num > bm_pool->size)) {
3632 netdev_err(port->dev,
3633 "cannot allocate %d buffers for pool %d\n",
3634 buf_num, bm_pool->id);
3635 return 0;
3636 }
3637
Marcin Wojtas3f518502014-07-10 16:52:13 -03003638 for (i = 0; i < buf_num; i++) {
3639 skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
3640 if (!skb)
3641 break;
3642
Thomas Petazzoni3eb2d992017-02-21 11:28:08 +01003643 mvpp2_bm_pool_put(port, bm_pool->id, (u32)phys_addr, (u32)skb);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003644 }
3645
3646 /* Update BM driver with number of buffers added to pool */
3647 bm_pool->buf_num += i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003648
3649 netdev_dbg(port->dev,
3650 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3651 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3652 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3653
3654 netdev_dbg(port->dev,
3655 "%s pool %d: %d of %d buffers added\n",
3656 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3657 bm_pool->id, i, buf_num);
3658 return i;
3659}
3660
3661/* Notify the driver that the BM pool is being used as a specific type and return the
3662 * pool pointer on success
3663 */
3664static struct mvpp2_bm_pool *
3665mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3666 int pkt_size)
3667{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003668 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3669 int num;
3670
3671 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
3672 netdev_err(port->dev, "mixing pool types is forbidden\n");
3673 return NULL;
3674 }
3675
Marcin Wojtas3f518502014-07-10 16:52:13 -03003676 if (new_pool->type == MVPP2_BM_FREE)
3677 new_pool->type = type;
3678
3679 /* Allocate buffers in case BM pool is used as long pool, but packet
3680 * size doesn't match MTU or BM pool hasn't been used yet
3681 */
3682 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
3683 (new_pool->pkt_size == 0)) {
3684 int pkts_num;
3685
3686 /* Set default buffer number or free all the buffers in case
3687 * the pool is not empty
3688 */
3689 pkts_num = new_pool->buf_num;
3690 if (pkts_num == 0)
3691 pkts_num = type == MVPP2_BM_SWF_LONG ?
3692 MVPP2_BM_LONG_BUF_NUM :
3693 MVPP2_BM_SHORT_BUF_NUM;
3694 else
Marcin Wojtas4229d502015-12-03 15:20:50 +01003695 mvpp2_bm_bufs_free(port->dev->dev.parent,
3696 port->priv, new_pool);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003697
3698 new_pool->pkt_size = pkt_size;
3699
3700 /* Allocate buffers for this pool */
3701 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
3702 if (num != pkts_num) {
3703 WARN(1, "pool %d: %d of %d allocated\n",
3704 new_pool->id, num, pkts_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003705 return NULL;
3706 }
3707 }
3708
3709 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3710 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3711
Marcin Wojtas3f518502014-07-10 16:52:13 -03003712 return new_pool;
3713}
3714
3715/* Initialize pools for swf */
3716static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3717{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003718 int rxq;
3719
3720 if (!port->pool_long) {
3721 port->pool_long =
3722 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
3723 MVPP2_BM_SWF_LONG,
3724 port->pkt_size);
3725 if (!port->pool_long)
3726 return -ENOMEM;
3727
Marcin Wojtas3f518502014-07-10 16:52:13 -03003728 port->pool_long->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003729
3730 for (rxq = 0; rxq < rxq_number; rxq++)
3731 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
3732 }
3733
3734 if (!port->pool_short) {
3735 port->pool_short =
3736 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
3737 MVPP2_BM_SWF_SHORT,
3738 MVPP2_BM_SHORT_PKT_SIZE);
3739 if (!port->pool_short)
3740 return -ENOMEM;
3741
Marcin Wojtas3f518502014-07-10 16:52:13 -03003742 port->pool_short->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003743
3744 for (rxq = 0; rxq < rxq_number; rxq++)
3745 mvpp2_rxq_short_pool_set(port, rxq,
3746 port->pool_short->id);
3747 }
3748
3749 return 0;
3750}
3751
3752static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
3753{
3754 struct mvpp2_port *port = netdev_priv(dev);
3755 struct mvpp2_bm_pool *port_pool = port->pool_long;
3756 int num, pkts_num = port_pool->buf_num;
3757 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
3758
3759 /* Update BM pool with new buffer size */
Marcin Wojtas4229d502015-12-03 15:20:50 +01003760 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03003761 if (port_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03003762 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
3763 return -EIO;
3764 }
3765
3766 port_pool->pkt_size = pkt_size;
3767 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
3768 if (num != pkts_num) {
3769 WARN(1, "pool %d: %d of %d allocated\n",
3770 port_pool->id, num, pkts_num);
3771 return -EIO;
3772 }
3773
3774 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
3775 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
3776 dev->mtu = mtu;
3777 netdev_update_features(dev);
3778 return 0;
3779}
3780
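/* Per-port interrupt enable/disable. The value passed to
 * MVPP2_ISR_ENABLE_REG is a bitmap of CPUs: judging by the
 * MVPP2_ISR_ENABLE_INTERRUPT/_DISABLE_INTERRUPT macros, the low half of the
 * register enables and the high half disables. With four present CPUs, for
 * instance, cpu_mask below is 0xf.
 */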
3781static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
3782{
3783 int cpu, cpu_mask = 0;
3784
3785 for_each_present_cpu(cpu)
3786 cpu_mask |= 1 << cpu;
3787 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3788 MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3789}
3790
3791static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
3792{
3793 int cpu, cpu_mask = 0;
3794
3795 for_each_present_cpu(cpu)
3796 cpu_mask |= 1 << cpu;
3797 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
3798 MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
3799}
3800
3801/* Mask the current CPU's Rx/Tx interrupts */
3802static void mvpp2_interrupts_mask(void *arg)
3803{
3804 struct mvpp2_port *port = arg;
3805
3806 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
3807}
3808
3809/* Unmask the current CPU's Rx/Tx interrupts */
3810static void mvpp2_interrupts_unmask(void *arg)
3811{
3812 struct mvpp2_port *port = arg;
3813
3814 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3815 (MVPP2_CAUSE_MISC_SUM_MASK |
Marcin Wojtas3f518502014-07-10 16:52:13 -03003816 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3817}
3818
3819/* Port configuration routines */
3820
3821static void mvpp2_port_mii_set(struct mvpp2_port *port)
3822{
Marcin Wojtas08a23752014-07-21 13:48:12 -03003823 u32 val;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003824
Marcin Wojtas08a23752014-07-21 13:48:12 -03003825 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003826
Marcin Wojtas08a23752014-07-21 13:48:12 -03003827 switch (port->phy_interface) {
3828 case PHY_INTERFACE_MODE_SGMII:
3829 val |= MVPP2_GMAC_INBAND_AN_MASK;
3830 break;
3831 case PHY_INTERFACE_MODE_RGMII:
3832 val |= MVPP2_GMAC_PORT_RGMII_MASK;
3833 default:
3834 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3835 }
3836
3837 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3838}
3839
3840static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
3841{
3842 u32 val;
3843
3844 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3845 val |= MVPP2_GMAC_FC_ADV_EN;
3846 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003847}
3848
3849static void mvpp2_port_enable(struct mvpp2_port *port)
3850{
3851 u32 val;
3852
3853 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3854 val |= MVPP2_GMAC_PORT_EN_MASK;
3855 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3856 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3857}
3858
3859static void mvpp2_port_disable(struct mvpp2_port *port)
3860{
3861 u32 val;
3862
3863 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3864 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
3865 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3866}
3867
3868/* Disable periodic Xon packet transmission (IEEE 802.3x flow control) */
3869static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
3870{
3871 u32 val;
3872
3873 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
3874 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
3875 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3876}
3877
3878/* Configure loopback port */
3879static void mvpp2_port_loopback_set(struct mvpp2_port *port)
3880{
3881 u32 val;
3882
3883 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
3884
3885 if (port->speed == 1000)
3886 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
3887 else
3888 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
3889
3890 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
3891 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
3892 else
3893 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
3894
3895 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
3896}
3897
3898static void mvpp2_port_reset(struct mvpp2_port *port)
3899{
3900 u32 val;
3901
3902 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3903 ~MVPP2_GMAC_PORT_RESET_MASK;
3904 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3905
3906 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3907 MVPP2_GMAC_PORT_RESET_MASK)
3908 continue;
3909}
3910
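/* The GMAC maximum receive size field appears to be expressed in 2-byte
 * units, hence the divide by two below; the 2-byte Marvell header
 * (MVPP2_MH_SIZE) is subtracted first since it is not counted against the
 * limit.
 */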
3911/* Change maximum receive size of the port */
3912static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
3913{
3914 u32 val;
3915
3916 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3917 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
3918 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
3919 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
3920 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3921}
3922
3923/* Set defaults to the MVPP2 port */
3924static void mvpp2_defaults_set(struct mvpp2_port *port)
3925{
3926 int tx_port_num, val, queue, ptxq, lrxq;
3927
3928 /* Configure port to loopback if needed */
3929 if (port->flags & MVPP2_F_LOOPBACK)
3930 mvpp2_port_loopback_set(port);
3931
3932 /* Update TX FIFO MIN Threshold */
3933 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3934 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3935 /* Min. TX threshold must be less than minimal packet length */
3936 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
3937 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3938
3939 /* Disable Legacy WRR, Disable EJP, Release from reset */
3940 tx_port_num = mvpp2_egress_port(port);
3941 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3942 tx_port_num);
3943 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3944
3945 /* Close bandwidth for all queues */
3946 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
3947 ptxq = mvpp2_txq_phys(port->id, queue);
3948 mvpp2_write(port->priv,
3949 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
3950 }
3951
3952 /* Set refill period to 1 usec, refill tokens
3953 * and bucket size to maximum
3954 */
3955 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
3956 port->priv->tclk / USEC_PER_SEC);
3957 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
3958 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3959 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3960 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3961 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
3962 val = MVPP2_TXP_TOKEN_SIZE_MAX;
3963 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3964
3965 /* Set MaximumLowLatencyPacketSize value to 256 */
3966 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
3967 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3968 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3969
3970 /* Enable Rx cache snoop */
3971 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3972 queue = port->rxqs[lrxq]->id;
3973 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3974 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
3975 MVPP2_SNOOP_BUF_HDR_MASK;
3976 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3977 }
3978
3979 /* At default, mask all interrupts to all present cpus */
3980 mvpp2_interrupts_disable(port);
3981}
3982
3983/* Enable/disable receiving packets */
3984static void mvpp2_ingress_enable(struct mvpp2_port *port)
3985{
3986 u32 val;
3987 int lrxq, queue;
3988
3989 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
3990 queue = port->rxqs[lrxq]->id;
3991 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
3992 val &= ~MVPP2_RXQ_DISABLE_MASK;
3993 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
3994 }
3995}
3996
3997static void mvpp2_ingress_disable(struct mvpp2_port *port)
3998{
3999 u32 val;
4000 int lrxq, queue;
4001
4002 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4003 queue = port->rxqs[lrxq]->id;
4004 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4005 val |= MVPP2_RXQ_DISABLE_MASK;
4006 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4007 }
4008}
4009
4010/* Enable transmit via physical egress queue
4011 * - HW starts taking descriptors from DRAM
4012 */
4013static void mvpp2_egress_enable(struct mvpp2_port *port)
4014{
4015 u32 qmap;
4016 int queue;
4017 int tx_port_num = mvpp2_egress_port(port);
4018
4019	/* Enable all initialized TXQs. */
4020 qmap = 0;
4021 for (queue = 0; queue < txq_number; queue++) {
4022 struct mvpp2_tx_queue *txq = port->txqs[queue];
4023
4024 if (txq->descs != NULL)
4025 qmap |= (1 << queue);
4026 }
4027
4028 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4029 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4030}
4031
4032/* Disable transmit via physical egress queue
4033 * - HW doesn't take descriptors from DRAM
4034 */
4035static void mvpp2_egress_disable(struct mvpp2_port *port)
4036{
4037 u32 reg_data;
4038 int delay;
4039 int tx_port_num = mvpp2_egress_port(port);
4040
4041 /* Issue stop command for active channels only */
4042 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4043 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4044 MVPP2_TXP_SCHED_ENQ_MASK;
4045 if (reg_data != 0)
4046 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4047 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4048
4049 /* Wait for all Tx activity to terminate. */
4050 delay = 0;
4051 do {
4052 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4053 netdev_warn(port->dev,
4054 "Tx stop timed out, status=0x%08x\n",
4055 reg_data);
4056 break;
4057 }
4058 mdelay(1);
4059 delay++;
4060
4061		/* Check the port TX Command register to verify that all
4062		 * Tx queues have stopped
4063 */
4064 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4065 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4066}
4067
4068/* Rx descriptors helper methods */
4069
4070/* Get number of Rx descriptors occupied by received packets */
4071static inline int
4072mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4073{
4074 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4075
4076 return val & MVPP2_RXQ_OCCUPIED_MASK;
4077}
4078
4079/* Update Rx queue status with the number of occupied and available
4080 * Rx descriptor slots.
4081 */
4082static inline void
4083mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4084 int used_count, int free_count)
4085{
4086	/* Decrement the number of used descriptors and increment the
4087	 * number of free descriptors.
4088 */
4089 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4090
4091 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4092}
4093
4094/* Get pointer to next RX descriptor to be processed by SW */
4095static inline struct mvpp2_rx_desc *
4096mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4097{
4098 int rx_desc = rxq->next_desc_to_proc;
4099
4100 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4101 prefetch(rxq->descs + rxq->next_desc_to_proc);
4102 return rxq->descs + rx_desc;
4103}
4104
4105/* Set rx queue offset */
4106static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4107 int prxq, int offset)
4108{
4109 u32 val;
4110
4111 /* Convert offset from bytes to units of 32 bytes */
4112 offset = offset >> 5;
4113
4114 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4115 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4116
4117	/* Offset is written in units of 32 bytes */
4118 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4119 MVPP2_RXQ_PACKET_OFFSET_MASK);
4120
4121 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4122}
4123
4124/* Obtain BM cookie information from descriptor */
4125static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
4126{
4127 int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
4128 MVPP2_RXD_BM_POOL_ID_OFFS;
4129 int cpu = smp_processor_id();
4130
4131 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4132 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
4133}
4134
4135/* Tx descriptors helper methods */
4136
4137/* Get number of Tx descriptors waiting to be transmitted by HW */
4138static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
4139 struct mvpp2_tx_queue *txq)
4140{
4141 u32 val;
4142
4143 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4144 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4145
4146 return val & MVPP2_TXQ_PENDING_MASK;
4147}
4148
4149/* Get pointer to next Tx descriptor to be processed (send) by HW */
4150static struct mvpp2_tx_desc *
4151mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4152{
4153 int tx_desc = txq->next_desc_to_proc;
4154
4155 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4156 return txq->descs + tx_desc;
4157}
4158
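/* Aggregated TXQs: each CPU owns a single aggregated Tx queue into which
 * every outgoing descriptor is first written. The hardware dispatches each
 * descriptor to the physical per-port TXQ named in its phys_txq field, so
 * the "pending" update below is not tied to a particular queue.
 */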
4159/* Update HW with number of aggregated Tx descriptors to be sent */
4160static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4161{
4162 /* aggregated access - relevant TXQ number is written in TX desc */
4163 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
4164}
4165
4166
4167/* Check if there are enough free descriptors in aggregated txq.
4168 * If not, update the number of occupied descriptors and repeat the check.
4169 */
4170static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4171 struct mvpp2_tx_queue *aggr_txq, int num)
4172{
4173 if ((aggr_txq->count + num) > aggr_txq->size) {
4174 /* Update number of occupied aggregated Tx descriptors */
4175 int cpu = smp_processor_id();
4176 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4177
4178 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4179 }
4180
4181 if ((aggr_txq->count + num) > aggr_txq->size)
4182 return -ENOMEM;
4183
4184 return 0;
4185}
4186
4187/* Reserved Tx descriptors allocation request */
4188static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4189 struct mvpp2_tx_queue *txq, int num)
4190{
4191 u32 val;
4192
4193 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
4194 mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
4195
4196 val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
4197
4198 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4199}
4200
4201/* Check if there are enough reserved descriptors for transmission.
4202 * If not, request chunk of reserved descriptors and check again.
4203 */
4204static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4205 struct mvpp2_tx_queue *txq,
4206 struct mvpp2_txq_pcpu *txq_pcpu,
4207 int num)
4208{
4209 int req, cpu, desc_count;
4210
4211 if (txq_pcpu->reserved_num >= num)
4212 return 0;
4213
4214 /* Not enough descriptors reserved! Update the reserved descriptor
4215 * count and check again.
4216 */
4217
4218 desc_count = 0;
4219 /* Compute total of used descriptors */
4220 for_each_present_cpu(cpu) {
4221 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4222
4223 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4224 desc_count += txq_pcpu_aux->count;
4225 desc_count += txq_pcpu_aux->reserved_num;
4226 }
4227
4228 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4229 desc_count += req;
4230
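 	/* Only grant the request if, summed over all CPUs, the used plus
 	 * reserved descriptors (including this chunk) still leave one
 	 * MVPP2_CPU_DESC_CHUNK per present CPU, presumably so that no single
 	 * CPU can exhaust the shared TXQ.
 	 */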
4231 if (desc_count >
4232 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4233 return -ENOMEM;
4234
4235 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4236
4237	/* OK, the descriptor count has been updated: check again. */
4238 if (txq_pcpu->reserved_num < num)
4239 return -ENOMEM;
4240 return 0;
4241}
4242
4243/* Release the last allocated Tx descriptor. Useful to handle DMA
4244 * mapping failures in the Tx path.
4245 */
4246static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4247{
4248 if (txq->next_desc_to_proc == 0)
4249 txq->next_desc_to_proc = txq->last_desc - 1;
4250 else
4251 txq->next_desc_to_proc--;
4252}
4253
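/* As an example of the command built below: a plain IPv4/TCP frame with a
 * 14-byte Ethernet header is passed l3_offs = 14, ip_hdr_len = 5 (the IPv4
 * ihl field counts 32-bit words, so 5 means a 20-byte header) and
 * l4_proto = IPPROTO_TCP, yielding a descriptor command with both IPv4 and
 * TCP checksum generation enabled.
 */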
4254/* Set Tx descriptors fields relevant for CSUM calculation */
4255static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4256 int ip_hdr_len, int l4_proto)
4257{
4258 u32 command;
4259
4260 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4261 * G_L4_chk, L4_type required only for checksum calculation
4262 */
4263 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4264 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4265 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4266
4267 if (l3_proto == swab16(ETH_P_IP)) {
4268 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
4269 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
4270 } else {
4271 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
4272 }
4273
4274 if (l4_proto == IPPROTO_TCP) {
4275 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
4276 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4277 } else if (l4_proto == IPPROTO_UDP) {
4278 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
4279 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4280 } else {
4281 command |= MVPP2_TXD_L4_CSUM_NOT;
4282 }
4283
4284 return command;
4285}
4286
4287/* Get number of sent descriptors and decrement counter.
4288 * The number of sent descriptors is returned.
4289 * Per-CPU access
4290 */
4291static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4292 struct mvpp2_tx_queue *txq)
4293{
4294 u32 val;
4295
4296 /* Reading status reg resets transmitted descriptor counter */
4297 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
4298
4299 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4300 MVPP2_TRANSMITTED_COUNT_OFFSET;
4301}
4302
4303static void mvpp2_txq_sent_counter_clear(void *arg)
4304{
4305 struct mvpp2_port *port = arg;
4306 int queue;
4307
4308 for (queue = 0; queue < txq_number; queue++) {
4309 int id = port->txqs[queue]->id;
4310
4311 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
4312 }
4313}
4314
4315/* Set max sizes for Tx queues */
4316static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4317{
4318 u32 val, size, mtu;
4319 int txq, tx_port_num;
4320
4321 mtu = port->pkt_size * 8;
4322 if (mtu > MVPP2_TXP_MTU_MAX)
4323 mtu = MVPP2_TXP_MTU_MAX;
4324
4325 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
4326 mtu = 3 * mtu;
4327
4328 /* Indirect access to registers */
4329 tx_port_num = mvpp2_egress_port(port);
4330 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4331
4332 /* Set MTU */
4333 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4334 val &= ~MVPP2_TXP_MTU_MAX;
4335 val |= mtu;
4336 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4337
4338	/* TXP token size and all TXQs token size must be larger than MTU */
4339 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4340 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4341 if (size < mtu) {
4342 size = mtu;
4343 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4344 val |= size;
4345 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4346 }
4347
4348 for (txq = 0; txq < txq_number; txq++) {
4349 val = mvpp2_read(port->priv,
4350 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4351 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4352
4353 if (size < mtu) {
4354 size = mtu;
4355 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4356 val |= size;
4357 mvpp2_write(port->priv,
4358 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4359 val);
4360 }
4361 }
4362}
4363
4364/* Set the number of packets that will be received before an Rx interrupt
4365 * is generated by the HW.
4366 */
4367static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01004368 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004369{
Thomas Petazzonif8b0d5f2017-02-21 11:28:03 +01004370 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4371 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004372
Marcin Wojtas3f518502014-07-10 16:52:13 -03004373 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
Thomas Petazzonif8b0d5f2017-02-21 11:28:03 +01004374 mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG,
4375 rxq->pkts_coal);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004376}
4377
Thomas Petazzoniab426762017-02-21 11:28:04 +01004378static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
4379{
4380 u64 tmp = (u64)clk_hz * usec;
4381
4382 do_div(tmp, USEC_PER_SEC);
4383
4384 return tmp > U32_MAX ? U32_MAX : tmp;
4385}
4386
4387static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
4388{
4389 u64 tmp = (u64)cycles * USEC_PER_SEC;
4390
4391 do_div(tmp, clk_hz);
4392
4393 return tmp > U32_MAX ? U32_MAX : tmp;
4394}
4395
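/* Worked example, assuming a hypothetical 250 MHz tclk: a 100 usec
 * coalescing period becomes 250000000 * 100 / 10^6 = 25000 cycles. If the
 * result would exceed MVPP2_MAX_ISR_RX_THRESHOLD, the value is clamped and
 * time_coal is re-derived so it reflects what the hardware will actually do.
 */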
Marcin Wojtas3f518502014-07-10 16:52:13 -03004396/* Set the time delay in usec before Rx interrupt */
4397static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01004398 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004399{
Thomas Petazzoniab426762017-02-21 11:28:04 +01004400 unsigned long freq = port->priv->tclk;
4401 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004402
Thomas Petazzoniab426762017-02-21 11:28:04 +01004403 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
4404 rxq->time_coal =
4405 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
4406
4407 /* re-evaluate to get actual register value */
4408 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4409 }
4410
Marcin Wojtas3f518502014-07-10 16:52:13 -03004411 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004412}
4413
Marcin Wojtas3f518502014-07-10 16:52:13 -03004414/* Free Tx queue skbuffs */
4415static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4416 struct mvpp2_tx_queue *txq,
4417 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4418{
4419 int i;
4420
4421 for (i = 0; i < num; i++) {
Thomas Petazzoni83544912016-12-21 11:28:49 +01004422 struct mvpp2_txq_pcpu_buf *tx_buf =
4423 txq_pcpu->buffs + txq_pcpu->txq_get_index;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004424
Thomas Petazzoni83544912016-12-21 11:28:49 +01004425 dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
4426 tx_buf->size, DMA_TO_DEVICE);
Thomas Petazzoni36fb7432017-02-21 11:28:05 +01004427 if (tx_buf->skb)
4428 dev_kfree_skb_any(tx_buf->skb);
4429
4430 mvpp2_txq_inc_get(txq_pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004431 }
4432}
4433
4434static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4435 u32 cause)
4436{
4437 int queue = fls(cause) - 1;
4438
4439 return port->rxqs[queue];
4440}
4441
4442static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4443 u32 cause)
4444{
Marcin Wojtasedc660f2015-08-06 19:00:30 +02004445 int queue = fls(cause) - 1;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004446
4447 return port->txqs[queue];
4448}
4449
4450/* Handle end of transmission */
4451static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4452 struct mvpp2_txq_pcpu *txq_pcpu)
4453{
4454 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4455 int tx_done;
4456
4457 if (txq_pcpu->cpu != smp_processor_id())
4458 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
4459
4460 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4461 if (!tx_done)
4462 return;
4463 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4464
4465 txq_pcpu->count -= tx_done;
4466
4467 if (netif_tx_queue_stopped(nq))
4468 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4469 netif_tx_wake_queue(nq);
4470}
4471
Marcin Wojtasedc660f2015-08-06 19:00:30 +02004472static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4473{
4474 struct mvpp2_tx_queue *txq;
4475 struct mvpp2_txq_pcpu *txq_pcpu;
4476 unsigned int tx_todo = 0;
4477
4478 while (cause) {
4479 txq = mvpp2_get_tx_queue(port, cause);
4480 if (!txq)
4481 break;
4482
4483 txq_pcpu = this_cpu_ptr(txq->pcpu);
4484
4485 if (txq_pcpu->count) {
4486 mvpp2_txq_done(port, txq, txq_pcpu);
4487 tx_todo += txq_pcpu->count;
4488 }
4489
4490 cause &= ~(1 << txq->log_id);
4491 }
4492 return tx_todo;
4493}
4494
Marcin Wojtas3f518502014-07-10 16:52:13 -03004495/* Rx/Tx queue initialization/cleanup methods */
4496
4497/* Allocate and initialize descriptors for aggr TXQ */
4498static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4499 struct mvpp2_tx_queue *aggr_txq,
4500 int desc_num, int cpu,
4501 struct mvpp2 *priv)
4502{
4503 /* Allocate memory for TX descriptors */
4504 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4505 desc_num * MVPP2_DESC_ALIGNED_SIZE,
4506 &aggr_txq->descs_phys, GFP_KERNEL);
4507 if (!aggr_txq->descs)
4508 return -ENOMEM;
4509
Marcin Wojtas3f518502014-07-10 16:52:13 -03004510 aggr_txq->last_desc = aggr_txq->size - 1;
4511
4512 /* Aggr TXQ no reset WA */
4513 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4514 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4515
4516 /* Set Tx descriptors queue starting address */
4517 /* indirect access */
4518 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
4519 aggr_txq->descs_phys);
4520 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4521
4522 return 0;
4523}
4524
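/* Note on register access: most per-queue registers are reached indirectly.
 * The queue is first selected by writing its id to MVPP2_RXQ_NUM_REG (or
 * MVPP2_TXQ_NUM_REG on the Tx side), and the register writes that follow
 * then apply to that queue.
 */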
4525/* Create a specified Rx queue */
4526static int mvpp2_rxq_init(struct mvpp2_port *port,
4527 struct mvpp2_rx_queue *rxq)
4528
4529{
4530 rxq->size = port->rx_ring_size;
4531
4532 /* Allocate memory for RX descriptors */
4533 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4534 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4535 &rxq->descs_phys, GFP_KERNEL);
4536 if (!rxq->descs)
4537 return -ENOMEM;
4538
Marcin Wojtas3f518502014-07-10 16:52:13 -03004539 rxq->last_desc = rxq->size - 1;
4540
4541 /* Zero occupied and non-occupied counters - direct access */
4542 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4543
4544 /* Set Rx descriptors queue starting address - indirect access */
4545 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4546 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
4547 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4548 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
4549
4550 /* Set Offset */
4551 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4552
4553 /* Set coalescing pkts and time */
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01004554 mvpp2_rx_pkts_coal_set(port, rxq);
4555 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004556
4557 /* Add number of descriptors ready for receiving packets */
4558 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4559
4560 return 0;
4561}
4562
4563/* Push packets received by the RXQ to BM pool */
4564static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4565 struct mvpp2_rx_queue *rxq)
4566{
4567 int rx_received, i;
4568
4569 rx_received = mvpp2_rxq_received(port, rxq->id);
4570 if (!rx_received)
4571 return;
4572
4573 for (i = 0; i < rx_received; i++) {
4574 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
4575 u32 bm = mvpp2_bm_cookie_build(rx_desc);
4576
4577 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
4578 rx_desc->buf_cookie);
4579 }
4580 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4581}
4582
4583/* Cleanup Rx queue */
4584static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4585 struct mvpp2_rx_queue *rxq)
4586{
4587 mvpp2_rxq_drop_pkts(port, rxq);
4588
4589 if (rxq->descs)
4590 dma_free_coherent(port->dev->dev.parent,
4591 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4592 rxq->descs,
4593 rxq->descs_phys);
4594
4595 rxq->descs = NULL;
4596 rxq->last_desc = 0;
4597 rxq->next_desc_to_proc = 0;
4598 rxq->descs_phys = 0;
4599
4600 /* Clear Rx descriptors queue starting address and size;
4601 * free descriptor number
4602 */
4603 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4604 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
4605 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
4606 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
4607}
4608
4609/* Create and initialize a Tx queue */
4610static int mvpp2_txq_init(struct mvpp2_port *port,
4611 struct mvpp2_tx_queue *txq)
4612{
4613 u32 val;
4614 int cpu, desc, desc_per_txq, tx_port_num;
4615 struct mvpp2_txq_pcpu *txq_pcpu;
4616
4617 txq->size = port->tx_ring_size;
4618
4619 /* Allocate memory for Tx descriptors */
4620 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
4621 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4622 &txq->descs_phys, GFP_KERNEL);
4623 if (!txq->descs)
4624 return -ENOMEM;
4625
Marcin Wojtas3f518502014-07-10 16:52:13 -03004626 txq->last_desc = txq->size - 1;
4627
4628 /* Set Tx descriptors queue starting address - indirect access */
4629 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4630 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
4631 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
4632 MVPP2_TXQ_DESC_SIZE_MASK);
4633 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
4634 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
4635 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
4636 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
4637 val &= ~MVPP2_TXQ_PENDING_MASK;
4638 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
4639
4640 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
4641 * for each existing TXQ.
4642 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
4643 * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS
4644 */
4645 desc_per_txq = 16;
4646 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
4647 (txq->log_id * desc_per_txq);
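 	/* e.g. assuming MVPP2_MAX_TXQ is 8 and with desc_per_txq = 16,
 	 * port 1 / logical queue 2 gets a prefetch window starting at
 	 * descriptor 1 * 8 * 16 + 2 * 16 = 160.
 	 */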
4648
4649 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
4650 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
4651 MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
4652
4653 /* WRR / EJP configuration - indirect access */
4654 tx_port_num = mvpp2_egress_port(port);
4655 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4656
4657 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
4658 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
4659 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
4660 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
4661 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
4662
4663 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
4664 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
4665 val);
4666
4667 for_each_present_cpu(cpu) {
4668 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4669 txq_pcpu->size = txq->size;
Thomas Petazzoni83544912016-12-21 11:28:49 +01004670 txq_pcpu->buffs = kmalloc(txq_pcpu->size *
4671 sizeof(struct mvpp2_txq_pcpu_buf),
4672 GFP_KERNEL);
4673 if (!txq_pcpu->buffs)
Marcin Wojtas71ce3912015-08-06 19:00:29 +02004674 goto error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004675
4676 txq_pcpu->count = 0;
4677 txq_pcpu->reserved_num = 0;
4678 txq_pcpu->txq_put_index = 0;
4679 txq_pcpu->txq_get_index = 0;
4680 }
4681
4682 return 0;
Marcin Wojtas71ce3912015-08-06 19:00:29 +02004683
4684error:
4685 for_each_present_cpu(cpu) {
4686 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Thomas Petazzoni83544912016-12-21 11:28:49 +01004687 kfree(txq_pcpu->buffs);
Marcin Wojtas71ce3912015-08-06 19:00:29 +02004688 }
4689
4690 dma_free_coherent(port->dev->dev.parent,
4691 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4692 txq->descs, txq->descs_phys);
4693
4694 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004695}
4696
4697/* Free allocated TXQ resources */
4698static void mvpp2_txq_deinit(struct mvpp2_port *port,
4699 struct mvpp2_tx_queue *txq)
4700{
4701 struct mvpp2_txq_pcpu *txq_pcpu;
4702 int cpu;
4703
4704 for_each_present_cpu(cpu) {
4705 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Thomas Petazzoni83544912016-12-21 11:28:49 +01004706 kfree(txq_pcpu->buffs);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004707 }
4708
4709 if (txq->descs)
4710 dma_free_coherent(port->dev->dev.parent,
4711 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4712 txq->descs, txq->descs_phys);
4713
4714 txq->descs = NULL;
4715 txq->last_desc = 0;
4716 txq->next_desc_to_proc = 0;
4717 txq->descs_phys = 0;
4718
4719 /* Set minimum bandwidth for disabled TXQs */
4720 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
4721
4722 /* Set Tx descriptors queue starting address and size */
4723 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4724 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
4725 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
4726}
4727
4728/* Cleanup one Tx queue: drain pending descriptors and release its buffers */
4729static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
4730{
4731 struct mvpp2_txq_pcpu *txq_pcpu;
4732 int delay, pending, cpu;
4733 u32 val;
4734
4735 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4736 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
4737 val |= MVPP2_TXQ_DRAIN_EN_MASK;
4738 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4739
4740 /* The napi queue has been stopped so wait for all packets
4741 * to be transmitted.
4742 */
4743 delay = 0;
4744 do {
4745 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
4746 netdev_warn(port->dev,
4747 "port %d: cleaning queue %d timed out\n",
4748 port->id, txq->log_id);
4749 break;
4750 }
4751 mdelay(1);
4752 delay++;
4753
4754 pending = mvpp2_txq_pend_desc_num_get(port, txq);
4755 } while (pending);
4756
4757 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
4758 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
4759
4760 for_each_present_cpu(cpu) {
4761 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4762
4763 /* Release all packets */
4764 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
4765
4766 /* Reset queue */
4767 txq_pcpu->count = 0;
4768 txq_pcpu->txq_put_index = 0;
4769 txq_pcpu->txq_get_index = 0;
4770 }
4771}
4772
4773/* Cleanup all Tx queues */
4774static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
4775{
4776 struct mvpp2_tx_queue *txq;
4777 int queue;
4778 u32 val;
4779
4780 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
4781
4782 /* Reset Tx ports and delete Tx queues */
4783 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
4784 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4785
4786 for (queue = 0; queue < txq_number; queue++) {
4787 txq = port->txqs[queue];
4788 mvpp2_txq_clean(port, txq);
4789 mvpp2_txq_deinit(port, txq);
4790 }
4791
4792 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4793
4794 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
4795 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
4796}
4797
4798/* Cleanup all Rx queues */
4799static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
4800{
4801 int queue;
4802
4803 for (queue = 0; queue < rxq_number; queue++)
4804 mvpp2_rxq_deinit(port, port->rxqs[queue]);
4805}
4806
4807/* Init all Rx queues for port */
4808static int mvpp2_setup_rxqs(struct mvpp2_port *port)
4809{
4810 int queue, err;
4811
4812 for (queue = 0; queue < rxq_number; queue++) {
4813 err = mvpp2_rxq_init(port, port->rxqs[queue]);
4814 if (err)
4815 goto err_cleanup;
4816 }
4817 return 0;
4818
4819err_cleanup:
4820 mvpp2_cleanup_rxqs(port);
4821 return err;
4822}
4823
4824/* Init all tx queues for port */
4825static int mvpp2_setup_txqs(struct mvpp2_port *port)
4826{
4827 struct mvpp2_tx_queue *txq;
4828 int queue, err;
4829
4830 for (queue = 0; queue < txq_number; queue++) {
4831 txq = port->txqs[queue];
4832 err = mvpp2_txq_init(port, txq);
4833 if (err)
4834 goto err_cleanup;
4835 }
4836
Marcin Wojtas3f518502014-07-10 16:52:13 -03004837 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4838 return 0;
4839
4840err_cleanup:
4841 mvpp2_cleanup_txqs(port);
4842 return err;
4843}
4844
4845/* The callback for per-port interrupt */
4846static irqreturn_t mvpp2_isr(int irq, void *dev_id)
4847{
4848 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
4849
4850 mvpp2_interrupts_disable(port);
4851
4852 napi_schedule(&port->napi);
4853
4854 return IRQ_HANDLED;
4855}
4856
4857/* Adjust link */
4858static void mvpp2_link_event(struct net_device *dev)
4859{
4860 struct mvpp2_port *port = netdev_priv(dev);
Philippe Reynes8e072692016-06-28 00:08:11 +02004861 struct phy_device *phydev = dev->phydev;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004862 int status_change = 0;
4863 u32 val;
4864
4865 if (phydev->link) {
4866 if ((port->speed != phydev->speed) ||
4867 (port->duplex != phydev->duplex)) {
4868 u32 val;
4869
4870 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4871 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
4872 MVPP2_GMAC_CONFIG_GMII_SPEED |
4873 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
4874 MVPP2_GMAC_AN_SPEED_EN |
4875 MVPP2_GMAC_AN_DUPLEX_EN);
4876
4877 if (phydev->duplex)
4878 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
4879
4880 if (phydev->speed == SPEED_1000)
4881 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
Thomas Petazzoni2add5112014-07-27 23:21:35 +02004882 else if (phydev->speed == SPEED_100)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004883 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
4884
4885 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4886
4887 port->duplex = phydev->duplex;
4888 port->speed = phydev->speed;
4889 }
4890 }
4891
4892 if (phydev->link != port->link) {
4893 if (!phydev->link) {
4894 port->duplex = -1;
4895 port->speed = 0;
4896 }
4897
4898 port->link = phydev->link;
4899 status_change = 1;
4900 }
4901
4902 if (status_change) {
4903 if (phydev->link) {
4904 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4905 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
4906 MVPP2_GMAC_FORCE_LINK_DOWN);
4907 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4908 mvpp2_egress_enable(port);
4909 mvpp2_ingress_enable(port);
4910 } else {
4911 mvpp2_ingress_disable(port);
4912 mvpp2_egress_disable(port);
4913 }
4914 phy_print_status(phydev);
4915 }
4916}
4917
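/* Deferred Tx completion: when Tx coalescing leaves sent descriptors
 * unprocessed, a per-CPU hrtimer is armed for
 * MVPP2_TXDONE_HRTIMER_PERIOD_NS. When it fires it schedules the tx_done
 * tasklet, whose callback (mvpp2_tx_proc_cb) reaps the completed
 * descriptors and re-arms the timer if work remains.
 */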
Marcin Wojtasedc660f2015-08-06 19:00:30 +02004918static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
4919{
4920 ktime_t interval;
4921
4922 if (!port_pcpu->timer_scheduled) {
4923 port_pcpu->timer_scheduled = true;
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01004924 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02004925 hrtimer_start(&port_pcpu->tx_done_timer, interval,
4926 HRTIMER_MODE_REL_PINNED);
4927 }
4928}
4929
4930static void mvpp2_tx_proc_cb(unsigned long data)
4931{
4932 struct net_device *dev = (struct net_device *)data;
4933 struct mvpp2_port *port = netdev_priv(dev);
4934 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
4935 unsigned int tx_todo, cause;
4936
4937 if (!netif_running(dev))
4938 return;
4939 port_pcpu->timer_scheduled = false;
4940
4941 /* Process all the Tx queues */
4942 cause = (1 << txq_number) - 1;
4943 tx_todo = mvpp2_tx_done(port, cause);
4944
4945 /* Set the timer in case not all the packets were processed */
4946 if (tx_todo)
4947 mvpp2_timer_set(port_pcpu);
4948}
4949
4950static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
4951{
4952 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
4953 struct mvpp2_port_pcpu,
4954 tx_done_timer);
4955
4956 tasklet_schedule(&port_pcpu->tx_done_tasklet);
4957
4958 return HRTIMER_NORESTART;
4959}
4960
Marcin Wojtas3f518502014-07-10 16:52:13 -03004961/* Main RX/TX processing routines */
4962
4963/* Display more error info */
4964static void mvpp2_rx_error(struct mvpp2_port *port,
4965 struct mvpp2_rx_desc *rx_desc)
4966{
4967 u32 status = rx_desc->status;
4968
4969 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
4970 case MVPP2_RXD_ERR_CRC:
4971 netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
4972 status, rx_desc->data_size);
4973 break;
4974 case MVPP2_RXD_ERR_OVERRUN:
4975 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
4976 status, rx_desc->data_size);
4977 break;
4978 case MVPP2_RXD_ERR_RESOURCE:
4979 netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
4980 status, rx_desc->data_size);
4981 break;
4982 }
4983}
4984
4985/* Handle RX checksum offload */
4986static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
4987 struct sk_buff *skb)
4988{
4989 if (((status & MVPP2_RXD_L3_IP4) &&
4990 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
4991 (status & MVPP2_RXD_L3_IP6))
4992 if (((status & MVPP2_RXD_L4_UDP) ||
4993 (status & MVPP2_RXD_L4_TCP)) &&
4994 (status & MVPP2_RXD_L4_CSUM_OK)) {
4995 skb->csum = 0;
4996 skb->ip_summed = CHECKSUM_UNNECESSARY;
4997 return;
4998 }
4999
5000 skb->ip_summed = CHECKSUM_NONE;
5001}
5002
5003/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
5004static int mvpp2_rx_refill(struct mvpp2_port *port,
Thomas Petazzoni7ef7e1d2017-02-21 11:28:07 +01005005 struct mvpp2_bm_pool *bm_pool, u32 bm)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005006{
5007 struct sk_buff *skb;
5008 dma_addr_t phys_addr;
5009
Marcin Wojtas3f518502014-07-10 16:52:13 -03005010 /* No recycle or too many buffers are in use, so allocate a new skb */
5011 skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
5012 if (!skb)
5013 return -ENOMEM;
5014
5015 mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
Thomas Petazzoni7ef7e1d2017-02-21 11:28:07 +01005016
Marcin Wojtas3f518502014-07-10 16:52:13 -03005017 return 0;
5018}
5019
5020/* Handle tx checksum */
5021static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5022{
5023 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5024 int ip_hdr_len = 0;
5025 u8 l4_proto;
5026
5027 if (skb->protocol == htons(ETH_P_IP)) {
5028 struct iphdr *ip4h = ip_hdr(skb);
5029
5030 /* Calculate IPv4 checksum and L4 checksum */
5031 ip_hdr_len = ip4h->ihl;
5032 l4_proto = ip4h->protocol;
5033 } else if (skb->protocol == htons(ETH_P_IPV6)) {
5034 struct ipv6hdr *ip6h = ipv6_hdr(skb);
5035
5036 /* Read l4_protocol from one of IPv6 extra headers */
5037 if (skb_network_header_len(skb) > 0)
5038 ip_hdr_len = (skb_network_header_len(skb) >> 2);
5039 l4_proto = ip6h->nexthdr;
5040 } else {
5041 return MVPP2_TXD_L4_CSUM_NOT;
5042 }
5043
5044 return mvpp2_txq_desc_csum(skb_network_offset(skb),
5045 skb->protocol, ip_hdr_len, l4_proto);
5046 }
5047
5048 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5049}
5050
5051static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
5052 struct mvpp2_rx_desc *rx_desc)
5053{
5054 struct mvpp2_buff_hdr *buff_hdr;
5055 struct sk_buff *skb;
5056 u32 rx_status = rx_desc->status;
5057 u32 buff_phys_addr;
5058 u32 buff_virt_addr;
5059 u32 buff_phys_addr_next;
5060 u32 buff_virt_addr_next;
5061 int mc_id;
5062 int pool_id;
5063
5064 pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5065 MVPP2_RXD_BM_POOL_ID_OFFS;
5066 buff_phys_addr = rx_desc->buf_phys_addr;
5067 buff_virt_addr = rx_desc->buf_cookie;
5068
5069 do {
5070 skb = (struct sk_buff *)buff_virt_addr;
5071 buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
5072
5073 mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
5074
5075 buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
5076 buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
5077
5078 /* Release buffer */
5079 mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
5080 buff_virt_addr, mc_id);
5081
5082 buff_phys_addr = buff_phys_addr_next;
5083 buff_virt_addr = buff_virt_addr_next;
5084
5085 } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
5086}
5087
5088/* Main rx processing */
5089static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5090 struct mvpp2_rx_queue *rxq)
5091{
5092 struct net_device *dev = port->dev;
Marcin Wojtasb5015852015-12-03 15:20:51 +01005093 int rx_received;
5094 int rx_done = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005095 u32 rcvd_pkts = 0;
5096 u32 rcvd_bytes = 0;
5097
5098 /* Get number of received packets and clamp the to-do */
5099 rx_received = mvpp2_rxq_received(port, rxq->id);
5100 if (rx_todo > rx_received)
5101 rx_todo = rx_received;
5102
Marcin Wojtasb5015852015-12-03 15:20:51 +01005103 while (rx_done < rx_todo) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005104 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5105 struct mvpp2_bm_pool *bm_pool;
5106 struct sk_buff *skb;
Marcin Wojtasb5015852015-12-03 15:20:51 +01005107 dma_addr_t phys_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005108 u32 bm, rx_status;
5109 int pool, rx_bytes, err;
5110
Marcin Wojtasb5015852015-12-03 15:20:51 +01005111 rx_done++;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005112 rx_status = rx_desc->status;
5113 rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
Marcin Wojtasb5015852015-12-03 15:20:51 +01005114 phys_addr = rx_desc->buf_phys_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005115
5116 bm = mvpp2_bm_cookie_build(rx_desc);
5117 pool = mvpp2_bm_cookie_pool_get(bm);
5118 bm_pool = &port->priv->bm_pools[pool];
5119 /* Check if buffer header is used */
5120 if (rx_status & MVPP2_RXD_BUF_HDR) {
5121 mvpp2_buff_hdr_rx(port, rx_desc);
5122 continue;
5123 }
5124
 5125 /* In case of an error, return the buffer pointer to the
 5126 * Buffer Manager. This refill request is handled by the
 5127 * hardware, and the information about the buffer is
 5128 * carried in the RX descriptor.
 5129 */
5130 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
Marcin Wojtasb5015852015-12-03 15:20:51 +01005131 err_drop_frame:
Marcin Wojtas3f518502014-07-10 16:52:13 -03005132 dev->stats.rx_errors++;
5133 mvpp2_rx_error(port, rx_desc);
Marcin Wojtasb5015852015-12-03 15:20:51 +01005134 /* Return the buffer to the pool */
Marcin Wojtas3f518502014-07-10 16:52:13 -03005135 mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
5136 rx_desc->buf_cookie);
5137 continue;
5138 }
5139
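		/* The skb pointer was stored in the descriptor cookie when
		 * the buffer was handed to the BM pool, so it can be
		 * recovered directly from the descriptor here.
		 */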
5140 skb = (struct sk_buff *)rx_desc->buf_cookie;
5141
Thomas Petazzoni7ef7e1d2017-02-21 11:28:07 +01005142 err = mvpp2_rx_refill(port, bm_pool, bm);
Marcin Wojtasb5015852015-12-03 15:20:51 +01005143 if (err) {
5144 netdev_err(port->dev, "failed to refill BM pools\n");
5145 goto err_drop_frame;
5146 }
5147
5148 dma_unmap_single(dev->dev.parent, phys_addr,
Marcin Wojtas4229d502015-12-03 15:20:50 +01005149 bm_pool->buf_size, DMA_FROM_DEVICE);
5150
Marcin Wojtas3f518502014-07-10 16:52:13 -03005151 rcvd_pkts++;
5152 rcvd_bytes += rx_bytes;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005153
5154 skb_reserve(skb, MVPP2_MH_SIZE);
5155 skb_put(skb, rx_bytes);
5156 skb->protocol = eth_type_trans(skb, dev);
5157 mvpp2_rx_csum(port, rx_status, skb);
5158
5159 napi_gro_receive(&port->napi, skb);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005160 }
5161
5162 if (rcvd_pkts) {
5163 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5164
5165 u64_stats_update_begin(&stats->syncp);
5166 stats->rx_packets += rcvd_pkts;
5167 stats->rx_bytes += rcvd_bytes;
5168 u64_stats_update_end(&stats->syncp);
5169 }
5170
5171 /* Update Rx queue management counters */
5172 wmb();
Marcin Wojtasb5015852015-12-03 15:20:51 +01005173 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005174
5175 return rx_todo;
5176}
5177
5178static inline void
5179tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
5180 struct mvpp2_tx_desc *desc)
5181{
5182 dma_unmap_single(dev, desc->buf_phys_addr,
5183 desc->data_size, DMA_TO_DEVICE);
5184 mvpp2_txq_desc_put(txq);
5185}
5186
5187/* Handle tx fragmentation processing */
5188static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5189 struct mvpp2_tx_queue *aggr_txq,
5190 struct mvpp2_tx_queue *txq)
5191{
5192 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5193 struct mvpp2_tx_desc *tx_desc;
5194 int i;
5195 dma_addr_t buf_phys_addr;
5196
5197 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5198 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5199 void *addr = page_address(frag->page.p) + frag->page_offset;
5200
5201 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5202 tx_desc->phys_txq = txq->id;
5203 tx_desc->data_size = frag->size;
5204
5205 buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
5206 tx_desc->data_size,
5207 DMA_TO_DEVICE);
5208 if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
5209 mvpp2_txq_desc_put(txq);
5210 goto error;
5211 }
5212
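		/* The descriptor can only hold an address aligned to
		 * MVPP2_TX_DESC_ALIGN; the low-order bits of the DMA address
		 * are carried separately as the packet offset.
		 */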
5213 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5214 tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
5215
5216 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5217 /* Last descriptor */
5218 tx_desc->command = MVPP2_TXD_L_DESC;
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005219 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005220 } else {
5221 /* Descriptor in the middle: Not First, Not Last */
5222 tx_desc->command = 0;
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005223 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005224 }
5225 }
5226
5227 return 0;
5228
5229error:
5230 /* Release all descriptors that were used to map fragments of
5231 * this packet, as well as the corresponding DMA mappings
5232 */
5233 for (i = i - 1; i >= 0; i--) {
5234 tx_desc = txq->descs + i;
5235 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5236 }
5237
5238 return -ENOMEM;
5239}
5240
5241/* Main tx processing */
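/* Descriptors are reserved on the per-CPU aggregated TX queue and
 * accounted against the selected per-port TX queue before the frame
 * and its fragments are DMA-mapped and handed to the hardware.
 */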
5242static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5243{
5244 struct mvpp2_port *port = netdev_priv(dev);
5245 struct mvpp2_tx_queue *txq, *aggr_txq;
5246 struct mvpp2_txq_pcpu *txq_pcpu;
5247 struct mvpp2_tx_desc *tx_desc;
5248 dma_addr_t buf_phys_addr;
5249 int frags = 0;
5250 u16 txq_id;
5251 u32 tx_cmd;
5252
5253 txq_id = skb_get_queue_mapping(skb);
5254 txq = port->txqs[txq_id];
5255 txq_pcpu = this_cpu_ptr(txq->pcpu);
5256 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5257
5258 frags = skb_shinfo(skb)->nr_frags + 1;
5259
5260 /* Check number of available descriptors */
5261 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5262 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5263 txq_pcpu, frags)) {
5264 frags = 0;
5265 goto out;
5266 }
5267
5268 /* Get a descriptor for the first part of the packet */
5269 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
5270 tx_desc->phys_txq = txq->id;
5271 tx_desc->data_size = skb_headlen(skb);
5272
5273 buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
5274 tx_desc->data_size, DMA_TO_DEVICE);
5275 if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
5276 mvpp2_txq_desc_put(txq);
5277 frags = 0;
5278 goto out;
5279 }
5280 tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
5281 tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
5282
5283 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5284
5285 if (frags == 1) {
5286 /* First and Last descriptor */
5287 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5288 tx_desc->command = tx_cmd;
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005289 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005290 } else {
5291 /* First but not Last */
5292 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5293 tx_desc->command = tx_cmd;
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005294 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005295
5296 /* Continue with other skb fragments */
5297 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
5298 tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
5299 frags = 0;
5300 goto out;
5301 }
5302 }
5303
5304 txq_pcpu->reserved_num -= frags;
5305 txq_pcpu->count += frags;
5306 aggr_txq->count += frags;
5307
5308 /* Enable transmit */
5309 wmb();
5310 mvpp2_aggr_txq_pend_desc_add(port, frags);
5311
5312 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5313 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5314
5315 netif_tx_stop_queue(nq);
5316 }
5317out:
5318 if (frags > 0) {
5319 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5320
5321 u64_stats_update_begin(&stats->syncp);
5322 stats->tx_packets++;
5323 stats->tx_bytes += skb->len;
5324 u64_stats_update_end(&stats->syncp);
5325 } else {
5326 dev->stats.tx_dropped++;
5327 dev_kfree_skb_any(skb);
5328 }
5329
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005330 /* Finalize TX processing */
5331 if (txq_pcpu->count >= txq->done_pkts_coal)
5332 mvpp2_txq_done(port, txq, txq_pcpu);
5333
5334 /* Set the timer in case not all frags were processed */
5335 if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5336 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5337
5338 mvpp2_timer_set(port_pcpu);
5339 }
5340
Marcin Wojtas3f518502014-07-10 16:52:13 -03005341 return NETDEV_TX_OK;
5342}
5343
5344static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5345{
5346 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5347 netdev_err(dev, "FCS error\n");
5348 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5349 netdev_err(dev, "rx fifo overrun error\n");
5350 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5351 netdev_err(dev, "tx fifo underrun error\n");
5352}
5353
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005354static int mvpp2_poll(struct napi_struct *napi, int budget)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005355{
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005356 u32 cause_rx_tx, cause_rx, cause_misc;
5357 int rx_done = 0;
5358 struct mvpp2_port *port = netdev_priv(napi->dev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005359
5360 /* Rx/Tx cause register
5361 *
5362 * Bits 0-15: each bit indicates received packets on the Rx queue
5363 * (bit 0 is for Rx queue 0).
5364 *
5365 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5366 * (bit 16 is for Tx queue 0).
5367 *
5368 * Each CPU has its own Rx/Tx cause register
5369 */
5370 cause_rx_tx = mvpp2_read(port->priv,
5371 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005372 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005373 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5374
5375 if (cause_misc) {
5376 mvpp2_cause_error(port->dev, cause_misc);
5377
5378 /* Clear the cause register */
5379 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
5380 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5381 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5382 }
5383
Marcin Wojtas3f518502014-07-10 16:52:13 -03005384 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5385
5386 /* Process RX packets */
5387 cause_rx |= port->pending_cause_rx;
5388 while (cause_rx && budget > 0) {
5389 int count;
5390 struct mvpp2_rx_queue *rxq;
5391
5392 rxq = mvpp2_get_rx_queue(port, cause_rx);
5393 if (!rxq)
5394 break;
5395
5396 count = mvpp2_rx(port, budget, rxq);
5397 rx_done += count;
5398 budget -= count;
5399 if (budget > 0) {
 5400 /* Clear the bit associated with this Rx queue
 5401 * so that the next iteration continues from
 5402 * the next Rx queue.
 5403 */
5404 cause_rx &= ~(1 << rxq->logic_rxq);
5405 }
5406 }
5407
5408 if (budget > 0) {
5409 cause_rx = 0;
Eric Dumazet6ad20162017-01-30 08:22:01 -08005410 napi_complete_done(napi, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005411
5412 mvpp2_interrupts_enable(port);
5413 }
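	/* Remember which Rx queues still have work pending (non-zero only
	 * when the budget was exhausted) so the next poll resumes there.
	 */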
5414 port->pending_cause_rx = cause_rx;
5415 return rx_done;
5416}
5417
5418/* Set hw internals when starting port */
5419static void mvpp2_start_dev(struct mvpp2_port *port)
5420{
Philippe Reynes8e072692016-06-28 00:08:11 +02005421 struct net_device *ndev = port->dev;
5422
Marcin Wojtas3f518502014-07-10 16:52:13 -03005423 mvpp2_gmac_max_rx_size_set(port);
5424 mvpp2_txp_max_tx_size_set(port);
5425
5426 napi_enable(&port->napi);
5427
5428 /* Enable interrupts on all CPUs */
5429 mvpp2_interrupts_enable(port);
5430
5431 mvpp2_port_enable(port);
Philippe Reynes8e072692016-06-28 00:08:11 +02005432 phy_start(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005433 netif_tx_start_all_queues(port->dev);
5434}
5435
5436/* Set hw internals when stopping port */
5437static void mvpp2_stop_dev(struct mvpp2_port *port)
5438{
Philippe Reynes8e072692016-06-28 00:08:11 +02005439 struct net_device *ndev = port->dev;
5440
Marcin Wojtas3f518502014-07-10 16:52:13 -03005441 /* Stop new packets from arriving to RXQs */
5442 mvpp2_ingress_disable(port);
5443
5444 mdelay(10);
5445
5446 /* Disable interrupts on all CPUs */
5447 mvpp2_interrupts_disable(port);
5448
5449 napi_disable(&port->napi);
5450
5451 netif_carrier_off(port->dev);
5452 netif_tx_stop_all_queues(port->dev);
5453
5454 mvpp2_egress_disable(port);
5455 mvpp2_port_disable(port);
Philippe Reynes8e072692016-06-28 00:08:11 +02005456 phy_stop(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005457}
5458
Marcin Wojtas3f518502014-07-10 16:52:13 -03005459static int mvpp2_check_ringparam_valid(struct net_device *dev,
5460 struct ethtool_ringparam *ring)
5461{
5462 u16 new_rx_pending = ring->rx_pending;
5463 u16 new_tx_pending = ring->tx_pending;
5464
5465 if (ring->rx_pending == 0 || ring->tx_pending == 0)
5466 return -EINVAL;
5467
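	/* Clamp oversized requests and round unaligned ones up: descriptor
	 * ring sizes are kept as multiples of 16 for Rx and 32 for Tx.
	 */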
5468 if (ring->rx_pending > MVPP2_MAX_RXD)
5469 new_rx_pending = MVPP2_MAX_RXD;
5470 else if (!IS_ALIGNED(ring->rx_pending, 16))
5471 new_rx_pending = ALIGN(ring->rx_pending, 16);
5472
5473 if (ring->tx_pending > MVPP2_MAX_TXD)
5474 new_tx_pending = MVPP2_MAX_TXD;
5475 else if (!IS_ALIGNED(ring->tx_pending, 32))
5476 new_tx_pending = ALIGN(ring->tx_pending, 32);
5477
5478 if (ring->rx_pending != new_rx_pending) {
5479 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
5480 ring->rx_pending, new_rx_pending);
5481 ring->rx_pending = new_rx_pending;
5482 }
5483
5484 if (ring->tx_pending != new_tx_pending) {
5485 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
5486 ring->tx_pending, new_tx_pending);
5487 ring->tx_pending = new_tx_pending;
5488 }
5489
5490 return 0;
5491}
5492
5493static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
5494{
5495 u32 mac_addr_l, mac_addr_m, mac_addr_h;
5496
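	/* The address is spread across the per-port GMAC control register
	 * (lowest byte) and the common LMS middle/high source-address
	 * registers.
	 */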
5497 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5498 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
5499 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
5500 addr[0] = (mac_addr_h >> 24) & 0xFF;
5501 addr[1] = (mac_addr_h >> 16) & 0xFF;
5502 addr[2] = (mac_addr_h >> 8) & 0xFF;
5503 addr[3] = mac_addr_h & 0xFF;
5504 addr[4] = mac_addr_m & 0xFF;
5505 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
5506}
5507
5508static int mvpp2_phy_connect(struct mvpp2_port *port)
5509{
5510 struct phy_device *phy_dev;
5511
5512 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
5513 port->phy_interface);
5514 if (!phy_dev) {
5515 netdev_err(port->dev, "cannot connect to phy\n");
5516 return -ENODEV;
5517 }
5518 phy_dev->supported &= PHY_GBIT_FEATURES;
5519 phy_dev->advertising = phy_dev->supported;
5520
Marcin Wojtas3f518502014-07-10 16:52:13 -03005521 port->link = 0;
5522 port->duplex = 0;
5523 port->speed = 0;
5524
5525 return 0;
5526}
5527
5528static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5529{
Philippe Reynes8e072692016-06-28 00:08:11 +02005530 struct net_device *ndev = port->dev;
5531
5532 phy_disconnect(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005533}
5534
5535static int mvpp2_open(struct net_device *dev)
5536{
5537 struct mvpp2_port *port = netdev_priv(dev);
5538 unsigned char mac_bcast[ETH_ALEN] = {
5539 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5540 int err;
5541
5542 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
5543 if (err) {
5544 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
5545 return err;
5546 }
5547 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
5548 dev->dev_addr, true);
5549 if (err) {
 5550 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
5551 return err;
5552 }
5553 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
5554 if (err) {
5555 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
5556 return err;
5557 }
5558 err = mvpp2_prs_def_flow(port);
5559 if (err) {
5560 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
5561 return err;
5562 }
5563
5564 /* Allocate the Rx/Tx queues */
5565 err = mvpp2_setup_rxqs(port);
5566 if (err) {
5567 netdev_err(port->dev, "cannot allocate Rx queues\n");
5568 return err;
5569 }
5570
5571 err = mvpp2_setup_txqs(port);
5572 if (err) {
5573 netdev_err(port->dev, "cannot allocate Tx queues\n");
5574 goto err_cleanup_rxqs;
5575 }
5576
5577 err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
5578 if (err) {
5579 netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
5580 goto err_cleanup_txqs;
5581 }
5582
 5583 /* The link is down by default */
5584 netif_carrier_off(port->dev);
5585
5586 err = mvpp2_phy_connect(port);
5587 if (err < 0)
5588 goto err_free_irq;
5589
5590 /* Unmask interrupts on all CPUs */
5591 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
5592
5593 mvpp2_start_dev(port);
5594
5595 return 0;
5596
5597err_free_irq:
5598 free_irq(port->irq, port);
5599err_cleanup_txqs:
5600 mvpp2_cleanup_txqs(port);
5601err_cleanup_rxqs:
5602 mvpp2_cleanup_rxqs(port);
5603 return err;
5604}
5605
5606static int mvpp2_stop(struct net_device *dev)
5607{
5608 struct mvpp2_port *port = netdev_priv(dev);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005609 struct mvpp2_port_pcpu *port_pcpu;
5610 int cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005611
5612 mvpp2_stop_dev(port);
5613 mvpp2_phy_disconnect(port);
5614
5615 /* Mask interrupts on all CPUs */
5616 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5617
5618 free_irq(port->irq, port);
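	/* Stop the deferred tx-done handling on every CPU: cancel the
	 * hrtimer and kill the tasklet it would have scheduled.
	 */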
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005619 for_each_present_cpu(cpu) {
5620 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
5621
5622 hrtimer_cancel(&port_pcpu->tx_done_timer);
5623 port_pcpu->timer_scheduled = false;
5624 tasklet_kill(&port_pcpu->tx_done_tasklet);
5625 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005626 mvpp2_cleanup_rxqs(port);
5627 mvpp2_cleanup_txqs(port);
5628
5629 return 0;
5630}
5631
5632static void mvpp2_set_rx_mode(struct net_device *dev)
5633{
5634 struct mvpp2_port *port = netdev_priv(dev);
5635 struct mvpp2 *priv = port->priv;
5636 struct netdev_hw_addr *ha;
5637 int id = port->id;
5638 bool allmulti = dev->flags & IFF_ALLMULTI;
5639
5640 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
5641 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
5642 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
5643
 5644 /* Remove all port->id's mcast entries */
5645 mvpp2_prs_mcast_del_all(priv, id);
5646
5647 if (allmulti && !netdev_mc_empty(dev)) {
5648 netdev_for_each_mc_addr(ha, dev)
5649 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
5650 }
5651}
5652
5653static int mvpp2_set_mac_address(struct net_device *dev, void *p)
5654{
5655 struct mvpp2_port *port = netdev_priv(dev);
5656 const struct sockaddr *addr = p;
5657 int err;
5658
5659 if (!is_valid_ether_addr(addr->sa_data)) {
5660 err = -EADDRNOTAVAIL;
5661 goto error;
5662 }
5663
5664 if (!netif_running(dev)) {
5665 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5666 if (!err)
5667 return 0;
5668 /* Reconfigure parser to accept the original MAC address */
5669 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5670 if (err)
5671 goto error;
5672 }
5673
5674 mvpp2_stop_dev(port);
5675
5676 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
5677 if (!err)
5678 goto out_start;
5679
 5680 /* Reconfigure parser to accept the original MAC address */
5681 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
5682 if (err)
5683 goto error;
5684out_start:
5685 mvpp2_start_dev(port);
5686 mvpp2_egress_enable(port);
5687 mvpp2_ingress_enable(port);
5688 return 0;
5689
5690error:
 5691 netdev_err(dev, "failed to change MAC address\n");
5692 return err;
5693}
5694
5695static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5696{
5697 struct mvpp2_port *port = netdev_priv(dev);
5698 int err;
5699
Jarod Wilson57779872016-10-17 15:54:06 -04005700 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
5701 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
5702 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5703 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005704 }
5705
5706 if (!netif_running(dev)) {
5707 err = mvpp2_bm_update_mtu(dev, mtu);
5708 if (!err) {
5709 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5710 return 0;
5711 }
5712
5713 /* Reconfigure BM to the original MTU */
5714 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5715 if (err)
5716 goto error;
5717 }
5718
5719 mvpp2_stop_dev(port);
5720
5721 err = mvpp2_bm_update_mtu(dev, mtu);
5722 if (!err) {
5723 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5724 goto out_start;
5725 }
5726
5727 /* Reconfigure BM to the original MTU */
5728 err = mvpp2_bm_update_mtu(dev, dev->mtu);
5729 if (err)
5730 goto error;
5731
5732out_start:
5733 mvpp2_start_dev(port);
5734 mvpp2_egress_enable(port);
5735 mvpp2_ingress_enable(port);
5736
5737 return 0;
5738
5739error:
 5740 netdev_err(dev, "failed to change MTU\n");
5741 return err;
5742}
5743
stephen hemmingerbc1f4472017-01-06 19:12:52 -08005744static void
Marcin Wojtas3f518502014-07-10 16:52:13 -03005745mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5746{
5747 struct mvpp2_port *port = netdev_priv(dev);
5748 unsigned int start;
5749 int cpu;
5750
5751 for_each_possible_cpu(cpu) {
5752 struct mvpp2_pcpu_stats *cpu_stats;
5753 u64 rx_packets;
5754 u64 rx_bytes;
5755 u64 tx_packets;
5756 u64 tx_bytes;
5757
5758 cpu_stats = per_cpu_ptr(port->stats, cpu);
5759 do {
5760 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
5761 rx_packets = cpu_stats->rx_packets;
5762 rx_bytes = cpu_stats->rx_bytes;
5763 tx_packets = cpu_stats->tx_packets;
5764 tx_bytes = cpu_stats->tx_bytes;
5765 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
5766
5767 stats->rx_packets += rx_packets;
5768 stats->rx_bytes += rx_bytes;
5769 stats->tx_packets += tx_packets;
5770 stats->tx_bytes += tx_bytes;
5771 }
5772
5773 stats->rx_errors = dev->stats.rx_errors;
5774 stats->rx_dropped = dev->stats.rx_dropped;
5775 stats->tx_dropped = dev->stats.tx_dropped;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005776}
5777
Thomas Petazzonibd695a52014-07-27 23:21:36 +02005778static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5779{
Thomas Petazzonibd695a52014-07-27 23:21:36 +02005780 int ret;
5781
Philippe Reynes8e072692016-06-28 00:08:11 +02005782 if (!dev->phydev)
Thomas Petazzonibd695a52014-07-27 23:21:36 +02005783 return -ENOTSUPP;
5784
Philippe Reynes8e072692016-06-28 00:08:11 +02005785 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
Thomas Petazzonibd695a52014-07-27 23:21:36 +02005786 if (!ret)
5787 mvpp2_link_event(dev);
5788
5789 return ret;
5790}
5791
Marcin Wojtas3f518502014-07-10 16:52:13 -03005792/* Ethtool methods */
5793
Marcin Wojtas3f518502014-07-10 16:52:13 -03005794/* Set interrupt coalescing for ethtools */
5795static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5796 struct ethtool_coalesce *c)
5797{
5798 struct mvpp2_port *port = netdev_priv(dev);
5799 int queue;
5800
5801 for (queue = 0; queue < rxq_number; queue++) {
5802 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5803
5804 rxq->time_coal = c->rx_coalesce_usecs;
5805 rxq->pkts_coal = c->rx_max_coalesced_frames;
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01005806 mvpp2_rx_pkts_coal_set(port, rxq);
5807 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005808 }
5809
5810 for (queue = 0; queue < txq_number; queue++) {
5811 struct mvpp2_tx_queue *txq = port->txqs[queue];
5812
5813 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5814 }
5815
Marcin Wojtas3f518502014-07-10 16:52:13 -03005816 return 0;
5817}
5818
 5819/* Get coalescing for ethtools */
5820static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
5821 struct ethtool_coalesce *c)
5822{
5823 struct mvpp2_port *port = netdev_priv(dev);
5824
5825 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
5826 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5827 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5828 return 0;
5829}
5830
5831static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5832 struct ethtool_drvinfo *drvinfo)
5833{
5834 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5835 sizeof(drvinfo->driver));
5836 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5837 sizeof(drvinfo->version));
5838 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5839 sizeof(drvinfo->bus_info));
5840}
5841
5842static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
5843 struct ethtool_ringparam *ring)
5844{
5845 struct mvpp2_port *port = netdev_priv(dev);
5846
5847 ring->rx_max_pending = MVPP2_MAX_RXD;
5848 ring->tx_max_pending = MVPP2_MAX_TXD;
5849 ring->rx_pending = port->rx_ring_size;
5850 ring->tx_pending = port->tx_ring_size;
5851}
5852
5853static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
5854 struct ethtool_ringparam *ring)
5855{
5856 struct mvpp2_port *port = netdev_priv(dev);
5857 u16 prev_rx_ring_size = port->rx_ring_size;
5858 u16 prev_tx_ring_size = port->tx_ring_size;
5859 int err;
5860
5861 err = mvpp2_check_ringparam_valid(dev, ring);
5862 if (err)
5863 return err;
5864
5865 if (!netif_running(dev)) {
5866 port->rx_ring_size = ring->rx_pending;
5867 port->tx_ring_size = ring->tx_pending;
5868 return 0;
5869 }
5870
5871 /* The interface is running, so we have to force a
5872 * reallocation of the queues
5873 */
5874 mvpp2_stop_dev(port);
5875 mvpp2_cleanup_rxqs(port);
5876 mvpp2_cleanup_txqs(port);
5877
5878 port->rx_ring_size = ring->rx_pending;
5879 port->tx_ring_size = ring->tx_pending;
5880
5881 err = mvpp2_setup_rxqs(port);
5882 if (err) {
5883 /* Reallocate Rx queues with the original ring size */
5884 port->rx_ring_size = prev_rx_ring_size;
5885 ring->rx_pending = prev_rx_ring_size;
5886 err = mvpp2_setup_rxqs(port);
5887 if (err)
5888 goto err_out;
5889 }
5890 err = mvpp2_setup_txqs(port);
5891 if (err) {
5892 /* Reallocate Tx queues with the original ring size */
5893 port->tx_ring_size = prev_tx_ring_size;
5894 ring->tx_pending = prev_tx_ring_size;
5895 err = mvpp2_setup_txqs(port);
5896 if (err)
5897 goto err_clean_rxqs;
5898 }
5899
5900 mvpp2_start_dev(port);
5901 mvpp2_egress_enable(port);
5902 mvpp2_ingress_enable(port);
5903
5904 return 0;
5905
5906err_clean_rxqs:
5907 mvpp2_cleanup_rxqs(port);
5908err_out:
 5909 netdev_err(dev, "failed to change ring parameters\n");
5910 return err;
5911}
5912
5913/* Device ops */
5914
5915static const struct net_device_ops mvpp2_netdev_ops = {
5916 .ndo_open = mvpp2_open,
5917 .ndo_stop = mvpp2_stop,
5918 .ndo_start_xmit = mvpp2_tx,
5919 .ndo_set_rx_mode = mvpp2_set_rx_mode,
5920 .ndo_set_mac_address = mvpp2_set_mac_address,
5921 .ndo_change_mtu = mvpp2_change_mtu,
5922 .ndo_get_stats64 = mvpp2_get_stats64,
Thomas Petazzonibd695a52014-07-27 23:21:36 +02005923 .ndo_do_ioctl = mvpp2_ioctl,
Marcin Wojtas3f518502014-07-10 16:52:13 -03005924};
5925
5926static const struct ethtool_ops mvpp2_eth_tool_ops = {
Florian Fainelli00606c42016-11-15 11:19:48 -08005927 .nway_reset = phy_ethtool_nway_reset,
Marcin Wojtas3f518502014-07-10 16:52:13 -03005928 .get_link = ethtool_op_get_link,
Marcin Wojtas3f518502014-07-10 16:52:13 -03005929 .set_coalesce = mvpp2_ethtool_set_coalesce,
5930 .get_coalesce = mvpp2_ethtool_get_coalesce,
5931 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
5932 .get_ringparam = mvpp2_ethtool_get_ringparam,
5933 .set_ringparam = mvpp2_ethtool_set_ringparam,
Philippe Reynesfb773e92016-06-28 00:08:12 +02005934 .get_link_ksettings = phy_ethtool_get_link_ksettings,
5935 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Marcin Wojtas3f518502014-07-10 16:52:13 -03005936};
5937
5938/* Driver initialization */
5939
5940static void mvpp2_port_power_up(struct mvpp2_port *port)
5941{
5942 mvpp2_port_mii_set(port);
5943 mvpp2_port_periodic_xon_disable(port);
Marcin Wojtas08a23752014-07-21 13:48:12 -03005944 mvpp2_port_fc_adv_enable(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005945 mvpp2_port_reset(port);
5946}
5947
5948/* Initialize port HW */
5949static int mvpp2_port_init(struct mvpp2_port *port)
5950{
5951 struct device *dev = port->dev->dev.parent;
5952 struct mvpp2 *priv = port->priv;
5953 struct mvpp2_txq_pcpu *txq_pcpu;
5954 int queue, cpu, err;
5955
5956 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
5957 return -EINVAL;
5958
5959 /* Disable port */
5960 mvpp2_egress_disable(port);
5961 mvpp2_port_disable(port);
5962
5963 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
5964 GFP_KERNEL);
5965 if (!port->txqs)
5966 return -ENOMEM;
5967
5968 /* Associate physical Tx queues to this port and initialize.
5969 * The mapping is predefined.
5970 */
5971 for (queue = 0; queue < txq_number; queue++) {
5972 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5973 struct mvpp2_tx_queue *txq;
5974
5975 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
Christophe Jaillet177c8d12017-02-19 10:19:57 +01005976 if (!txq) {
5977 err = -ENOMEM;
5978 goto err_free_percpu;
5979 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005980
5981 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5982 if (!txq->pcpu) {
5983 err = -ENOMEM;
5984 goto err_free_percpu;
5985 }
5986
5987 txq->id = queue_phy_id;
5988 txq->log_id = queue;
5989 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5990 for_each_present_cpu(cpu) {
5991 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5992 txq_pcpu->cpu = cpu;
5993 }
5994
5995 port->txqs[queue] = txq;
5996 }
5997
5998 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
5999 GFP_KERNEL);
6000 if (!port->rxqs) {
6001 err = -ENOMEM;
6002 goto err_free_percpu;
6003 }
6004
6005 /* Allocate and initialize Rx queue for this port */
6006 for (queue = 0; queue < rxq_number; queue++) {
6007 struct mvpp2_rx_queue *rxq;
6008
6009 /* Map physical Rx queue to port's logical Rx queue */
6010 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08006011 if (!rxq) {
6012 err = -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006013 goto err_free_percpu;
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08006014 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006015 /* Map this Rx queue to a physical queue */
6016 rxq->id = port->first_rxq + queue;
6017 rxq->port = port->id;
6018 rxq->logic_rxq = queue;
6019
6020 port->rxqs[queue] = rxq;
6021 }
6022
6023 /* Configure Rx queue group interrupt for this port */
6024 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
6025
6026 /* Create Rx descriptor rings */
6027 for (queue = 0; queue < rxq_number; queue++) {
6028 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6029
6030 rxq->size = port->rx_ring_size;
6031 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6032 rxq->time_coal = MVPP2_RX_COAL_USEC;
6033 }
6034
6035 mvpp2_ingress_disable(port);
6036
6037 /* Port default configuration */
6038 mvpp2_defaults_set(port);
6039
6040 /* Port's classifier configuration */
6041 mvpp2_cls_oversize_rxq_set(port);
6042 mvpp2_cls_port_config(port);
6043
6044 /* Provide an initial Rx packet size */
6045 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6046
6047 /* Initialize pools for swf */
6048 err = mvpp2_swf_bm_pool_init(port);
6049 if (err)
6050 goto err_free_percpu;
6051
6052 return 0;
6053
6054err_free_percpu:
6055 for (queue = 0; queue < txq_number; queue++) {
6056 if (!port->txqs[queue])
6057 continue;
6058 free_percpu(port->txqs[queue]->pcpu);
6059 }
6060 return err;
6061}
6062
6063/* Ports initialization */
6064static int mvpp2_port_probe(struct platform_device *pdev,
6065 struct device_node *port_node,
6066 struct mvpp2 *priv,
6067 int *next_first_rxq)
6068{
6069 struct device_node *phy_node;
6070 struct mvpp2_port *port;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006071 struct mvpp2_port_pcpu *port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006072 struct net_device *dev;
6073 struct resource *res;
6074 const char *dt_mac_addr;
6075 const char *mac_from;
6076 char hw_mac_addr[ETH_ALEN];
6077 u32 id;
6078 int features;
6079 int phy_mode;
6080 int priv_common_regs_num = 2;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006081 int err, i, cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006082
6083 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6084 rxq_number);
6085 if (!dev)
6086 return -ENOMEM;
6087
6088 phy_node = of_parse_phandle(port_node, "phy", 0);
6089 if (!phy_node) {
6090 dev_err(&pdev->dev, "missing phy\n");
6091 err = -ENODEV;
6092 goto err_free_netdev;
6093 }
6094
6095 phy_mode = of_get_phy_mode(port_node);
6096 if (phy_mode < 0) {
6097 dev_err(&pdev->dev, "incorrect phy mode\n");
6098 err = phy_mode;
6099 goto err_free_netdev;
6100 }
6101
6102 if (of_property_read_u32(port_node, "port-id", &id)) {
6103 err = -EINVAL;
6104 dev_err(&pdev->dev, "missing port-id value\n");
6105 goto err_free_netdev;
6106 }
6107
6108 dev->tx_queue_len = MVPP2_MAX_TXD;
6109 dev->watchdog_timeo = 5 * HZ;
6110 dev->netdev_ops = &mvpp2_netdev_ops;
6111 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6112
6113 port = netdev_priv(dev);
6114
6115 port->irq = irq_of_parse_and_map(port_node, 0);
6116 if (port->irq <= 0) {
6117 err = -EINVAL;
6118 goto err_free_netdev;
6119 }
6120
6121 if (of_property_read_bool(port_node, "marvell,loopback"))
6122 port->flags |= MVPP2_F_LOOPBACK;
6123
6124 port->priv = priv;
6125 port->id = id;
6126 port->first_rxq = *next_first_rxq;
6127 port->phy_node = phy_node;
6128 port->phy_interface = phy_mode;
6129
6130 res = platform_get_resource(pdev, IORESOURCE_MEM,
6131 priv_common_regs_num + id);
6132 port->base = devm_ioremap_resource(&pdev->dev, res);
6133 if (IS_ERR(port->base)) {
6134 err = PTR_ERR(port->base);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006135 goto err_free_irq;
6136 }
6137
6138 /* Alloc per-cpu stats */
6139 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6140 if (!port->stats) {
6141 err = -ENOMEM;
6142 goto err_free_irq;
6143 }
6144
6145 dt_mac_addr = of_get_mac_address(port_node);
6146 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6147 mac_from = "device tree";
6148 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6149 } else {
6150 mvpp2_get_mac_address(port, hw_mac_addr);
6151 if (is_valid_ether_addr(hw_mac_addr)) {
6152 mac_from = "hardware";
6153 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6154 } else {
6155 mac_from = "random";
6156 eth_hw_addr_random(dev);
6157 }
6158 }
6159
6160 port->tx_ring_size = MVPP2_MAX_TXD;
6161 port->rx_ring_size = MVPP2_MAX_RXD;
6162 port->dev = dev;
6163 SET_NETDEV_DEV(dev, &pdev->dev);
6164
6165 err = mvpp2_port_init(port);
6166 if (err < 0) {
6167 dev_err(&pdev->dev, "failed to init port %d\n", id);
6168 goto err_free_stats;
6169 }
6170 mvpp2_port_power_up(port);
6171
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006172 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6173 if (!port->pcpu) {
6174 err = -ENOMEM;
6175 goto err_free_txq_pcpu;
6176 }
6177
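	/* Set up per-CPU deferred tx-done handling: a pinned hrtimer kicks
	 * a tasklet that reclaims completed TX descriptors whenever the
	 * xmit path (see mvpp2_tx()) did not reach the coalescing threshold
	 * itself.
	 */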
6178 for_each_present_cpu(cpu) {
6179 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6180
6181 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6182 HRTIMER_MODE_REL_PINNED);
6183 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6184 port_pcpu->timer_scheduled = false;
6185
6186 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6187 (unsigned long)dev);
6188 }
6189
Marcin Wojtas3f518502014-07-10 16:52:13 -03006190 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6191 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6192 dev->features = features | NETIF_F_RXCSUM;
6193 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6194 dev->vlan_features |= features;
6195
Jarod Wilson57779872016-10-17 15:54:06 -04006196 /* MTU range: 68 - 9676 */
6197 dev->min_mtu = ETH_MIN_MTU;
6198 /* 9676 == 9700 - 20 and rounding to 8 */
6199 dev->max_mtu = 9676;
6200
Marcin Wojtas3f518502014-07-10 16:52:13 -03006201 err = register_netdev(dev);
6202 if (err < 0) {
6203 dev_err(&pdev->dev, "failed to register netdev\n");
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006204 goto err_free_port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006205 }
6206 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6207
6208 /* Increment the first Rx queue number to be used by the next port */
6209 *next_first_rxq += rxq_number;
6210 priv->port_list[id] = port;
6211 return 0;
6212
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006213err_free_port_pcpu:
6214 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006215err_free_txq_pcpu:
6216 for (i = 0; i < txq_number; i++)
6217 free_percpu(port->txqs[i]->pcpu);
6218err_free_stats:
6219 free_percpu(port->stats);
6220err_free_irq:
6221 irq_dispose_mapping(port->irq);
6222err_free_netdev:
Peter Chenccb80392016-08-01 15:02:37 +08006223 of_node_put(phy_node);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006224 free_netdev(dev);
6225 return err;
6226}
6227
6228/* Ports removal routine */
6229static void mvpp2_port_remove(struct mvpp2_port *port)
6230{
6231 int i;
6232
6233 unregister_netdev(port->dev);
Peter Chenccb80392016-08-01 15:02:37 +08006234 of_node_put(port->phy_node);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006235 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006236 free_percpu(port->stats);
6237 for (i = 0; i < txq_number; i++)
6238 free_percpu(port->txqs[i]->pcpu);
6239 irq_dispose_mapping(port->irq);
6240 free_netdev(port->dev);
6241}
6242
6243/* Initialize decoding windows */
6244static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6245 struct mvpp2 *priv)
6246{
6247 u32 win_enable;
6248 int i;
6249
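	/* Clear all six address decoding windows first (only the first four
	 * have remap registers), then program and enable one window per
	 * DRAM chip select.
	 */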
6250 for (i = 0; i < 6; i++) {
6251 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6252 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6253
6254 if (i < 4)
6255 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6256 }
6257
6258 win_enable = 0;
6259
6260 for (i = 0; i < dram->num_cs; i++) {
6261 const struct mbus_dram_window *cs = dram->cs + i;
6262
6263 mvpp2_write(priv, MVPP2_WIN_BASE(i),
6264 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6265 dram->mbus_dram_target_id);
6266
6267 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6268 (cs->size - 1) & 0xffff0000);
6269
6270 win_enable |= (1 << i);
6271 }
6272
6273 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
6274}
6275
 6276/* Initialize Rx FIFOs */
6277static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6278{
6279 int port;
6280
6281 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6282 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6283 MVPP2_RX_FIFO_PORT_DATA_SIZE);
6284 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6285 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
6286 }
6287
6288 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6289 MVPP2_RX_FIFO_PORT_MIN_PKT);
6290 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6291}
6292
6293/* Initialize network controller common part HW */
6294static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6295{
6296 const struct mbus_dram_target_info *dram_target_info;
6297 int err, i;
Marcin Wojtas08a23752014-07-21 13:48:12 -03006298 u32 val;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006299
6300 /* Checks for hardware constraints */
6301 if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
6302 (txq_number > MVPP2_MAX_TXQ)) {
6303 dev_err(&pdev->dev, "invalid queue size parameter\n");
6304 return -EINVAL;
6305 }
6306
6307 /* MBUS windows configuration */
6308 dram_target_info = mv_mbus_dram_info();
6309 if (dram_target_info)
6310 mvpp2_conf_mbus_windows(dram_target_info, priv);
6311
Marcin Wojtas08a23752014-07-21 13:48:12 -03006312 /* Disable HW PHY polling */
6313 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6314 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6315 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6316
Marcin Wojtas3f518502014-07-10 16:52:13 -03006317 /* Allocate and initialize aggregated TXQs */
6318 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
6319 sizeof(struct mvpp2_tx_queue),
6320 GFP_KERNEL);
6321 if (!priv->aggr_txqs)
6322 return -ENOMEM;
6323
6324 for_each_present_cpu(i) {
6325 priv->aggr_txqs[i].id = i;
6326 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6327 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
6328 MVPP2_AGGR_TXQ_SIZE, i, priv);
6329 if (err < 0)
6330 return err;
6331 }
6332
6333 /* Rx Fifo Init */
6334 mvpp2_rx_fifo_init(priv);
6335
6336 /* Reset Rx queue group interrupt configuration */
6337 for (i = 0; i < MVPP2_MAX_PORTS; i++)
6338 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
6339
6340 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6341 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6342
 6343 /* Allow cache snoop when transmitting packets */
6344 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6345
6346 /* Buffer Manager initialization */
6347 err = mvpp2_bm_init(pdev, priv);
6348 if (err < 0)
6349 return err;
6350
6351 /* Parser default initialization */
6352 err = mvpp2_prs_default_init(pdev, priv);
6353 if (err < 0)
6354 return err;
6355
6356 /* Classifier default initialization */
6357 mvpp2_cls_init(priv);
6358
6359 return 0;
6360}
6361
6362static int mvpp2_probe(struct platform_device *pdev)
6363{
6364 struct device_node *dn = pdev->dev.of_node;
6365 struct device_node *port_node;
6366 struct mvpp2 *priv;
6367 struct resource *res;
6368 int port_count, first_rxq;
6369 int err;
6370
6371 priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
6372 if (!priv)
6373 return -ENOMEM;
6374
6375 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6376 priv->base = devm_ioremap_resource(&pdev->dev, res);
6377 if (IS_ERR(priv->base))
6378 return PTR_ERR(priv->base);
6379
6380 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6381 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
6382 if (IS_ERR(priv->lms_base))
6383 return PTR_ERR(priv->lms_base);
6384
6385 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6386 if (IS_ERR(priv->pp_clk))
6387 return PTR_ERR(priv->pp_clk);
6388 err = clk_prepare_enable(priv->pp_clk);
6389 if (err < 0)
6390 return err;
6391
6392 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
6393 if (IS_ERR(priv->gop_clk)) {
6394 err = PTR_ERR(priv->gop_clk);
6395 goto err_pp_clk;
6396 }
6397 err = clk_prepare_enable(priv->gop_clk);
6398 if (err < 0)
6399 goto err_pp_clk;
6400
6401 /* Get system's tclk rate */
6402 priv->tclk = clk_get_rate(priv->pp_clk);
6403
6404 /* Initialize network controller */
6405 err = mvpp2_init(pdev, priv);
6406 if (err < 0) {
6407 dev_err(&pdev->dev, "failed to initialize controller\n");
6408 goto err_gop_clk;
6409 }
6410
6411 port_count = of_get_available_child_count(dn);
6412 if (port_count == 0) {
6413 dev_err(&pdev->dev, "no ports enabled\n");
Wei Yongjun575a1932014-07-20 22:02:43 +08006414 err = -ENODEV;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006415 goto err_gop_clk;
6416 }
6417
6418 priv->port_list = devm_kcalloc(&pdev->dev, port_count,
6419 sizeof(struct mvpp2_port *),
6420 GFP_KERNEL);
6421 if (!priv->port_list) {
6422 err = -ENOMEM;
6423 goto err_gop_clk;
6424 }
6425
6426 /* Initialize ports */
6427 first_rxq = 0;
6428 for_each_available_child_of_node(dn, port_node) {
6429 err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
6430 if (err < 0)
6431 goto err_gop_clk;
6432 }
6433
6434 platform_set_drvdata(pdev, priv);
6435 return 0;
6436
6437err_gop_clk:
6438 clk_disable_unprepare(priv->gop_clk);
6439err_pp_clk:
6440 clk_disable_unprepare(priv->pp_clk);
6441 return err;
6442}
6443
6444static int mvpp2_remove(struct platform_device *pdev)
6445{
6446 struct mvpp2 *priv = platform_get_drvdata(pdev);
6447 struct device_node *dn = pdev->dev.of_node;
6448 struct device_node *port_node;
6449 int i = 0;
6450
6451 for_each_available_child_of_node(dn, port_node) {
6452 if (priv->port_list[i])
6453 mvpp2_port_remove(priv->port_list[i]);
6454 i++;
6455 }
6456
6457 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
6458 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
6459
6460 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
6461 }
6462
6463 for_each_present_cpu(i) {
6464 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
6465
6466 dma_free_coherent(&pdev->dev,
6467 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
6468 aggr_txq->descs,
6469 aggr_txq->descs_phys);
6470 }
6471
6472 clk_disable_unprepare(priv->pp_clk);
6473 clk_disable_unprepare(priv->gop_clk);
6474
6475 return 0;
6476}
6477
6478static const struct of_device_id mvpp2_match[] = {
6479 { .compatible = "marvell,armada-375-pp2" },
6480 { }
6481};
6482MODULE_DEVICE_TABLE(of, mvpp2_match);
6483
6484static struct platform_driver mvpp2_driver = {
6485 .probe = mvpp2_probe,
6486 .remove = mvpp2_remove,
6487 .driver = {
6488 .name = MVPP2_DRIVER_NAME,
6489 .of_match_table = mvpp2_match,
6490 },
6491};
6492
6493module_platform_driver(mvpp2_driver);
6494
6495MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
6496MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
Ezequiel Garciac6340992014-07-14 10:34:47 -03006497MODULE_LICENSE("GPL v2");