Marcin Wojtas3f518502014-07-10 16:52:13 -03001/*
2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Marcin Wojtas <mw@semihalf.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/platform_device.h>
17#include <linux/skbuff.h>
18#include <linux/inetdevice.h>
19#include <linux/mbus.h>
20#include <linux/module.h>
21#include <linux/interrupt.h>
22#include <linux/cpumask.h>
23#include <linux/of.h>
24#include <linux/of_irq.h>
25#include <linux/of_mdio.h>
26#include <linux/of_net.h>
27#include <linux/of_address.h>
Thomas Petazzonifaca9242017-03-07 16:53:06 +010028#include <linux/of_device.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030029#include <linux/phy.h>
30#include <linux/clk.h>
Marcin Wojtasedc660f2015-08-06 19:00:30 +020031#include <linux/hrtimer.h>
32#include <linux/ktime.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030033#include <uapi/linux/ppp_defs.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36
37/* RX Fifo Registers */
38#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
39#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
40#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
41#define MVPP2_RX_FIFO_INIT_REG 0x64
42
43/* RX DMA Top Registers */
44#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
45#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
46#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
47#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
48#define MVPP2_POOL_BUF_SIZE_OFFSET 5
49#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
50#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
51#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
52#define MVPP2_RXQ_POOL_SHORT_OFFS 20
Thomas Petazzoni5eac8922017-03-07 16:53:10 +010053#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
54#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
Marcin Wojtas3f518502014-07-10 16:52:13 -030055#define MVPP2_RXQ_POOL_LONG_OFFS 24
Thomas Petazzoni5eac8922017-03-07 16:53:10 +010056#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
57#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
Marcin Wojtas3f518502014-07-10 16:52:13 -030058#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
59#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
60#define MVPP2_RXQ_DISABLE_MASK BIT(31)
61
62/* Parser Registers */
63#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
64#define MVPP2_PRS_PORT_LU_MAX 0xf
65#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
66#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
67#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
68#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
69#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
70#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
71#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
72#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
73#define MVPP2_PRS_TCAM_IDX_REG 0x1100
74#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
75#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
76#define MVPP2_PRS_SRAM_IDX_REG 0x1200
77#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
78#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
79#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
80
81/* Classifier Registers */
82#define MVPP2_CLS_MODE_REG 0x1800
83#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
84#define MVPP2_CLS_PORT_WAY_REG 0x1810
85#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
86#define MVPP2_CLS_LKP_INDEX_REG 0x1814
87#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
88#define MVPP2_CLS_LKP_TBL_REG 0x1818
89#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
90#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
91#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
92#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
93#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
94#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
95#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
96#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
97#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
98#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
99#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
100#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
101
102/* Descriptor Manager Top Registers */
103#define MVPP2_RXQ_NUM_REG 0x2040
104#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
Thomas Petazzonib02f31f2017-03-07 16:53:12 +0100105#define MVPP22_DESC_ADDR_OFFS 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300106#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
107#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
108#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
109#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
110#define MVPP2_RXQ_NUM_NEW_OFFSET 16
111#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
112#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
113#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
114#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
115#define MVPP2_RXQ_THRESH_REG 0x204c
116#define MVPP2_OCCUPIED_THRESH_OFFSET 0
117#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
118#define MVPP2_RXQ_INDEX_REG 0x2050
119#define MVPP2_TXQ_NUM_REG 0x2080
120#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
121#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
122#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
123#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
Marcin Wojtas3f518502014-07-10 16:52:13 -0300124#define MVPP2_TXQ_INDEX_REG 0x2098
125#define MVPP2_TXQ_PREF_BUF_REG 0x209c
126#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
127#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
128#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
129#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
130#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
131#define MVPP2_TXQ_PENDING_REG 0x20a0
132#define MVPP2_TXQ_PENDING_MASK 0x3fff
133#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
134#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
135#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
136#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
137#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
138#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
139#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
140#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
141#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
142#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
143#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
Thomas Petazzonib02f31f2017-03-07 16:53:12 +0100144#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300145#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
146#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
147#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
148#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
149#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
150
151/* MBUS bridge registers */
152#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
153#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
154#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
155#define MVPP2_BASE_ADDR_ENABLE 0x4060
156
157/* Interrupt Cause and Mask registers */
158#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
Thomas Petazzoniab426762017-02-21 11:28:04 +0100159#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300160#define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq))
161#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
162#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
163#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
164#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
165#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
166#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
167#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
168#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
169#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
170#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
171#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
172#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
173#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
174#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
175#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
176#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
177#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
178#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
179
180/* Buffer Manager registers */
181#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
182#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
183#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
184#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
185#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
186#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
187#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
188#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
189#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
190#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
191#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
192#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
193#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
194#define MVPP2_BM_START_MASK BIT(0)
195#define MVPP2_BM_STOP_MASK BIT(1)
196#define MVPP2_BM_STATE_MASK BIT(4)
197#define MVPP2_BM_LOW_THRESH_OFFS 8
198#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
199#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
200 MVPP2_BM_LOW_THRESH_OFFS)
201#define MVPP2_BM_HIGH_THRESH_OFFS 16
202#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
203#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
204 MVPP2_BM_HIGH_THRESH_OFFS)
205#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
206#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
207#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
208#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
209#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
210#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
211#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
212#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
213#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
214#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100215#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
216#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
217#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
218#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300219#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
220#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
221#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
222#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
223#define MVPP2_BM_VIRT_RLS_REG 0x64c0
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100224#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
225#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
226#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
227#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300228
229/* TX Scheduler registers */
230#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
231#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
232#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
233#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
234#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
235#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
236#define MVPP2_TXP_SCHED_MTU_REG 0x801c
237#define MVPP2_TXP_MTU_MAX 0x7FFFF
238#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
239#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
240#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
241#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
242#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
243#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
244#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
245#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
246#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
247#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
248#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
249#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
250#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
251#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
252
253/* TX general registers */
254#define MVPP2_TX_SNOOP_REG 0x8800
255#define MVPP2_TX_PORT_FLUSH_REG 0x8810
256#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
257
258/* LMS registers */
259#define MVPP2_SRC_ADDR_MIDDLE 0x24
260#define MVPP2_SRC_ADDR_HIGH 0x28
Marcin Wojtas08a23752014-07-21 13:48:12 -0300261#define MVPP2_PHY_AN_CFG0_REG 0x34
262#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300263#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
Thomas Petazzoni31d76772017-02-21 11:28:10 +0100264#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
Marcin Wojtas3f518502014-07-10 16:52:13 -0300265
266/* Per-port registers */
267#define MVPP2_GMAC_CTRL_0_REG 0x0
268#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
269#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
270#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
271#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
272#define MVPP2_GMAC_CTRL_1_REG 0x4
Marcin Wojtasb5c0a802014-07-21 13:48:11 -0300273#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300274#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
275#define MVPP2_GMAC_PCS_LB_EN_BIT 6
276#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
277#define MVPP2_GMAC_SA_LOW_OFFS 7
278#define MVPP2_GMAC_CTRL_2_REG 0x8
279#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
280#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
281#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
282#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
283#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
284#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
285#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
286#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
287#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
288#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
Marcin Wojtas08a23752014-07-21 13:48:12 -0300289#define MVPP2_GMAC_FC_ADV_EN BIT(9)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300290#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
291#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
292#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
293#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
294#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
295#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
296 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
Thomas Petazzoni26975822017-03-07 16:53:14 +0100297#define MVPP22_GMAC_CTRL_4_REG 0x90
298#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
299#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
300#define MVPP22_CTRL4_SYNC_BYPASS BIT(6)
301#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
302
303/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
304 * relative to port->base.
305 */
306#define MVPP22_XLG_CTRL3_REG 0x11c
307#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
308#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
309
310/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
311#define MVPP22_SMI_MISC_CFG_REG 0x1204
312#define MVPP22_SMI_POLLING_EN BIT(10)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300313
Thomas Petazzonia7868412017-03-07 16:53:13 +0100314#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
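/* For illustration: MVPP22_GMAC_BASE(0) = 0x7e00, MVPP22_GMAC_BASE(1) =
 * 0x8e00, MVPP22_GMAC_BASE(2) = 0x9e00.
 */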
315
Marcin Wojtas3f518502014-07-10 16:52:13 -0300316#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
317
318/* Descriptor ring Macros */
319#define MVPP2_QUEUE_NEXT_DESC(q, index) \
320 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
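/* For illustration: with a ring of MVPP2_MAX_RXD (128) descriptors and
 * last_desc = 127, MVPP2_QUEUE_NEXT_DESC() walks 0, 1, ..., 127 and then
 * wraps back to 0.
 */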
321
322/* Various constants */
323
324/* Coalescing */
325#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200326#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
Marcin Wojtas3f518502014-07-10 16:52:13 -0300327#define MVPP2_RX_COAL_PKTS 32
328#define MVPP2_RX_COAL_USEC 100
329
330/* The two-byte Marvell header. It either contains a special value used
331 * by Marvell switches when a specific hardware mode is enabled (not
332 * supported by this driver) or is filled with zeroes automatically on
333 * the RX side. Since those two bytes sit in front of the Ethernet
334 * header, they leave the IP header aligned on a 4-byte boundary
335 * automatically: the hardware skips those two bytes on its
336 * own.
337 */
338#define MVPP2_MH_SIZE 2
339#define MVPP2_ETH_TYPE_LEN 2
340#define MVPP2_PPPOE_HDR_SIZE 8
341#define MVPP2_VLAN_TAG_LEN 4
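/* Worked example of the alignment described above (assuming the standard
 * 14-byte Ethernet header, ETH_HLEN): MVPP2_MH_SIZE (2) + ETH_HLEN (14) =
 * 16 bytes, so with the Marvell header prepended the IP header starts on
 * a 4-byte boundary without any extra padding.
 */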
342
343/* Lbtd 802.3 type */
344#define MVPP2_IP_LBDT_TYPE 0xfffa
345
Marcin Wojtas3f518502014-07-10 16:52:13 -0300346#define MVPP2_TX_CSUM_MAX_SIZE 9800
347
348/* Timeout constants */
349#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
350#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
351
352#define MVPP2_TX_MTU_MAX 0x7ffff
353
354/* Maximum number of T-CONTs of PON port */
355#define MVPP2_MAX_TCONT 16
356
357/* Maximum number of supported ports */
358#define MVPP2_MAX_PORTS 4
359
360/* Maximum number of TXQs used by single port */
361#define MVPP2_MAX_TXQ 8
362
363/* Maximum number of RXQs used by single port */
364#define MVPP2_MAX_RXQ 8
365
366/* Default number of RXQs in use */
367#define MVPP2_DEFAULT_RXQ 4
368
369/* Total number of RXQs available to all ports */
370#define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
371
372/* Max number of Rx descriptors */
373#define MVPP2_MAX_RXD 128
374
375/* Max number of Tx descriptors */
376#define MVPP2_MAX_TXD 1024
377
378/* Amount of Tx descriptors that can be reserved at once by CPU */
379#define MVPP2_CPU_DESC_CHUNK 64
380
381/* Max number of Tx descriptors in each aggregated queue */
382#define MVPP2_AGGR_TXQ_SIZE 256
383
384/* Descriptor aligned size */
385#define MVPP2_DESC_ALIGNED_SIZE 32
386
387/* Descriptor alignment mask */
388#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
389
390/* RX FIFO constants */
391#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
392#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
393#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
394
395/* RX buffer constants */
396#define MVPP2_SKB_SHINFO_SIZE \
397 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
398
399#define MVPP2_RX_PKT_SIZE(mtu) \
400 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
Jisheng Zhang4a0a12d2016-04-01 17:11:05 +0800401 ETH_HLEN + ETH_FCS_LEN, cache_line_size())
Marcin Wojtas3f518502014-07-10 16:52:13 -0300402
403#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
404#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
405#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
406 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
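/* Illustrative sizing for an MTU of 1500, assuming a 64-byte cache line
 * and NET_SKB_PAD of 64 bytes (both depend on the architecture and
 * kernel configuration):
 *   MVPP2_RX_PKT_SIZE(1500)   = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536
 *   MVPP2_RX_BUF_SIZE(1536)   = 1536 + 64 = 1600
 *   MVPP2_RX_TOTAL_SIZE(1600) = 1600 + MVPP2_SKB_SHINFO_SIZE
 */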
407
408#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
409
410/* IPv6 max L3 address size */
411#define MVPP2_MAX_L3_ADDR_SIZE 16
412
413/* Port flags */
414#define MVPP2_F_LOOPBACK BIT(0)
415
416/* Marvell tag types */
417enum mvpp2_tag_type {
418 MVPP2_TAG_TYPE_NONE = 0,
419 MVPP2_TAG_TYPE_MH = 1,
420 MVPP2_TAG_TYPE_DSA = 2,
421 MVPP2_TAG_TYPE_EDSA = 3,
422 MVPP2_TAG_TYPE_VLAN = 4,
423 MVPP2_TAG_TYPE_LAST = 5
424};
425
426/* Parser constants */
427#define MVPP2_PRS_TCAM_SRAM_SIZE 256
428#define MVPP2_PRS_TCAM_WORDS 6
429#define MVPP2_PRS_SRAM_WORDS 4
430#define MVPP2_PRS_FLOW_ID_SIZE 64
431#define MVPP2_PRS_FLOW_ID_MASK 0x3f
432#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
433#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
434#define MVPP2_PRS_IPV4_HEAD 0x40
435#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
436#define MVPP2_PRS_IPV4_MC 0xe0
437#define MVPP2_PRS_IPV4_MC_MASK 0xf0
438#define MVPP2_PRS_IPV4_BC_MASK 0xff
439#define MVPP2_PRS_IPV4_IHL 0x5
440#define MVPP2_PRS_IPV4_IHL_MASK 0xf
441#define MVPP2_PRS_IPV6_MC 0xff
442#define MVPP2_PRS_IPV6_MC_MASK 0xff
443#define MVPP2_PRS_IPV6_HOP_MASK 0xff
444#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
445#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
446#define MVPP2_PRS_DBL_VLANS_MAX 100
447
448/* Tcam structure:
449 * - lookup ID - 4 bits
450 * - port ID - 1 byte
451 * - additional information - 1 byte
452 * - header data - 8 bytes
453 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
454 */
455#define MVPP2_PRS_AI_BITS 8
456#define MVPP2_PRS_PORT_MASK 0xff
457#define MVPP2_PRS_LU_MASK 0xf
458#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
459 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
460#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
461 (((offs) * 2) - ((offs) % 2) + 2)
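/* Sketch of the resulting byte layout (little-endian view of the
 * tcam.word[]/tcam.byte[] union): each pair of header-data bytes shares
 * a 32-bit TCAM word with its enable (mask) bytes:
 *   offs:                         0  1  2  3  4   5   6   7
 *   MVPP2_PRS_TCAM_DATA_BYTE:     0  1  4  5  8   9  12  13
 *   MVPP2_PRS_TCAM_DATA_BYTE_EN:  2  3  6  7  10  11  14  15
 */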
462#define MVPP2_PRS_TCAM_AI_BYTE 16
463#define MVPP2_PRS_TCAM_PORT_BYTE 17
464#define MVPP2_PRS_TCAM_LU_BYTE 20
465#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
466#define MVPP2_PRS_TCAM_INV_WORD 5
467/* Tcam entries ID */
468#define MVPP2_PE_DROP_ALL 0
469#define MVPP2_PE_FIRST_FREE_TID 1
470#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
471#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
472#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
473#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
474#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
475#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
476#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
477#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
478#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
479#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
480#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
481#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
482#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
483#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
484#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
485#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
486#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
487#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
488#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
489#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
490#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
491#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
492#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
493#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
494#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
495
496/* Sram structure
497 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
498 */
499#define MVPP2_PRS_SRAM_RI_OFFS 0
500#define MVPP2_PRS_SRAM_RI_WORD 0
501#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
502#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
503#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
504#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
505#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
506#define MVPP2_PRS_SRAM_UDF_OFFS 73
507#define MVPP2_PRS_SRAM_UDF_BITS 8
508#define MVPP2_PRS_SRAM_UDF_MASK 0xff
509#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
510#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
511#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
512#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
513#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
514#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
515#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
516#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
517#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
518#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
519#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
520#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
521#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
522#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
523#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
524#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
525#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
526#define MVPP2_PRS_SRAM_AI_OFFS 90
527#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
528#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
529#define MVPP2_PRS_SRAM_AI_MASK 0xff
530#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
531#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
532#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
533#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
534
535/* Sram result info bits assignment */
536#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
537#define MVPP2_PRS_RI_DSA_MASK 0x2
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100538#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
539#define MVPP2_PRS_RI_VLAN_NONE 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300540#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
541#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
542#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
543#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
544#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100545#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
546#define MVPP2_PRS_RI_L2_UCAST 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300547#define MVPP2_PRS_RI_L2_MCAST BIT(9)
548#define MVPP2_PRS_RI_L2_BCAST BIT(10)
549#define MVPP2_PRS_RI_PPPOE_MASK 0x800
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100550#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
551#define MVPP2_PRS_RI_L3_UN 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300552#define MVPP2_PRS_RI_L3_IP4 BIT(12)
553#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
554#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
555#define MVPP2_PRS_RI_L3_IP6 BIT(14)
556#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
557#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100558#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
559#define MVPP2_PRS_RI_L3_UCAST 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300560#define MVPP2_PRS_RI_L3_MCAST BIT(15)
561#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
562#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
563#define MVPP2_PRS_RI_UDF3_MASK 0x300000
564#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
565#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
566#define MVPP2_PRS_RI_L4_TCP BIT(22)
567#define MVPP2_PRS_RI_L4_UDP BIT(23)
568#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
569#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
570#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
571#define MVPP2_PRS_RI_DROP_MASK 0x80000000
572
573/* Sram additional info bits assignment */
574#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
575#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
576#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
577#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
578#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
579#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
580#define MVPP2_PRS_SINGLE_VLAN_AI 0
581#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
582
583/* DSA/EDSA type */
584#define MVPP2_PRS_TAGGED true
585#define MVPP2_PRS_UNTAGGED false
586#define MVPP2_PRS_EDSA true
587#define MVPP2_PRS_DSA false
588
589/* MAC entries, shadow udf */
590enum mvpp2_prs_udf {
591 MVPP2_PRS_UDF_MAC_DEF,
592 MVPP2_PRS_UDF_MAC_RANGE,
593 MVPP2_PRS_UDF_L2_DEF,
594 MVPP2_PRS_UDF_L2_DEF_COPY,
595 MVPP2_PRS_UDF_L2_USER,
596};
597
598/* Lookup ID */
599enum mvpp2_prs_lookup {
600 MVPP2_PRS_LU_MH,
601 MVPP2_PRS_LU_MAC,
602 MVPP2_PRS_LU_DSA,
603 MVPP2_PRS_LU_VLAN,
604 MVPP2_PRS_LU_L2,
605 MVPP2_PRS_LU_PPPOE,
606 MVPP2_PRS_LU_IP4,
607 MVPP2_PRS_LU_IP6,
608 MVPP2_PRS_LU_FLOWS,
609 MVPP2_PRS_LU_LAST,
610};
611
612/* L3 cast enum */
613enum mvpp2_prs_l3_cast {
614 MVPP2_PRS_L3_UNI_CAST,
615 MVPP2_PRS_L3_MULTI_CAST,
616 MVPP2_PRS_L3_BROAD_CAST
617};
618
619/* Classifier constants */
620#define MVPP2_CLS_FLOWS_TBL_SIZE 512
621#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
622#define MVPP2_CLS_LKP_TBL_SIZE 64
623
624/* BM constants */
625#define MVPP2_BM_POOLS_NUM 8
626#define MVPP2_BM_LONG_BUF_NUM 1024
627#define MVPP2_BM_SHORT_BUF_NUM 2048
628#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
629#define MVPP2_BM_POOL_PTR_ALIGN 128
630#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
631#define MVPP2_BM_SWF_SHORT_POOL 3
632
633/* BM cookie (32 bits) definition */
634#define MVPP2_BM_COOKIE_POOL_OFFS 8
635#define MVPP2_BM_COOKIE_CPU_OFFS 24
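/* For illustration (field placement only): a cookie for pool 3 built on
 * CPU 1 would be
 *   (1 << MVPP2_BM_COOKIE_CPU_OFFS) | (3 << MVPP2_BM_COOKIE_POOL_OFFS)
 *   = 0x01000300
 * i.e. the pool ID sits at bit offset 8 and the CPU number at bit
 * offset 24 of the 32-bit cookie.
 */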
636
637/* BM short pool packet size
638 * These values ensure that for SWF the total number
639 * of bytes allocated for each buffer will be 512
640 */
641#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
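/* Sketch of the arithmetic behind the 512-byte budget (NET_SKB_PAD and
 * the skb_shared_info overhead depend on the architecture and kernel
 * configuration):
 *   MVPP2_BM_SHORT_PKT_SIZE = 512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE
 * so MVPP2_RX_TOTAL_SIZE(MVPP2_RX_BUF_SIZE(MVPP2_BM_SHORT_PKT_SIZE)) adds
 * both overheads back and comes to exactly 512 bytes per buffer.
 */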
642
Thomas Petazzonia7868412017-03-07 16:53:13 +0100643#define MVPP21_ADDR_SPACE_SZ 0
644#define MVPP22_ADDR_SPACE_SZ SZ_64K
645
646#define MVPP2_MAX_CPUS 4
647
Marcin Wojtas3f518502014-07-10 16:52:13 -0300648enum mvpp2_bm_type {
649 MVPP2_BM_FREE,
650 MVPP2_BM_SWF_LONG,
651 MVPP2_BM_SWF_SHORT
652};
653
654/* Definitions */
655
656/* Shared Packet Processor resources */
657struct mvpp2 {
658 /* Shared registers' base addresses */
Marcin Wojtas3f518502014-07-10 16:52:13 -0300659 void __iomem *lms_base;
Thomas Petazzonia7868412017-03-07 16:53:13 +0100660 void __iomem *iface_base;
661
662 /* On PPv2.2, each CPU can access the base register through a
663 * separate address space, each 64 KB apart from each
664 * other.
665 */
666 void __iomem *cpu_base[MVPP2_MAX_CPUS];
Marcin Wojtas3f518502014-07-10 16:52:13 -0300667
668 /* Common clocks */
669 struct clk *pp_clk;
670 struct clk *gop_clk;
671
672 /* List of pointers to port structures */
673 struct mvpp2_port **port_list;
674
675 /* Aggregated TXQs */
676 struct mvpp2_tx_queue *aggr_txqs;
677
678 /* BM pools */
679 struct mvpp2_bm_pool *bm_pools;
680
681 /* PRS shadow table */
682 struct mvpp2_prs_shadow *prs_shadow;
683 /* PRS auxiliary table for double vlan entries control */
684 bool *prs_double_vlans;
685
686 /* Tclk value */
687 u32 tclk;
Thomas Petazzonifaca9242017-03-07 16:53:06 +0100688
689 /* HW version */
690 enum { MVPP21, MVPP22 } hw_version;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300691};
692
693struct mvpp2_pcpu_stats {
694 struct u64_stats_sync syncp;
695 u64 rx_packets;
696 u64 rx_bytes;
697 u64 tx_packets;
698 u64 tx_bytes;
699};
700
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200701/* Per-CPU port control */
702struct mvpp2_port_pcpu {
703 struct hrtimer tx_done_timer;
704 bool timer_scheduled;
705 /* Tasklet for egress finalization */
706 struct tasklet_struct tx_done_tasklet;
707};
708
Marcin Wojtas3f518502014-07-10 16:52:13 -0300709struct mvpp2_port {
710 u8 id;
711
Thomas Petazzonia7868412017-03-07 16:53:13 +0100712 /* Index of the port from the "group of ports" complex point
713 * of view
714 */
715 int gop_id;
716
Marcin Wojtas3f518502014-07-10 16:52:13 -0300717 int irq;
718
719 struct mvpp2 *priv;
720
721 /* Per-port registers' base address */
722 void __iomem *base;
723
724 struct mvpp2_rx_queue **rxqs;
725 struct mvpp2_tx_queue **txqs;
726 struct net_device *dev;
727
728 int pkt_size;
729
730 u32 pending_cause_rx;
731 struct napi_struct napi;
732
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200733 /* Per-CPU port control */
734 struct mvpp2_port_pcpu __percpu *pcpu;
735
Marcin Wojtas3f518502014-07-10 16:52:13 -0300736 /* Flags */
737 unsigned long flags;
738
739 u16 tx_ring_size;
740 u16 rx_ring_size;
741 struct mvpp2_pcpu_stats __percpu *stats;
742
Marcin Wojtas3f518502014-07-10 16:52:13 -0300743 phy_interface_t phy_interface;
744 struct device_node *phy_node;
745 unsigned int link;
746 unsigned int duplex;
747 unsigned int speed;
748
749 struct mvpp2_bm_pool *pool_long;
750 struct mvpp2_bm_pool *pool_short;
751
752 /* Index of first port's physical RXQ */
753 u8 first_rxq;
754};
755
756/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
757 * layout of the transmit and reception DMA descriptors, and their
758 * layout is therefore defined by the hardware design
759 */
760
761#define MVPP2_TXD_L3_OFF_SHIFT 0
762#define MVPP2_TXD_IP_HLEN_SHIFT 8
763#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
764#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
765#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
766#define MVPP2_TXD_PADDING_DISABLE BIT(23)
767#define MVPP2_TXD_L4_UDP BIT(24)
768#define MVPP2_TXD_L3_IP6 BIT(26)
769#define MVPP2_TXD_L_DESC BIT(28)
770#define MVPP2_TXD_F_DESC BIT(29)
771
772#define MVPP2_RXD_ERR_SUMMARY BIT(15)
773#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
774#define MVPP2_RXD_ERR_CRC 0x0
775#define MVPP2_RXD_ERR_OVERRUN BIT(13)
776#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
777#define MVPP2_RXD_BM_POOL_ID_OFFS 16
778#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
779#define MVPP2_RXD_HWF_SYNC BIT(21)
780#define MVPP2_RXD_L4_CSUM_OK BIT(22)
781#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
782#define MVPP2_RXD_L4_TCP BIT(25)
783#define MVPP2_RXD_L4_UDP BIT(26)
784#define MVPP2_RXD_L3_IP4 BIT(28)
785#define MVPP2_RXD_L3_IP6 BIT(30)
786#define MVPP2_RXD_BUF_HDR BIT(31)
787
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100788/* HW TX descriptor for PPv2.1 */
789struct mvpp21_tx_desc {
Marcin Wojtas3f518502014-07-10 16:52:13 -0300790 u32 command; /* Options used by HW for packet transmitting.*/
791 u8 packet_offset; /* the offset from the buffer beginning */
792 u8 phys_txq; /* destination queue ID */
793 u16 data_size; /* data size of transmitted packet in bytes */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100794 u32 buf_dma_addr; /* physical addr of transmitted buffer */
Marcin Wojtas3f518502014-07-10 16:52:13 -0300795 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
796 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
797 u32 reserved2; /* reserved (for future use) */
798};
799
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100800/* HW RX descriptor for PPv2.1 */
801struct mvpp21_rx_desc {
Marcin Wojtas3f518502014-07-10 16:52:13 -0300802 u32 status; /* info about received packet */
803 u16 reserved1; /* parser_info (for future use, PnC) */
804 u16 data_size; /* size of received packet in bytes */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100805 u32 buf_dma_addr; /* physical address of the buffer */
Marcin Wojtas3f518502014-07-10 16:52:13 -0300806 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
807 u16 reserved2; /* gem_port_id (for future use, PON) */
808 u16 reserved3; /* csum_l4 (for future use, PnC) */
809 u8 reserved4; /* bm_qset (for future use, BM) */
810 u8 reserved5;
811 u16 reserved6; /* classify_info (for future use, PnC) */
812 u32 reserved7; /* flow_id (for future use, PnC) */
813 u32 reserved8;
814};
815
Thomas Petazzonie7c53592017-03-07 16:53:08 +0100816/* HW TX descriptor for PPv2.2 */
817struct mvpp22_tx_desc {
818 u32 command;
819 u8 packet_offset;
820 u8 phys_txq;
821 u16 data_size;
822 u64 reserved1;
823 u64 buf_dma_addr_ptp;
824 u64 buf_cookie_misc;
825};
826
827/* HW RX descriptor for PPv2.2 */
828struct mvpp22_rx_desc {
829 u32 status;
830 u16 reserved1;
831 u16 data_size;
832 u32 reserved2;
833 u32 reserved3;
834 u64 buf_dma_addr_key_hash;
835 u64 buf_cookie_misc;
836};
837
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100838/* Opaque type used by the driver to manipulate the HW TX and RX
839 * descriptors
840 */
841struct mvpp2_tx_desc {
842 union {
843 struct mvpp21_tx_desc pp21;
Thomas Petazzonie7c53592017-03-07 16:53:08 +0100844 struct mvpp22_tx_desc pp22;
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100845 };
846};
847
848struct mvpp2_rx_desc {
849 union {
850 struct mvpp21_rx_desc pp21;
Thomas Petazzonie7c53592017-03-07 16:53:08 +0100851 struct mvpp22_rx_desc pp22;
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100852 };
853};
854
Thomas Petazzoni83544912016-12-21 11:28:49 +0100855struct mvpp2_txq_pcpu_buf {
856 /* Transmitted SKB */
857 struct sk_buff *skb;
858
859 /* Physical address of transmitted buffer */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100860 dma_addr_t dma;
Thomas Petazzoni83544912016-12-21 11:28:49 +0100861
862 /* Size transmitted */
863 size_t size;
864};
865
Marcin Wojtas3f518502014-07-10 16:52:13 -0300866/* Per-CPU Tx queue control */
867struct mvpp2_txq_pcpu {
868 int cpu;
869
870 /* Number of Tx DMA descriptors in the descriptor ring */
871 int size;
872
873 /* Number of currently used Tx DMA descriptor in the
874 * descriptor ring
875 */
876 int count;
877
878 /* Number of Tx DMA descriptors reserved for each CPU */
879 int reserved_num;
880
Thomas Petazzoni83544912016-12-21 11:28:49 +0100881 /* Info about transmitted buffers */
882 struct mvpp2_txq_pcpu_buf *buffs;
Marcin Wojtas71ce3912015-08-06 19:00:29 +0200883
Marcin Wojtas3f518502014-07-10 16:52:13 -0300884 /* Index of last TX DMA descriptor that was inserted */
885 int txq_put_index;
886
887 /* Index of the TX DMA descriptor to be cleaned up */
888 int txq_get_index;
889};
890
891struct mvpp2_tx_queue {
892 /* Physical number of this Tx queue */
893 u8 id;
894
895 /* Logical number of this Tx queue */
896 u8 log_id;
897
898 /* Number of Tx DMA descriptors in the descriptor ring */
899 int size;
900
901 /* Number of currently used Tx DMA descriptor in the descriptor ring */
902 int count;
903
904 /* Per-CPU control of physical Tx queues */
905 struct mvpp2_txq_pcpu __percpu *pcpu;
906
Marcin Wojtas3f518502014-07-10 16:52:13 -0300907 u32 done_pkts_coal;
908
909 /* Virtual address of the Tx DMA descriptors array */
910 struct mvpp2_tx_desc *descs;
911
912 /* DMA address of the Tx DMA descriptors array */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100913 dma_addr_t descs_dma;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300914
915 /* Index of the last Tx DMA descriptor */
916 int last_desc;
917
918 /* Index of the next Tx DMA descriptor to process */
919 int next_desc_to_proc;
920};
921
922struct mvpp2_rx_queue {
923 /* RX queue number, in the range 0-31 for physical RXQs */
924 u8 id;
925
926 /* Num of rx descriptors in the rx descriptor ring */
927 int size;
928
929 u32 pkts_coal;
930 u32 time_coal;
931
932 /* Virtual address of the RX DMA descriptors array */
933 struct mvpp2_rx_desc *descs;
934
935 /* DMA address of the RX DMA descriptors array */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100936 dma_addr_t descs_dma;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300937
938 /* Index of the last RX DMA descriptor */
939 int last_desc;
940
941 /* Index of the next RX DMA descriptor to process */
942 int next_desc_to_proc;
943
944 /* ID of port to which physical RXQ is mapped */
945 int port;
946
947 /* Port's logic RXQ number to which physical RXQ is mapped */
948 int logic_rxq;
949};
950
951union mvpp2_prs_tcam_entry {
952 u32 word[MVPP2_PRS_TCAM_WORDS];
953 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
954};
955
956union mvpp2_prs_sram_entry {
957 u32 word[MVPP2_PRS_SRAM_WORDS];
958 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
959};
960
961struct mvpp2_prs_entry {
962 u32 index;
963 union mvpp2_prs_tcam_entry tcam;
964 union mvpp2_prs_sram_entry sram;
965};
966
967struct mvpp2_prs_shadow {
968 bool valid;
969 bool finish;
970
971 /* Lookup ID */
972 int lu;
973
974 /* User defined offset */
975 int udf;
976
977 /* Result info */
978 u32 ri;
979 u32 ri_mask;
980};
981
982struct mvpp2_cls_flow_entry {
983 u32 index;
984 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
985};
986
987struct mvpp2_cls_lookup_entry {
988 u32 lkpid;
989 u32 way;
990 u32 data;
991};
992
993struct mvpp2_bm_pool {
994 /* Pool number in the range 0-7 */
995 int id;
996 enum mvpp2_bm_type type;
997
998 /* Buffer Pointers Pool External (BPPE) size */
999 int size;
Thomas Petazzonid01524d2017-03-07 16:53:09 +01001000 /* BPPE size in bytes */
1001 int size_bytes;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001002 /* Number of buffers for this pool */
1003 int buf_num;
1004 /* Pool buffer size */
1005 int buf_size;
1006 /* Packet size */
1007 int pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01001008 int frag_size;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001009
1010 /* BPPE virtual base address */
1011 u32 *virt_addr;
Thomas Petazzoni20396132017-03-07 16:53:00 +01001012 /* BPPE DMA base address */
1013 dma_addr_t dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001014
1015 /* Ports using BM pool */
1016 u32 port_map;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001017};
1018
Marcin Wojtas3f518502014-07-10 16:52:13 -03001019/* Static declarations */
1020
1021/* Number of RXQs used by single port */
1022static int rxq_number = MVPP2_DEFAULT_RXQ;
1023/* Number of TXQs used by single port */
1024static int txq_number = MVPP2_MAX_TXQ;
1025
1026#define MVPP2_DRIVER_NAME "mvpp2"
1027#define MVPP2_DRIVER_VERSION "1.0"
1028
1029/* Utility/helper methods */
1030
1031static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1032{
Thomas Petazzonia7868412017-03-07 16:53:13 +01001033 writel(data, priv->cpu_base[0] + offset);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001034}
1035
1036static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1037{
Thomas Petazzonia7868412017-03-07 16:53:13 +01001038 return readl(priv->cpu_base[0] + offset);
1039}
1040
1041/* These accessors should be used to access:
1042 *
1043 * - per-CPU registers, where each CPU has its own copy of the
1044 * register.
1045 *
1046 * MVPP2_BM_VIRT_ALLOC_REG
1047 * MVPP22_BM_ADDR_HIGH_ALLOC
1048 * MVPP22_BM_ADDR_HIGH_RLS_REG
1049 * MVPP2_BM_VIRT_RLS_REG
1050 * MVPP2_ISR_RX_TX_CAUSE_REG
1051 * MVPP2_ISR_RX_TX_MASK_REG
1052 * MVPP2_TXQ_NUM_REG
1053 * MVPP2_AGGR_TXQ_UPDATE_REG
1054 * MVPP2_TXQ_RSVD_REQ_REG
1055 * MVPP2_TXQ_RSVD_RSLT_REG
1056 * MVPP2_TXQ_SENT_REG
1057 * MVPP2_RXQ_NUM_REG
1058 *
1059 * - global registers that must be accessed through a specific CPU
1060 * window, because they are related to an access to a per-CPU
1061 * register
1062 *
1063 * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
1064 * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
1065 * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
1066 * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
1067 * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
1068 * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
1069 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1070 * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
1071 * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
1072 * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
1073 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1076 */
1077static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
1078 u32 offset, u32 data)
1079{
1080 writel(data, priv->cpu_base[cpu] + offset);
1081}
1082
1083static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
1084 u32 offset)
1085{
1086 return readl(priv->cpu_base[cpu] + offset);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001087}
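
/* Illustrative usage (a sketch of how the driver pairs an indirect
 * *_NUM_REG selection with the dependent register access from the same
 * per-CPU window):
 *
 *	cpu = get_cpu();
 *	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
 *	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG) &
 *	      MVPP2_TXQ_PENDING_MASK;
 *	put_cpu();
 */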
1088
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001089static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1090 struct mvpp2_tx_desc *tx_desc)
1091{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001092 if (port->priv->hw_version == MVPP21)
1093 return tx_desc->pp21.buf_dma_addr;
1094 else
1095 return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001096}
1097
1098static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1099 struct mvpp2_tx_desc *tx_desc,
1100 dma_addr_t dma_addr)
1101{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001102 if (port->priv->hw_version == MVPP21) {
1103 tx_desc->pp21.buf_dma_addr = dma_addr;
1104 } else {
1105 u64 val = (u64)dma_addr;
1106
1107 tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
1108 tx_desc->pp22.buf_dma_addr_ptp |= val;
1109 }
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001110}
1111
1112static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
1113 struct mvpp2_tx_desc *tx_desc)
1114{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001115 if (port->priv->hw_version == MVPP21)
1116 return tx_desc->pp21.data_size;
1117 else
1118 return tx_desc->pp22.data_size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001119}
1120
1121static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1122 struct mvpp2_tx_desc *tx_desc,
1123 size_t size)
1124{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001125 if (port->priv->hw_version == MVPP21)
1126 tx_desc->pp21.data_size = size;
1127 else
1128 tx_desc->pp22.data_size = size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001129}
1130
1131static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1132 struct mvpp2_tx_desc *tx_desc,
1133 unsigned int txq)
1134{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001135 if (port->priv->hw_version == MVPP21)
1136 tx_desc->pp21.phys_txq = txq;
1137 else
1138 tx_desc->pp22.phys_txq = txq;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001139}
1140
1141static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1142 struct mvpp2_tx_desc *tx_desc,
1143 unsigned int command)
1144{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001145 if (port->priv->hw_version == MVPP21)
1146 tx_desc->pp21.command = command;
1147 else
1148 tx_desc->pp22.command = command;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001149}
1150
1151static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
1152 struct mvpp2_tx_desc *tx_desc,
1153 unsigned int offset)
1154{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001155 if (port->priv->hw_version == MVPP21)
1156 tx_desc->pp21.packet_offset = offset;
1157 else
1158 tx_desc->pp22.packet_offset = offset;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001159}
1160
1161static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
1162 struct mvpp2_tx_desc *tx_desc)
1163{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001164 if (port->priv->hw_version == MVPP21)
1165 return tx_desc->pp21.packet_offset;
1166 else
1167 return tx_desc->pp22.packet_offset;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001168}
1169
1170static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1171 struct mvpp2_rx_desc *rx_desc)
1172{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001173 if (port->priv->hw_version == MVPP21)
1174 return rx_desc->pp21.buf_dma_addr;
1175 else
1176 return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001177}
1178
1179static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1180 struct mvpp2_rx_desc *rx_desc)
1181{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001182 if (port->priv->hw_version == MVPP21)
1183 return rx_desc->pp21.buf_cookie;
1184 else
1185 return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001186}
1187
1188static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1189 struct mvpp2_rx_desc *rx_desc)
1190{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001191 if (port->priv->hw_version == MVPP21)
1192 return rx_desc->pp21.data_size;
1193 else
1194 return rx_desc->pp22.data_size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001195}
1196
1197static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1198 struct mvpp2_rx_desc *rx_desc)
1199{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001200 if (port->priv->hw_version == MVPP21)
1201 return rx_desc->pp21.status;
1202 else
1203 return rx_desc->pp22.status;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001204}
1205
Marcin Wojtas3f518502014-07-10 16:52:13 -03001206static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1207{
1208 txq_pcpu->txq_get_index++;
1209 if (txq_pcpu->txq_get_index == txq_pcpu->size)
1210 txq_pcpu->txq_get_index = 0;
1211}
1212
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001213static void mvpp2_txq_inc_put(struct mvpp2_port *port,
1214 struct mvpp2_txq_pcpu *txq_pcpu,
Marcin Wojtas71ce3912015-08-06 19:00:29 +02001215 struct sk_buff *skb,
1216 struct mvpp2_tx_desc *tx_desc)
Marcin Wojtas3f518502014-07-10 16:52:13 -03001217{
Thomas Petazzoni83544912016-12-21 11:28:49 +01001218 struct mvpp2_txq_pcpu_buf *tx_buf =
1219 txq_pcpu->buffs + txq_pcpu->txq_put_index;
1220 tx_buf->skb = skb;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001221 tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
1222 tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
1223 mvpp2_txdesc_offset_get(port, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001224 txq_pcpu->txq_put_index++;
1225 if (txq_pcpu->txq_put_index == txq_pcpu->size)
1226 txq_pcpu->txq_put_index = 0;
1227}
1228
1229/* Get number of physical egress port */
1230static inline int mvpp2_egress_port(struct mvpp2_port *port)
1231{
1232 return MVPP2_MAX_TCONT + port->id;
1233}
1234
1235/* Get number of physical TXQ */
1236static inline int mvpp2_txq_phys(int port, int txq)
1237{
1238 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1239}
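/* For illustration: with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8,
 * logical TXQ 2 of port 1 maps to physical TXQ (16 + 1) * 8 + 2 = 138;
 * port numbering starts after the 16 T-CONT slots, which is also why
 * mvpp2_egress_port() above returns MVPP2_MAX_TCONT + port->id.
 */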
1240
1241/* Parser configuration routines */
1242
1243/* Update parser tcam and sram hw entries */
1244static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1245{
1246 int i;
1247
1248 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1249 return -EINVAL;
1250
1251 /* Clear entry invalidation bit */
1252 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1253
1254 /* Write tcam index - indirect access */
1255 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1256 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1257 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1258
1259 /* Write sram index - indirect access */
1260 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1261 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1262 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1263
1264 return 0;
1265}
1266
1267/* Read tcam entry from hw */
1268static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1269{
1270 int i;
1271
1272 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1273 return -EINVAL;
1274
1275 /* Write tcam index - indirect access */
1276 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1277
1278 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1279 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1280 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1281 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1282
1283 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1284 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1285
1286 /* Write sram index - indirect access */
1287 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1288 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1289 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1290
1291 return 0;
1292}
1293
1294/* Invalidate tcam hw entry */
1295static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1296{
1297 /* Write index - indirect access */
1298 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1299 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1300 MVPP2_PRS_TCAM_INV_MASK);
1301}
1302
1303/* Enable shadow table entry and set its lookup ID */
1304static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1305{
1306 priv->prs_shadow[index].valid = true;
1307 priv->prs_shadow[index].lu = lu;
1308}
1309
1310/* Update ri fields in shadow table entry */
1311static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1312 unsigned int ri, unsigned int ri_mask)
1313{
1314 priv->prs_shadow[index].ri_mask = ri_mask;
1315 priv->prs_shadow[index].ri = ri;
1316}
1317
1318/* Update lookup field in tcam sw entry */
1319static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1320{
1321 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1322
1323 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1324 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1325}
1326
1327/* Update mask for single port in tcam sw entry */
1328static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1329 unsigned int port, bool add)
1330{
1331 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1332
1333 if (add)
1334 pe->tcam.byte[enable_off] &= ~(1 << port);
1335 else
1336 pe->tcam.byte[enable_off] |= 1 << port;
1337}
1338
1339/* Update port map in tcam sw entry */
1340static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1341 unsigned int ports)
1342{
1343 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1344 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1345
1346 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1347 pe->tcam.byte[enable_off] &= ~port_mask;
1348 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1349}
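/* Note on the encoding used by the two port-map helpers around this
 * comment: the map is stored inverted in the enable byte, so e.g.
 * ports = BIT(0) | BIT(1) is written as 0xfc and read back as 0x03 by
 * mvpp2_prs_tcam_port_map_get() below.
 */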
1350
1351/* Obtain port map from tcam sw entry */
1352static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1353{
1354 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1355
1356 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1357}
1358
1359/* Set byte of data and its enable bits in tcam sw entry */
1360static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1361 unsigned int offs, unsigned char byte,
1362 unsigned char enable)
1363{
1364 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1365 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1366}
1367
1368/* Get byte of data and its enable bits from tcam sw entry */
1369static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1370 unsigned int offs, unsigned char *byte,
1371 unsigned char *enable)
1372{
1373 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1374 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1375}
1376
1377/* Compare tcam data bytes with a pattern */
1378static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1379 u16 data)
1380{
1381 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1382 u16 tcam_data;
1383
1384 tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1385 if (tcam_data != data)
1386 return false;
1387 return true;
1388}
1389
1390/* Update ai bits in tcam sw entry */
1391static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1392 unsigned int bits, unsigned int enable)
1393{
1394 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1395
1396 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1397
1398 if (!(enable & BIT(i)))
1399 continue;
1400
1401 if (bits & BIT(i))
1402 pe->tcam.byte[ai_idx] |= 1 << i;
1403 else
1404 pe->tcam.byte[ai_idx] &= ~(1 << i);
1405 }
1406
1407 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1408}
1409
1410/* Get ai bits from tcam sw entry */
1411static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1412{
1413 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1414}
1415
1416/* Set ethertype in tcam sw entry */
1417static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1418 unsigned short ethertype)
1419{
1420 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1421 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1422}
1423
1424/* Set bits in sram sw entry */
1425static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1426 int val)
1427{
1428 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1429}
1430
1431/* Clear bits in sram sw entry */
1432static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1433 int val)
1434{
1435 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1436}
1437
1438/* Update ri bits in sram sw entry */
1439static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1440 unsigned int bits, unsigned int mask)
1441{
1442 unsigned int i;
1443
1444 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1445 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1446
1447 if (!(mask & BIT(i)))
1448 continue;
1449
1450 if (bits & BIT(i))
1451 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1452 else
1453 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1454
1455 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1456 }
1457}
1458
1459/* Obtain ri bits from sram sw entry */
1460static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1461{
1462 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1463}
1464
1465/* Update ai bits in sram sw entry */
1466static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1467 unsigned int bits, unsigned int mask)
1468{
1469 unsigned int i;
1470 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1471
1472 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1473
1474 if (!(mask & BIT(i)))
1475 continue;
1476
1477 if (bits & BIT(i))
1478 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1479 else
1480 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1481
1482 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1483 }
1484}
1485
1486/* Read ai bits from sram sw entry */
1487static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1488{
1489 u8 bits;
1490 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1491 int ai_en_off = ai_off + 1;
1492 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1493
1494 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1495 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1496
1497 return bits;
1498}
1499
1500/* In sram sw entry set lookup ID field of the tcam key to be used in the next
1501 * lookup iteration
1502 */
1503static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1504 unsigned int lu)
1505{
1506 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1507
1508 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1509 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1510 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1511}
1512
1513/* In the sram sw entry set sign and value of the next lookup offset
1514 * and the offset value generated to the classifier
1515 */
1516static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1517 unsigned int op)
1518{
1519 /* Set sign */
1520 if (shift < 0) {
1521 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1522 shift = 0 - shift;
1523 } else {
1524 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1525 }
1526
1527 /* Set value */
1528 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1529 (unsigned char)shift;
1530
1531 /* Reset and set operation */
1532 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1533 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1534 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1535
1536 /* Set base offset as current */
1537 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1538}
1539
1540/* In the sram sw entry set sign and value of the user defined offset
1541 * generated to the classifier
1542 */
1543static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1544 unsigned int type, int offset,
1545 unsigned int op)
1546{
1547 /* Set sign */
1548 if (offset < 0) {
1549 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1550 offset = 0 - offset;
1551 } else {
1552 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1553 }
1554
1555 /* Set value */
1556 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1557 MVPP2_PRS_SRAM_UDF_MASK);
1558 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
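 /* The UDF offset field crosses a byte boundary, so the bits that
  * spill into the following sram byte are cleared and set by hand.
  */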
1559 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1560 MVPP2_PRS_SRAM_UDF_BITS)] &=
1561 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1562 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1563 MVPP2_PRS_SRAM_UDF_BITS)] |=
1564 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1565
1566 /* Set offset type */
1567 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1568 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1569 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1570
1571 /* Set offset operation */
1572 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1573 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1574 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1575
1576 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1577 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1578 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1579 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1580
1581 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1582 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1583 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1584
1585 /* Set base offset as current */
1586 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1587}
1588
1589/* Find parser flow entry */
1590static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1591{
1592 struct mvpp2_prs_entry *pe;
1593 int tid;
1594
1595 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1596 if (!pe)
1597 return NULL;
1598 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1599
1600 /* Go through all the entries with MVPP2_PRS_LU_FLOWS */
1601 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1602 u8 bits;
1603
1604 if (!priv->prs_shadow[tid].valid ||
1605 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1606 continue;
1607
1608 pe->index = tid;
1609 mvpp2_prs_hw_read(priv, pe);
1610 bits = mvpp2_prs_sram_ai_get(pe);
1611
1612 /* Sram stores the classification lookup ID in AI bits [5:0] */
1613 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1614 return pe;
1615 }
1616 kfree(pe);
1617
1618 return NULL;
1619}
1620
1621/* Return first free tcam index, seeking from start to end */
1622static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1623 unsigned char end)
1624{
1625 int tid;
1626
1627 if (start > end)
1628 swap(start, end);
1629
1630 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1631 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1632
1633 for (tid = start; tid <= end; tid++) {
1634 if (!priv->prs_shadow[tid].valid)
1635 return tid;
1636 }
1637
1638 return -EINVAL;
1639}
1640
1641/* Enable/disable dropping all mac da's */
1642static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1643{
1644 struct mvpp2_prs_entry pe;
1645
1646 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1647 /* Entry exists - update port only */
1648 pe.index = MVPP2_PE_DROP_ALL;
1649 mvpp2_prs_hw_read(priv, &pe);
1650 } else {
1651 /* Entry doesn't exist - create new */
1652 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1653 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1654 pe.index = MVPP2_PE_DROP_ALL;
1655
1656 /* Non-promiscuous mode for all ports - DROP unknown packets */
1657 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1658 MVPP2_PRS_RI_DROP_MASK);
1659
1660 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1661 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1662
1663 /* Update shadow table */
1664 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1665
1666 /* Mask all ports */
1667 mvpp2_prs_tcam_port_map_set(&pe, 0);
1668 }
1669
1670 /* Update port mask */
1671 mvpp2_prs_tcam_port_set(&pe, port, add);
1672
1673 mvpp2_prs_hw_write(priv, &pe);
1674}
1675
1676/* Set port to promiscuous mode */
1677static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1678{
1679 struct mvpp2_prs_entry pe;
1680
1681 /* Promiscuous mode - Accept unknown packets */
1682
1683 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1684 /* Entry exists - update port only */
1685 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1686 mvpp2_prs_hw_read(priv, &pe);
1687 } else {
1688 /* Entry doesn't exist - create new */
1689 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1690 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1691 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1692
1693 /* Continue - set next lookup */
1694 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1695
1696 /* Set result info bits */
1697 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1698 MVPP2_PRS_RI_L2_CAST_MASK);
1699
1700 /* Shift to ethertype */
1701 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1702 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1703
1704 /* Mask all ports */
1705 mvpp2_prs_tcam_port_map_set(&pe, 0);
1706
1707 /* Update shadow table */
1708 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1709 }
1710
1711 /* Update port mask */
1712 mvpp2_prs_tcam_port_set(&pe, port, add);
1713
1714 mvpp2_prs_hw_write(priv, &pe);
1715}
1716
1717/* Accept multicast */
1718static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1719 bool add)
1720{
1721 struct mvpp2_prs_entry pe;
1722 unsigned char da_mc;
1723
1724 /* Ethernet multicast address first byte is
1725 * 0x01 for IPv4 and 0x33 for IPv6
1726 */
1727 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1728
1729 if (priv->prs_shadow[index].valid) {
1730 /* Entry exists - update port only */
1731 pe.index = index;
1732 mvpp2_prs_hw_read(priv, &pe);
1733 } else {
1734 /* Entry doesn't exist - create new */
1735 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1736 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1737 pe.index = index;
1738
1739 /* Continue - set next lookup */
1740 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1741
1742 /* Set result info bits */
1743 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1744 MVPP2_PRS_RI_L2_CAST_MASK);
1745
1746 /* Update tcam entry data first byte */
1747 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1748
1749 /* Shift to ethertype */
1750 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1751 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1752
1753 /* Mask all ports */
1754 mvpp2_prs_tcam_port_map_set(&pe, 0);
1755
1756 /* Update shadow table */
1757 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1758 }
1759
1760 /* Update port mask */
1761 mvpp2_prs_tcam_port_set(&pe, port, add);
1762
1763 mvpp2_prs_hw_write(priv, &pe);
1764}
1765
1766/* Set entry for dsa packets */
1767static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1768 bool tagged, bool extend)
1769{
1770 struct mvpp2_prs_entry pe;
1771 int tid, shift;
1772
1773 if (extend) {
1774 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1775 shift = 8;
1776 } else {
1777 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1778 shift = 4;
1779 }
1780
1781 if (priv->prs_shadow[tid].valid) {
1782 /* Entry exists - update port only */
1783 pe.index = tid;
1784 mvpp2_prs_hw_read(priv, &pe);
1785 } else {
1786 /* Entry doesn't exist - create new */
1787 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1788 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1789 pe.index = tid;
1790
1791 /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
1792 mvpp2_prs_sram_shift_set(&pe, shift,
1793 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1794
1795 /* Update shadow table */
1796 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1797
1798 if (tagged) {
1799 /* Set tagged bit in DSA tag */
1800 mvpp2_prs_tcam_data_byte_set(&pe, 0,
1801 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1802 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1803 /* Clear all ai bits for next iteration */
1804 mvpp2_prs_sram_ai_update(&pe, 0,
1805 MVPP2_PRS_SRAM_AI_MASK);
1806 /* If packet is tagged continue check vlans */
1807 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1808 } else {
1809 /* Set result info bits to 'no vlans' */
1810 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1811 MVPP2_PRS_RI_VLAN_MASK);
1812 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1813 }
1814
1815 /* Mask all ports */
1816 mvpp2_prs_tcam_port_map_set(&pe, 0);
1817 }
1818
1819 /* Update port mask */
1820 mvpp2_prs_tcam_port_set(&pe, port, add);
1821
1822 mvpp2_prs_hw_write(priv, &pe);
1823}
1824
1825/* Set entry for dsa ethertype */
1826static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1827 bool add, bool tagged, bool extend)
1828{
1829 struct mvpp2_prs_entry pe;
1830 int tid, shift, port_mask;
1831
1832 if (extend) {
1833 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1834 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1835 port_mask = 0;
1836 shift = 8;
1837 } else {
1838 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1839 MVPP2_PE_ETYPE_DSA_UNTAGGED;
1840 port_mask = MVPP2_PRS_PORT_MASK;
1841 shift = 4;
1842 }
1843
1844 if (priv->prs_shadow[tid].valid) {
1845 /* Entry exists - update port only */
1846 pe.index = tid;
1847 mvpp2_prs_hw_read(priv, &pe);
1848 } else {
1849 /* Entry doesn't exist - create new */
1850 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1851 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1852 pe.index = tid;
1853
1854 /* Set ethertype */
1855 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
1856 mvpp2_prs_match_etype(&pe, 2, 0);
1857
1858 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
1859 MVPP2_PRS_RI_DSA_MASK);
1860 /* Shift ethertype + 2 bytes reserved + tag */
1861 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
1862 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1863
1864 /* Update shadow table */
1865 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1866
1867 if (tagged) {
1868 /* Set tagged bit in DSA tag */
1869 mvpp2_prs_tcam_data_byte_set(&pe,
1870 MVPP2_ETH_TYPE_LEN + 2 + 3,
1871 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1872 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1873 /* Clear all ai bits for next iteration */
1874 mvpp2_prs_sram_ai_update(&pe, 0,
1875 MVPP2_PRS_SRAM_AI_MASK);
1876 /* If packet is tagged continue check vlans */
1877 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1878 } else {
1879 /* Set result info bits to 'no vlans' */
1880 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1881 MVPP2_PRS_RI_VLAN_MASK);
1882 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1883 }
1884 /* Mask/unmask all ports, depending on dsa type */
1885 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
1886 }
1887
1888 /* Update port mask */
1889 mvpp2_prs_tcam_port_set(&pe, port, add);
1890
1891 mvpp2_prs_hw_write(priv, &pe);
1892}
1893
1894/* Search for existing single/triple vlan entry */
1895static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
1896 unsigned short tpid, int ai)
1897{
1898 struct mvpp2_prs_entry *pe;
1899 int tid;
1900
1901 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1902 if (!pe)
1903 return NULL;
1904 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1905
1906 /* Go through all the entries with MVPP2_PRS_LU_VLAN */
1907 for (tid = MVPP2_PE_FIRST_FREE_TID;
1908 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1909 unsigned int ri_bits, ai_bits;
1910 bool match;
1911
1912 if (!priv->prs_shadow[tid].valid ||
1913 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1914 continue;
1915
1916 pe->index = tid;
1917
1918 mvpp2_prs_hw_read(priv, pe);
1919 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
1920 if (!match)
1921 continue;
1922
1923 /* Get vlan type */
1924 ri_bits = mvpp2_prs_sram_ri_get(pe);
1925 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1926
1927 /* Get current ai value from tcam */
1928 ai_bits = mvpp2_prs_tcam_ai_get(pe);
1929 /* Clear double vlan bit */
1930 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
1931
1932 if (ai != ai_bits)
1933 continue;
1934
1935 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
1936 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
1937 return pe;
1938 }
1939 kfree(pe);
1940
1941 return NULL;
1942}
1943
1944/* Add/update single/triple vlan entry */
1945static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1946 unsigned int port_map)
1947{
1948 struct mvpp2_prs_entry *pe;
1949 int tid_aux, tid;
1950 int ret = 0;
1951
1952 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1953
1954 if (!pe) {
1955 /* Create new tcam entry */
1956 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
1957 MVPP2_PE_FIRST_FREE_TID);
1958 if (tid < 0)
1959 return tid;
1960
1961 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1962 if (!pe)
1963 return -ENOMEM;
1964
1965 /* Get last double vlan tid */
1966 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
1967 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
1968 unsigned int ri_bits;
1969
1970 if (!priv->prs_shadow[tid_aux].valid ||
1971 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
1972 continue;
1973
1974 pe->index = tid_aux;
1975 mvpp2_prs_hw_read(priv, pe);
1976 ri_bits = mvpp2_prs_sram_ri_get(pe);
1977 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
1978 MVPP2_PRS_RI_VLAN_DOUBLE)
1979 break;
1980 }
1981
1982 if (tid <= tid_aux) {
1983 ret = -EINVAL;
1984 goto error;
1985 }
1986
1987 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1988 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1989 pe->index = tid;
1990
1991 mvpp2_prs_match_etype(pe, 0, tpid);
1992
1993 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
1994 /* Shift 4 bytes - skip 1 vlan tag */
1995 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
1996 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1997 /* Clear all ai bits for next iteration */
1998 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1999
2000 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2001 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
2002 MVPP2_PRS_RI_VLAN_MASK);
2003 } else {
2004 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2005 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2006 MVPP2_PRS_RI_VLAN_MASK);
2007 }
2008 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2009
2010 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2011 }
2012 /* Update ports' mask */
2013 mvpp2_prs_tcam_port_map_set(pe, port_map);
2014
2015 mvpp2_prs_hw_write(priv, pe);
2016
2017error:
2018 kfree(pe);
2019
2020 return ret;
2021}
2022
2023/* Get first free double vlan ai number */
2024static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2025{
2026 int i;
2027
2028 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2029 if (!priv->prs_double_vlans[i])
2030 return i;
2031 }
2032
2033 return -EINVAL;
2034}
2035
2036/* Search for existing double vlan entry */
2037static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
2038 unsigned short tpid1,
2039 unsigned short tpid2)
2040{
2041 struct mvpp2_prs_entry *pe;
2042 int tid;
2043
2044 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2045 if (!pe)
2046 return NULL;
2047 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2048
2049 /* Go through all the entries with MVPP2_PRS_LU_VLAN */
2050 for (tid = MVPP2_PE_FIRST_FREE_TID;
2051 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2052 unsigned int ri_mask;
2053 bool match;
2054
2055 if (!priv->prs_shadow[tid].valid ||
2056 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2057 continue;
2058
2059 pe->index = tid;
2060 mvpp2_prs_hw_read(priv, pe);
2061
2062 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
2063 && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
2064
2065 if (!match)
2066 continue;
2067
2068 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
2069 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2070 return pe;
2071 }
2072 kfree(pe);
2073
2074 return NULL;
2075}
2076
2077/* Add or update double vlan entry */
2078static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2079 unsigned short tpid2,
2080 unsigned int port_map)
2081{
2082 struct mvpp2_prs_entry *pe;
2083 int tid_aux, tid, ai, ret = 0;
2084
2085 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2086
2087 if (!pe) {
2088 /* Create new tcam entry */
2089 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2090 MVPP2_PE_LAST_FREE_TID);
2091 if (tid < 0)
2092 return tid;
2093
2094 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2095 if (!pe)
2096 return -ENOMEM;
2097
2098 /* Set ai value for new double vlan entry */
2099 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
2100 if (ai < 0) {
2101 ret = ai;
2102 goto error;
2103 }
2104
2105 /* Get first single/triple vlan tid */
2106 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2107 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2108 unsigned int ri_bits;
2109
2110 if (!priv->prs_shadow[tid_aux].valid ||
2111 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2112 continue;
2113
2114 pe->index = tid_aux;
2115 mvpp2_prs_hw_read(priv, pe);
2116 ri_bits = mvpp2_prs_sram_ri_get(pe);
2117 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2118 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2119 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2120 break;
2121 }
2122
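 /* Conversely, the new double vlan entry must land before the first
  * single/triple vlan entry so that it is matched ahead of it.
  */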
2123 if (tid >= tid_aux) {
2124 ret = -ERANGE;
2125 goto error;
2126 }
2127
2128 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
2129 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2130 pe->index = tid;
2131
2132 priv->prs_double_vlans[ai] = true;
2133
2134 mvpp2_prs_match_etype(pe, 0, tpid1);
2135 mvpp2_prs_match_etype(pe, 4, tpid2);
2136
2137 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
2138 /* Shift 8 bytes - skip 2 vlan tags */
2139 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
2140 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2141 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2142 MVPP2_PRS_RI_VLAN_MASK);
2143 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2144 MVPP2_PRS_SRAM_AI_MASK);
2145
2146 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2147 }
2148
2149 /* Update ports' mask */
2150 mvpp2_prs_tcam_port_map_set(pe, port_map);
2151 mvpp2_prs_hw_write(priv, pe);
2152
2153error:
2154 kfree(pe);
2155 return ret;
2156}
2157
2158/* IPv4 header parsing for fragmentation and L4 offset */
2159static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2160 unsigned int ri, unsigned int ri_mask)
2161{
2162 struct mvpp2_prs_entry pe;
2163 int tid;
2164
2165 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2166 (proto != IPPROTO_IGMP))
2167 return -EINVAL;
2168
2169 /* Fragmented packet */
2170 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2171 MVPP2_PE_LAST_FREE_TID);
2172 if (tid < 0)
2173 return tid;
2174
2175 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2176 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2177 pe.index = tid;
2178
2179 /* Set next lu to IPv4 */
2180 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2181 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2182 /* Set L4 offset */
2183 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2184 sizeof(struct iphdr) - 4,
2185 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2186 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2187 MVPP2_PRS_IPV4_DIP_AI_BIT);
2188 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
2189 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2190
2191 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2192 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2193 /* Unmask all ports */
2194 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2195
2196 /* Update shadow table and hw entry */
2197 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2198 mvpp2_prs_hw_write(priv, &pe);
2199
2200 /* Not fragmented packet */
2201 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2202 MVPP2_PE_LAST_FREE_TID);
2203 if (tid < 0)
2204 return tid;
2205
2206 pe.index = tid;
2207 /* Clear ri before updating */
2208 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2209 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2210 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2211
2212 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
2213 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
2214
2215 /* Update shadow table and hw entry */
2216 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2217 mvpp2_prs_hw_write(priv, &pe);
2218
2219 return 0;
2220}
2221
2222/* IPv4 L3 multicast or broadcast */
2223static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2224{
2225 struct mvpp2_prs_entry pe;
2226 int mask, tid;
2227
2228 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2229 MVPP2_PE_LAST_FREE_TID);
2230 if (tid < 0)
2231 return tid;
2232
2233 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2234 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2235 pe.index = tid;
2236
2237 switch (l3_cast) {
2238 case MVPP2_PRS_L3_MULTI_CAST:
2239 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2240 MVPP2_PRS_IPV4_MC_MASK);
2241 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2242 MVPP2_PRS_RI_L3_ADDR_MASK);
2243 break;
2244 case MVPP2_PRS_L3_BROAD_CAST:
2245 mask = MVPP2_PRS_IPV4_BC_MASK;
2246 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2247 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2248 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2249 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2250 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2251 MVPP2_PRS_RI_L3_ADDR_MASK);
2252 break;
2253 default:
2254 return -EINVAL;
2255 }
2256
2257 /* Finished: go to flowid generation */
2258 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2259 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2260
2261 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2262 MVPP2_PRS_IPV4_DIP_AI_BIT);
2263 /* Unmask all ports */
2264 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2265
2266 /* Update shadow table and hw entry */
2267 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2268 mvpp2_prs_hw_write(priv, &pe);
2269
2270 return 0;
2271}
2272
2273/* Set entries for protocols over IPv6 */
2274static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2275 unsigned int ri, unsigned int ri_mask)
2276{
2277 struct mvpp2_prs_entry pe;
2278 int tid;
2279
2280 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2281 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2282 return -EINVAL;
2283
2284 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2285 MVPP2_PE_LAST_FREE_TID);
2286 if (tid < 0)
2287 return tid;
2288
2289 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2290 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2291 pe.index = tid;
2292
2293 /* Finished: go to flowid generation */
2294 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2295 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2296 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2297 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2298 sizeof(struct ipv6hdr) - 6,
2299 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2300
2301 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2302 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2303 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2304 /* Unmask all ports */
2305 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2306
2307 /* Write HW */
2308 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2309 mvpp2_prs_hw_write(priv, &pe);
2310
2311 return 0;
2312}
2313
2314/* IPv6 L3 multicast entry */
2315static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2316{
2317 struct mvpp2_prs_entry pe;
2318 int tid;
2319
2320 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2321 return -EINVAL;
2322
2323 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2324 MVPP2_PE_LAST_FREE_TID);
2325 if (tid < 0)
2326 return tid;
2327
2328 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2329 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2330 pe.index = tid;
2331
2332 /* Finished: go to flowid generation */
2333 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2334 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2335 MVPP2_PRS_RI_L3_ADDR_MASK);
2336 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2337 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2338 /* Shift back to IPv6 NH */
2339 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2340
2341 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2342 MVPP2_PRS_IPV6_MC_MASK);
2343 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2344 /* Unmask all ports */
2345 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2346
2347 /* Update shadow table and hw entry */
2348 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2349 mvpp2_prs_hw_write(priv, &pe);
2350
2351 return 0;
2352}
2353
2354/* Parser per-port initialization */
2355static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2356 int lu_max, int offset)
2357{
2358 u32 val;
2359
2360 /* Set lookup ID */
2361 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2362 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2363 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2364 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2365
2366 /* Set maximum number of loops for packet received from port */
2367 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2368 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2369 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2370 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2371
2372 /* Set initial offset for packet header extraction for the first
2373 * searching loop
2374 */
2375 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2376 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2377 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2378 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2379}
2380
2381/* Default flow entries initialization for all ports */
2382static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2383{
2384 struct mvpp2_prs_entry pe;
2385 int port;
2386
2387 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
2388 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2389 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2390 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2391
2392 /* Mask all ports */
2393 mvpp2_prs_tcam_port_map_set(&pe, 0);
2394
2395 /* Set flow ID */
2396 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2397 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2398
2399 /* Update shadow table and hw entry */
2400 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2401 mvpp2_prs_hw_write(priv, &pe);
2402 }
2403}
2404
2405/* Set default entry for Marvell Header field */
2406static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2407{
2408 struct mvpp2_prs_entry pe;
2409
2410 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2411
2412 pe.index = MVPP2_PE_MH_DEFAULT;
2413 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2414 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2415 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2416 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2417
2418 /* Unmask all ports */
2419 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2420
2421 /* Update shadow table and hw entry */
2422 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2423 mvpp2_prs_hw_write(priv, &pe);
2424}
2425
2426/* Set default entries (placeholders) for promiscuous, non-promiscuous and
2427 * multicast MAC addresses
2428 */
2429static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2430{
2431 struct mvpp2_prs_entry pe;
2432
2433 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2434
2435 /* Non-promiscuous mode for all ports - DROP unknown packets */
2436 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2437 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2438
2439 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2440 MVPP2_PRS_RI_DROP_MASK);
2441 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2442 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2443
2444 /* Unmask all ports */
2445 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2446
2447 /* Update shadow table and hw entry */
2448 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2449 mvpp2_prs_hw_write(priv, &pe);
2450
2451 /* place holders only - no ports */
2452 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2453 mvpp2_prs_mac_promisc_set(priv, 0, false);
2454 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2455 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2456}
2457
2458/* Set default entries for various types of dsa packets */
2459static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2460{
2461 struct mvpp2_prs_entry pe;
2462
2463 /* None tagged EDSA entry - place holder */
2464 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2465 MVPP2_PRS_EDSA);
2466
2467 /* Tagged EDSA entry - place holder */
2468 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2469
2470 /* None tagged DSA entry - place holder */
2471 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2472 MVPP2_PRS_DSA);
2473
2474 /* Tagged DSA entry - place holder */
2475 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2476
2477 /* None tagged EDSA ethertype entry - place holder */
2478 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2479 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2480
2481 /* Tagged EDSA ethertype entry - place holder */
2482 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2483 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2484
2485 /* None tagged DSA ethertype entry */
2486 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2487 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2488
2489 /* Tagged DSA ethertype entry */
2490 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2491 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2492
2493 /* Set default entry, in case DSA or EDSA tag not found */
2494 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2495 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2496 pe.index = MVPP2_PE_DSA_DEFAULT;
2497 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2498
2499 /* Shift 0 bytes */
2500 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2501 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2502
2503 /* Clear all sram ai bits for next iteration */
2504 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2505
2506 /* Unmask all ports */
2507 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2508
2509 mvpp2_prs_hw_write(priv, &pe);
2510}
2511
2512/* Match basic ethertypes */
2513static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2514{
2515 struct mvpp2_prs_entry pe;
2516 int tid;
2517
2518 /* Ethertype: PPPoE */
2519 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2520 MVPP2_PE_LAST_FREE_TID);
2521 if (tid < 0)
2522 return tid;
2523
2524 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2525 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2526 pe.index = tid;
2527
2528 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2529
2530 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2531 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2532 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2533 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2534 MVPP2_PRS_RI_PPPOE_MASK);
2535
2536 /* Update shadow table and hw entry */
2537 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2538 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2539 priv->prs_shadow[pe.index].finish = false;
2540 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2541 MVPP2_PRS_RI_PPPOE_MASK);
2542 mvpp2_prs_hw_write(priv, &pe);
2543
2544 /* Ethertype: ARP */
2545 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2546 MVPP2_PE_LAST_FREE_TID);
2547 if (tid < 0)
2548 return tid;
2549
2550 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2551 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2552 pe.index = tid;
2553
2554 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2555
2556 /* Generate flow in the next iteration */
2557 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2558 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2559 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2560 MVPP2_PRS_RI_L3_PROTO_MASK);
2561 /* Set L3 offset */
2562 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2563 MVPP2_ETH_TYPE_LEN,
2564 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2565
2566 /* Update shadow table and hw entry */
2567 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2568 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2569 priv->prs_shadow[pe.index].finish = true;
2570 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2571 MVPP2_PRS_RI_L3_PROTO_MASK);
2572 mvpp2_prs_hw_write(priv, &pe);
2573
2574 /* Ethertype: LBTD */
2575 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2576 MVPP2_PE_LAST_FREE_TID);
2577 if (tid < 0)
2578 return tid;
2579
2580 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2581 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2582 pe.index = tid;
2583
2584 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2585
2586 /* Generate flow in the next iteration */
2587 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2588 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2589 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2590 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2591 MVPP2_PRS_RI_CPU_CODE_MASK |
2592 MVPP2_PRS_RI_UDF3_MASK);
2593 /* Set L3 offset */
2594 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2595 MVPP2_ETH_TYPE_LEN,
2596 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2597
2598 /* Update shadow table and hw entry */
2599 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2600 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2601 priv->prs_shadow[pe.index].finish = true;
2602 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2603 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2604 MVPP2_PRS_RI_CPU_CODE_MASK |
2605 MVPP2_PRS_RI_UDF3_MASK);
2606 mvpp2_prs_hw_write(priv, &pe);
2607
2608 /* Ethertype: IPv4 without options */
2609 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2610 MVPP2_PE_LAST_FREE_TID);
2611 if (tid < 0)
2612 return tid;
2613
2614 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2615 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2616 pe.index = tid;
2617
2618 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2619 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2620 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2621 MVPP2_PRS_IPV4_HEAD_MASK |
2622 MVPP2_PRS_IPV4_IHL_MASK);
2623
2624 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2625 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2626 MVPP2_PRS_RI_L3_PROTO_MASK);
2627 /* Skip eth_type + 4 bytes of IP header */
2628 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2629 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2630 /* Set L3 offset */
2631 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2632 MVPP2_ETH_TYPE_LEN,
2633 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2634
2635 /* Update shadow table and hw entry */
2636 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2637 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2638 priv->prs_shadow[pe.index].finish = false;
2639 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2640 MVPP2_PRS_RI_L3_PROTO_MASK);
2641 mvpp2_prs_hw_write(priv, &pe);
2642
2643 /* Ethertype: IPv4 with options */
2644 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2645 MVPP2_PE_LAST_FREE_TID);
2646 if (tid < 0)
2647 return tid;
2648
2649 pe.index = tid;
2650
2651 /* Clear tcam data before updating */
2652 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2653 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2654
2655 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2656 MVPP2_PRS_IPV4_HEAD,
2657 MVPP2_PRS_IPV4_HEAD_MASK);
2658
2659 /* Clear ri before updating */
2660 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2661 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2662 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2663 MVPP2_PRS_RI_L3_PROTO_MASK);
2664
2665 /* Update shadow table and hw entry */
2666 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2667 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2668 priv->prs_shadow[pe.index].finish = false;
2669 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2670 MVPP2_PRS_RI_L3_PROTO_MASK);
2671 mvpp2_prs_hw_write(priv, &pe);
2672
2673 /* Ethertype: IPv6 without options */
2674 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2675 MVPP2_PE_LAST_FREE_TID);
2676 if (tid < 0)
2677 return tid;
2678
2679 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2680 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2681 pe.index = tid;
2682
2683 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2684
2685 /* Skip DIP of IPV6 header */
2686 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2687 MVPP2_MAX_L3_ADDR_SIZE,
2688 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2689 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2690 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2691 MVPP2_PRS_RI_L3_PROTO_MASK);
2692 /* Set L3 offset */
2693 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2694 MVPP2_ETH_TYPE_LEN,
2695 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2696
2697 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2698 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2699 priv->prs_shadow[pe.index].finish = false;
2700 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2701 MVPP2_PRS_RI_L3_PROTO_MASK);
2702 mvpp2_prs_hw_write(priv, &pe);
2703
2704 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2705 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2706 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2707 pe.index = MVPP2_PE_ETH_TYPE_UN;
2708
2709 /* Unmask all ports */
2710 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2711
2712 /* Generate flow in the next iteration */
2713 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2714 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2715 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2716 MVPP2_PRS_RI_L3_PROTO_MASK);
2717 /* Set L3 offset even if it's unknown L3 */
2718 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2719 MVPP2_ETH_TYPE_LEN,
2720 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2721
2722 /* Update shadow table and hw entry */
2723 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2724 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2725 priv->prs_shadow[pe.index].finish = true;
2726 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2727 MVPP2_PRS_RI_L3_PROTO_MASK);
2728 mvpp2_prs_hw_write(priv, &pe);
2729
2730 return 0;
2731}
2732
2733/* Configure vlan entries and detect up to 2 successive VLAN tags.
2734 * Possible options:
2735 * 0x8100, 0x88A8
2736 * 0x8100, 0x8100
2737 * 0x8100
2738 * 0x88A8
2739 */
2740static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2741{
2742 struct mvpp2_prs_entry pe;
2743 int err;
2744
2745 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
2746 MVPP2_PRS_DBL_VLANS_MAX,
2747 GFP_KERNEL);
2748 if (!priv->prs_double_vlans)
2749 return -ENOMEM;
2750
2751 /* Double VLAN: 0x8100, 0x88A8 */
2752 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2753 MVPP2_PRS_PORT_MASK);
2754 if (err)
2755 return err;
2756
2757 /* Double VLAN: 0x8100, 0x8100 */
2758 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2759 MVPP2_PRS_PORT_MASK);
2760 if (err)
2761 return err;
2762
2763 /* Single VLAN: 0x88a8 */
2764 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2765 MVPP2_PRS_PORT_MASK);
2766 if (err)
2767 return err;
2768
2769 /* Single VLAN: 0x8100 */
2770 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2771 MVPP2_PRS_PORT_MASK);
2772 if (err)
2773 return err;
2774
2775 /* Set default double vlan entry */
2776 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2777 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2778 pe.index = MVPP2_PE_VLAN_DBL;
2779
2780 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2781 /* Clear ai for next iterations */
2782 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2783 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2784 MVPP2_PRS_RI_VLAN_MASK);
2785
2786 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2787 MVPP2_PRS_DBL_VLAN_AI_BIT);
2788 /* Unmask all ports */
2789 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2790
2791 /* Update shadow table and hw entry */
2792 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2793 mvpp2_prs_hw_write(priv, &pe);
2794
2795 /* Set default vlan none entry */
2796 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2797 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2798 pe.index = MVPP2_PE_VLAN_NONE;
2799
2800 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2801 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2802 MVPP2_PRS_RI_VLAN_MASK);
2803
2804 /* Unmask all ports */
2805 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2806
2807 /* Update shadow table and hw entry */
2808 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2809 mvpp2_prs_hw_write(priv, &pe);
2810
2811 return 0;
2812}
2813
2814/* Set entries for PPPoE ethertype */
2815static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2816{
2817 struct mvpp2_prs_entry pe;
2818 int tid;
2819
2820 /* IPv4 over PPPoE with options */
2821 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2822 MVPP2_PE_LAST_FREE_TID);
2823 if (tid < 0)
2824 return tid;
2825
2826 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2827 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2828 pe.index = tid;
2829
2830 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2831
2832 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2833 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2834 MVPP2_PRS_RI_L3_PROTO_MASK);
2835 /* Skip eth_type + 4 bytes of IP header */
2836 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2837 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2838 /* Set L3 offset */
2839 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2840 MVPP2_ETH_TYPE_LEN,
2841 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2842
2843 /* Update shadow table and hw entry */
2844 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2845 mvpp2_prs_hw_write(priv, &pe);
2846
2847 /* IPv4 over PPPoE without options */
2848 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2849 MVPP2_PE_LAST_FREE_TID);
2850 if (tid < 0)
2851 return tid;
2852
2853 pe.index = tid;
2854
2855 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2856 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2857 MVPP2_PRS_IPV4_HEAD_MASK |
2858 MVPP2_PRS_IPV4_IHL_MASK);
2859
2860 /* Clear ri before updating */
2861 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2862 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2863 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2864 MVPP2_PRS_RI_L3_PROTO_MASK);
2865
2866 /* Update shadow table and hw entry */
2867 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2868 mvpp2_prs_hw_write(priv, &pe);
2869
2870 /* IPv6 over PPPoE */
2871 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2872 MVPP2_PE_LAST_FREE_TID);
2873 if (tid < 0)
2874 return tid;
2875
2876 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2877 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2878 pe.index = tid;
2879
2880 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2881
2882 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2883 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2884 MVPP2_PRS_RI_L3_PROTO_MASK);
2885 /* Skip eth_type + 4 bytes of IPv6 header */
2886 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2887 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2888 /* Set L3 offset */
2889 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2890 MVPP2_ETH_TYPE_LEN,
2891 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2892
2893 /* Update shadow table and hw entry */
2894 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2895 mvpp2_prs_hw_write(priv, &pe);
2896
2897 /* Non-IP over PPPoE */
2898 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2899 MVPP2_PE_LAST_FREE_TID);
2900 if (tid < 0)
2901 return tid;
2902
2903 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2904 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2905 pe.index = tid;
2906
2907 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2908 MVPP2_PRS_RI_L3_PROTO_MASK);
2909
2910 /* Finished: go to flowid generation */
2911 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2912 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2913 /* Set L3 offset even if it's unknown L3 */
2914 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2915 MVPP2_ETH_TYPE_LEN,
2916 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2917
2918 /* Update shadow table and hw entry */
2919 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2920 mvpp2_prs_hw_write(priv, &pe);
2921
2922 return 0;
2923}
2924
2925/* Initialize entries for IPv4 */
2926static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2927{
2928 struct mvpp2_prs_entry pe;
2929 int err;
2930
2931 /* Set entries for TCP, UDP and IGMP over IPv4 */
2932 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
2933 MVPP2_PRS_RI_L4_PROTO_MASK);
2934 if (err)
2935 return err;
2936
2937 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
2938 MVPP2_PRS_RI_L4_PROTO_MASK);
2939 if (err)
2940 return err;
2941
2942 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
2943 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2944 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2945 MVPP2_PRS_RI_CPU_CODE_MASK |
2946 MVPP2_PRS_RI_UDF3_MASK);
2947 if (err)
2948 return err;
2949
2950 /* IPv4 Broadcast */
2951 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
2952 if (err)
2953 return err;
2954
2955 /* IPv4 Multicast */
2956 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
2957 if (err)
2958 return err;
2959
2960 /* Default IPv4 entry for unknown protocols */
2961 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2962 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2963 pe.index = MVPP2_PE_IP4_PROTO_UN;
2964
2965 /* Set next lu to IPv4 */
2966 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2967 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2968 /* Set L4 offset */
2969 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2970 sizeof(struct iphdr) - 4,
2971 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2972 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2973 MVPP2_PRS_IPV4_DIP_AI_BIT);
2974 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
2975 MVPP2_PRS_RI_L4_PROTO_MASK);
2976
2977 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2978 /* Unmask all ports */
2979 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2980
2981 /* Update shadow table and hw entry */
2982 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2983 mvpp2_prs_hw_write(priv, &pe);
2984
2985 /* Default IPv4 entry for unicast address */
2986 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2987 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2988 pe.index = MVPP2_PE_IP4_ADDR_UN;
2989
2990 /* Finished: go to flowid generation */
2991 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2992 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2993 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
2994 MVPP2_PRS_RI_L3_ADDR_MASK);
2995
2996 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2997 MVPP2_PRS_IPV4_DIP_AI_BIT);
2998 /* Unmask all ports */
2999 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3000
3001 /* Update shadow table and hw entry */
3002 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3003 mvpp2_prs_hw_write(priv, &pe);
3004
3005 return 0;
3006}
3007
3008/* Initialize entries for IPv6 */
3009static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
3010{
3011 struct mvpp2_prs_entry pe;
3012 int tid, err;
3013
3014 /* Set entries for TCP, UDP and ICMP over IPv6 */
3015 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
3016 MVPP2_PRS_RI_L4_TCP,
3017 MVPP2_PRS_RI_L4_PROTO_MASK);
3018 if (err)
3019 return err;
3020
3021 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
3022 MVPP2_PRS_RI_L4_UDP,
3023 MVPP2_PRS_RI_L4_PROTO_MASK);
3024 if (err)
3025 return err;
3026
3027 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
3028 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3029 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3030 MVPP2_PRS_RI_CPU_CODE_MASK |
3031 MVPP2_PRS_RI_UDF3_MASK);
3032 if (err)
3033 return err;
3034
3035 /* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */
3036 /* Result Info: UDF7=1, DS lite */
3037 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
3038 MVPP2_PRS_RI_UDF7_IP6_LITE,
3039 MVPP2_PRS_RI_UDF7_MASK);
3040 if (err)
3041 return err;
3042
3043 /* IPv6 multicast */
3044 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3045 if (err)
3046 return err;
3047
3048 /* Entry for checking hop limit */
3049 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3050 MVPP2_PE_LAST_FREE_TID);
3051 if (tid < 0)
3052 return tid;
3053
3054 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3055 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3056 pe.index = tid;
3057
3058 /* Finished: go to flowid generation */
3059 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3060 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3061 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
3062 MVPP2_PRS_RI_DROP_MASK,
3063 MVPP2_PRS_RI_L3_PROTO_MASK |
3064 MVPP2_PRS_RI_DROP_MASK);
3065
3066 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
3067 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3068 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3069
3070 /* Update shadow table and hw entry */
3071 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3072 mvpp2_prs_hw_write(priv, &pe);
3073
3074 /* Default IPv6 entry for unknown protocols */
3075 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3076 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3077 pe.index = MVPP2_PE_IP6_PROTO_UN;
3078
3079 /* Finished: go to flowid generation */
3080 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3081 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3082 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3083 MVPP2_PRS_RI_L4_PROTO_MASK);
3084 /* Set L4 offset relative to our current place */
3085 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3086 sizeof(struct ipv6hdr) - 4,
3087 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3088
3089 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3090 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3091 /* Unmask all ports */
3092 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3093
3094 /* Update shadow table and hw entry */
3095 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3096 mvpp2_prs_hw_write(priv, &pe);
3097
3098 /* Default IPv6 entry for unknown ext protocols */
3099 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3100 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3101 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
3102
3103 /* Finished: go to flowid generation */
3104 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3105 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3106 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3107 MVPP2_PRS_RI_L4_PROTO_MASK);
3108
3109 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
3110 MVPP2_PRS_IPV6_EXT_AI_BIT);
3111 /* Unmask all ports */
3112 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3113
3114 /* Update shadow table and hw entry */
3115 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3116 mvpp2_prs_hw_write(priv, &pe);
3117
3118 /* Default IPv6 entry for unicast address */
3119 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3120 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3121 pe.index = MVPP2_PE_IP6_ADDR_UN;
3122
3123 /* Finished: go to IPv6 again */
3124 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3125 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3126 MVPP2_PRS_RI_L3_ADDR_MASK);
3127 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3128 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3129 /* Shift back to IPV6 NH */
3130 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3131
3132 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3133 /* Unmask all ports */
3134 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3135
3136 /* Update shadow table and hw entry */
3137 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3138 mvpp2_prs_hw_write(priv, &pe);
3139
3140 return 0;
3141}
3142
3143/* Parser default initialization */
3144static int mvpp2_prs_default_init(struct platform_device *pdev,
3145 struct mvpp2 *priv)
3146{
3147 int err, index, i;
3148
3149 /* Enable tcam table */
3150 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
3151
3152 /* Clear all tcam and sram entries */
3153 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
3154 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
3155 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3156 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
3157
3158 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
3159 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3160 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3161 }
3162
3163 /* Invalidate all tcam entries */
3164 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3165 mvpp2_prs_hw_inv(priv, index);
3166
3167 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
3168 sizeof(struct mvpp2_prs_shadow),
3169 GFP_KERNEL);
3170 if (!priv->prs_shadow)
3171 return -ENOMEM;
3172
3173 /* Always start from lookup = 0 */
3174 for (index = 0; index < MVPP2_MAX_PORTS; index++)
3175 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3176 MVPP2_PRS_PORT_LU_MAX, 0);
3177
3178 mvpp2_prs_def_flow_init(priv);
3179
3180 mvpp2_prs_mh_init(priv);
3181
3182 mvpp2_prs_mac_init(priv);
3183
3184 mvpp2_prs_dsa_init(priv);
3185
3186 err = mvpp2_prs_etype_init(priv);
3187 if (err)
3188 return err;
3189
3190 err = mvpp2_prs_vlan_init(pdev, priv);
3191 if (err)
3192 return err;
3193
3194 err = mvpp2_prs_pppoe_init(priv);
3195 if (err)
3196 return err;
3197
3198 err = mvpp2_prs_ip6_init(priv);
3199 if (err)
3200 return err;
3201
3202 err = mvpp2_prs_ip4_init(priv);
3203 if (err)
3204 return err;
3205
3206 return 0;
3207}
3208
3209/* Compare MAC DA with tcam entry data */
3210static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3211 const u8 *da, unsigned char *mask)
3212{
3213 unsigned char tcam_byte, tcam_mask;
3214 int index;
3215
3216 for (index = 0; index < ETH_ALEN; index++) {
3217 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3218 if (tcam_mask != mask[index])
3219 return false;
3220
3221 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3222 return false;
3223 }
3224
3225 return true;
3226}
3227
3228/* Find tcam entry with matched pair <MAC DA, port> */
3229static struct mvpp2_prs_entry *
3230mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3231 unsigned char *mask, int udf_type)
3232{
3233 struct mvpp2_prs_entry *pe;
3234 int tid;
3235
3236 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3237 if (!pe)
3238 return NULL;
3239 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3240
3241	/* Go through all the entries with MVPP2_PRS_LU_MAC */
3242 for (tid = MVPP2_PE_FIRST_FREE_TID;
3243 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3244 unsigned int entry_pmap;
3245
3246 if (!priv->prs_shadow[tid].valid ||
3247 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3248 (priv->prs_shadow[tid].udf != udf_type))
3249 continue;
3250
3251 pe->index = tid;
3252 mvpp2_prs_hw_read(priv, pe);
3253 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3254
3255 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3256 entry_pmap == pmap)
3257 return pe;
3258 }
3259 kfree(pe);
3260
3261 return NULL;
3262}
3263
3264/* Update parser's mac da entry */
3265static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3266 const u8 *da, bool add)
3267{
3268 struct mvpp2_prs_entry *pe;
3269 unsigned int pmap, len, ri;
3270 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3271 int tid;
3272
3273	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
3274 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3275 MVPP2_PRS_UDF_MAC_DEF);
3276
3277 /* No such entry */
3278 if (!pe) {
3279 if (!add)
3280 return 0;
3281
3282 /* Create new TCAM entry */
3283		/* Find the first range mac entry */
3284 for (tid = MVPP2_PE_FIRST_FREE_TID;
3285 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3286 if (priv->prs_shadow[tid].valid &&
3287 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3288 (priv->prs_shadow[tid].udf ==
3289 MVPP2_PRS_UDF_MAC_RANGE))
3290 break;
3291
3292		/* Go through all the entries from first to last */
3293 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3294 tid - 1);
3295 if (tid < 0)
3296 return tid;
3297
3298 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3299 if (!pe)
Amitoj Kaur Chawlac2bb7bc2016-02-04 19:25:26 +05303300 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003301 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3302 pe->index = tid;
3303
3304 /* Mask all ports */
3305 mvpp2_prs_tcam_port_map_set(pe, 0);
3306 }
3307
3308 /* Update port mask */
3309 mvpp2_prs_tcam_port_set(pe, port, add);
3310
3311 /* Invalidate the entry if no ports are left enabled */
3312 pmap = mvpp2_prs_tcam_port_map_get(pe);
3313 if (pmap == 0) {
3314 if (add) {
3315 kfree(pe);
Amitoj Kaur Chawlac2bb7bc2016-02-04 19:25:26 +05303316 return -EINVAL;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003317 }
3318 mvpp2_prs_hw_inv(priv, pe->index);
3319 priv->prs_shadow[pe->index].valid = false;
3320 kfree(pe);
3321 return 0;
3322 }
3323
3324 /* Continue - set next lookup */
3325 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3326
3327 /* Set match on DA */
3328 len = ETH_ALEN;
3329 while (len--)
3330 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3331
3332 /* Set result info bits */
3333 if (is_broadcast_ether_addr(da))
3334 ri = MVPP2_PRS_RI_L2_BCAST;
3335 else if (is_multicast_ether_addr(da))
3336 ri = MVPP2_PRS_RI_L2_MCAST;
3337 else
3338 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3339
3340 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3341 MVPP2_PRS_RI_MAC_ME_MASK);
3342 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3343 MVPP2_PRS_RI_MAC_ME_MASK);
3344
3345 /* Shift to ethertype */
3346 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3347 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3348
3349 /* Update shadow table and hw entry */
3350 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3351 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3352 mvpp2_prs_hw_write(priv, pe);
3353
3354 kfree(pe);
3355
3356 return 0;
3357}
3358
3359static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3360{
3361 struct mvpp2_port *port = netdev_priv(dev);
3362 int err;
3363
3364 /* Remove old parser entry */
3365 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3366 false);
3367 if (err)
3368 return err;
3369
3370 /* Add new parser entry */
3371 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3372 if (err)
3373 return err;
3374
3375 /* Set addr in the device */
3376 ether_addr_copy(dev->dev_addr, da);
3377
3378 return 0;
3379}
3380
3381/* Delete all port's multicast simple (not range) entries */
3382static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3383{
3384 struct mvpp2_prs_entry pe;
3385 int index, tid;
3386
3387 for (tid = MVPP2_PE_FIRST_FREE_TID;
3388 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3389 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3390
3391 if (!priv->prs_shadow[tid].valid ||
3392 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3393 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3394 continue;
3395
3396 /* Only simple mac entries */
3397 pe.index = tid;
3398 mvpp2_prs_hw_read(priv, &pe);
3399
3400 /* Read mac addr from entry */
3401 for (index = 0; index < ETH_ALEN; index++)
3402 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3403 &da_mask[index]);
3404
3405 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3406 /* Delete this entry */
3407 mvpp2_prs_mac_da_accept(priv, port, da, false);
3408 }
3409}
3410
3411static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3412{
3413 switch (type) {
3414 case MVPP2_TAG_TYPE_EDSA:
3415 /* Add port to EDSA entries */
3416 mvpp2_prs_dsa_tag_set(priv, port, true,
3417 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3418 mvpp2_prs_dsa_tag_set(priv, port, true,
3419 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3420 /* Remove port from DSA entries */
3421 mvpp2_prs_dsa_tag_set(priv, port, false,
3422 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3423 mvpp2_prs_dsa_tag_set(priv, port, false,
3424 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3425 break;
3426
3427 case MVPP2_TAG_TYPE_DSA:
3428 /* Add port to DSA entries */
3429 mvpp2_prs_dsa_tag_set(priv, port, true,
3430 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3431 mvpp2_prs_dsa_tag_set(priv, port, true,
3432 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3433 /* Remove port from EDSA entries */
3434 mvpp2_prs_dsa_tag_set(priv, port, false,
3435 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3436 mvpp2_prs_dsa_tag_set(priv, port, false,
3437 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3438 break;
3439
3440 case MVPP2_TAG_TYPE_MH:
3441 case MVPP2_TAG_TYPE_NONE:
3442		/* Remove port from EDSA and DSA entries */
3443 mvpp2_prs_dsa_tag_set(priv, port, false,
3444 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3445 mvpp2_prs_dsa_tag_set(priv, port, false,
3446 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3447 mvpp2_prs_dsa_tag_set(priv, port, false,
3448 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3449 mvpp2_prs_dsa_tag_set(priv, port, false,
3450 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3451 break;
3452
3453 default:
3454 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3455 return -EINVAL;
3456 }
3457
3458 return 0;
3459}
3460
3461/* Set prs flow for the port */
3462static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3463{
3464 struct mvpp2_prs_entry *pe;
3465 int tid;
3466
3467 pe = mvpp2_prs_flow_find(port->priv, port->id);
3468
3469	/* Such an entry does not exist yet */
3470 if (!pe) {
3471		/* Go through all the entries from last to first */
3472 tid = mvpp2_prs_tcam_first_free(port->priv,
3473 MVPP2_PE_LAST_FREE_TID,
3474 MVPP2_PE_FIRST_FREE_TID);
3475 if (tid < 0)
3476 return tid;
3477
3478 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3479 if (!pe)
3480 return -ENOMEM;
3481
3482 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3483 pe->index = tid;
3484
3485		/* Set flow ID */
3486 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3487 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3488
3489 /* Update shadow table */
3490 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3491 }
3492
3493 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3494 mvpp2_prs_hw_write(port->priv, pe);
3495 kfree(pe);
3496
3497 return 0;
3498}
3499
3500/* Classifier configuration routines */
3501
3502/* Update classification flow table registers */
3503static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3504 struct mvpp2_cls_flow_entry *fe)
3505{
3506 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3507 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3508 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3509 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3510}
3511
3512/* Update classification lookup table register */
3513static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3514 struct mvpp2_cls_lookup_entry *le)
3515{
3516 u32 val;
3517
3518 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3519 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3520 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3521}
3522
3523/* Classifier default initialization */
3524static void mvpp2_cls_init(struct mvpp2 *priv)
3525{
3526 struct mvpp2_cls_lookup_entry le;
3527 struct mvpp2_cls_flow_entry fe;
3528 int index;
3529
3530 /* Enable classifier */
3531 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3532
3533 /* Clear classifier flow table */
Arnd Bergmanne8f967c2016-11-24 17:28:12 +01003534 memset(&fe.data, 0, sizeof(fe.data));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003535 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3536 fe.index = index;
3537 mvpp2_cls_flow_write(priv, &fe);
3538 }
3539
3540 /* Clear classifier lookup table */
3541 le.data = 0;
3542 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3543 le.lkpid = index;
3544 le.way = 0;
3545 mvpp2_cls_lookup_write(priv, &le);
3546
3547 le.way = 1;
3548 mvpp2_cls_lookup_write(priv, &le);
3549 }
3550}
3551
3552static void mvpp2_cls_port_config(struct mvpp2_port *port)
3553{
3554 struct mvpp2_cls_lookup_entry le;
3555 u32 val;
3556
3557 /* Set way for the port */
3558 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3559 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3560 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3561
3562 /* Pick the entry to be accessed in lookup ID decoding table
3563 * according to the way and lkpid.
3564 */
3565 le.lkpid = port->id;
3566 le.way = 0;
3567 le.data = 0;
3568
3569 /* Set initial CPU queue for receiving packets */
3570 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3571 le.data |= port->first_rxq;
3572
3573 /* Disable classification engines */
3574 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3575
3576 /* Update lookup ID table entry */
3577 mvpp2_cls_lookup_write(port->priv, &le);
3578}
3579
3580/* Set CPU queue number for oversize packets */
3581static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3582{
3583 u32 val;
3584
3585 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3586 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3587
3588 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3589 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3590
3591 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3592 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3593 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3594}
3595
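/* RX buffer allocation helpers: buffers that fit in a page come from the
 * page_frag allocator, larger ones fall back to kmalloc.
 */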
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003596static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
3597{
3598 if (likely(pool->frag_size <= PAGE_SIZE))
3599 return netdev_alloc_frag(pool->frag_size);
3600 else
3601 return kmalloc(pool->frag_size, GFP_ATOMIC);
3602}
3603
3604static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
3605{
3606 if (likely(pool->frag_size <= PAGE_SIZE))
3607 skb_free_frag(data);
3608 else
3609 kfree(data);
3610}
3611
Marcin Wojtas3f518502014-07-10 16:52:13 -03003612/* Buffer Manager configuration routines */
3613
3614/* Create pool */
3615static int mvpp2_bm_pool_create(struct platform_device *pdev,
3616 struct mvpp2 *priv,
3617 struct mvpp2_bm_pool *bm_pool, int size)
3618{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003619 u32 val;
3620
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003621 /* Number of buffer pointers must be a multiple of 16, as per
3622 * hardware constraints
3623 */
3624 if (!IS_ALIGNED(size, 16))
3625 return -EINVAL;
3626
3627 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
3628 * bytes per buffer pointer
3629 */
3630 if (priv->hw_version == MVPP21)
3631 bm_pool->size_bytes = 2 * sizeof(u32) * size;
3632 else
3633 bm_pool->size_bytes = 2 * sizeof(u64) * size;
3634
3635 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003636 &bm_pool->dma_addr,
Marcin Wojtas3f518502014-07-10 16:52:13 -03003637 GFP_KERNEL);
3638 if (!bm_pool->virt_addr)
3639 return -ENOMEM;
3640
Thomas Petazzonid3158802017-02-21 11:28:13 +01003641 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
3642 MVPP2_BM_POOL_PTR_ALIGN)) {
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003643 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3644 bm_pool->virt_addr, bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003645 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3646 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3647 return -ENOMEM;
3648 }
3649
3650 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003651 lower_32_bits(bm_pool->dma_addr));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003652 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3653
3654 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3655 val |= MVPP2_BM_START_MASK;
3656 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3657
3658 bm_pool->type = MVPP2_BM_FREE;
3659 bm_pool->size = size;
3660 bm_pool->pkt_size = 0;
3661 bm_pool->buf_num = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003662
3663 return 0;
3664}
3665
3666/* Set pool buffer size */
3667static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3668 struct mvpp2_bm_pool *bm_pool,
3669 int buf_size)
3670{
3671 u32 val;
3672
3673 bm_pool->buf_size = buf_size;
3674
3675 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3676 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3677}
3678
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003679static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3680 struct mvpp2_bm_pool *bm_pool,
3681 dma_addr_t *dma_addr,
3682 phys_addr_t *phys_addr)
3683{
Thomas Petazzonia7868412017-03-07 16:53:13 +01003684 int cpu = smp_processor_id();
3685
3686 *dma_addr = mvpp2_percpu_read(priv, cpu,
3687 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3688 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003689
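	/* On PPv2.2 the DMA/phys addresses may exceed 32 bits; the high bits
	 * are exposed through a separate register.
	 */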
3690 if (priv->hw_version == MVPP22) {
3691 u32 val;
3692 u32 dma_addr_highbits, phys_addr_highbits;
3693
Thomas Petazzonia7868412017-03-07 16:53:13 +01003694 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003695 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
3696 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
3697 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
3698
3699 if (sizeof(dma_addr_t) == 8)
3700 *dma_addr |= (u64)dma_addr_highbits << 32;
3701
3702 if (sizeof(phys_addr_t) == 8)
3703 *phys_addr |= (u64)phys_addr_highbits << 32;
3704 }
3705}
3706
Ezequiel Garcia7861f122014-07-21 13:48:14 -03003707/* Free all buffers from the pool */
Marcin Wojtas4229d502015-12-03 15:20:50 +01003708static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3709 struct mvpp2_bm_pool *bm_pool)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003710{
3711 int i;
3712
Ezequiel Garcia7861f122014-07-21 13:48:14 -03003713 for (i = 0; i < bm_pool->buf_num; i++) {
Thomas Petazzoni20396132017-03-07 16:53:00 +01003714 dma_addr_t buf_dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003715 phys_addr_t buf_phys_addr;
3716 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003717
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003718 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
3719 &buf_dma_addr, &buf_phys_addr);
Marcin Wojtas4229d502015-12-03 15:20:50 +01003720
Thomas Petazzoni20396132017-03-07 16:53:00 +01003721 dma_unmap_single(dev, buf_dma_addr,
Marcin Wojtas4229d502015-12-03 15:20:50 +01003722 bm_pool->buf_size, DMA_FROM_DEVICE);
3723
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003724 data = (void *)phys_to_virt(buf_phys_addr);
3725 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003726 break;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003727
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003728 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003729 }
3730
3731 /* Update BM driver with number of buffers removed from pool */
3732 bm_pool->buf_num -= i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003733}
3734
3735/* Cleanup pool */
3736static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3737 struct mvpp2 *priv,
3738 struct mvpp2_bm_pool *bm_pool)
3739{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003740 u32 val;
3741
Marcin Wojtas4229d502015-12-03 15:20:50 +01003742 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03003743 if (bm_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03003744 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3745 return 0;
3746 }
3747
3748 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3749 val |= MVPP2_BM_STOP_MASK;
3750 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3751
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003752 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
Marcin Wojtas3f518502014-07-10 16:52:13 -03003753 bm_pool->virt_addr,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003754 bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003755 return 0;
3756}
3757
3758static int mvpp2_bm_pools_init(struct platform_device *pdev,
3759 struct mvpp2 *priv)
3760{
3761 int i, err, size;
3762 struct mvpp2_bm_pool *bm_pool;
3763
3764 /* Create all pools with maximum size */
3765 size = MVPP2_BM_POOL_SIZE_MAX;
3766 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3767 bm_pool = &priv->bm_pools[i];
3768 bm_pool->id = i;
3769 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3770 if (err)
3771 goto err_unroll_pools;
3772 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3773 }
3774 return 0;
3775
3776err_unroll_pools:
3777 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3778 for (i = i - 1; i >= 0; i--)
3779 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3780 return err;
3781}
3782
3783static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3784{
3785 int i, err;
3786
3787 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3788 /* Mask BM all interrupts */
3789 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3790 /* Clear BM cause register */
3791 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3792 }
3793
3794 /* Allocate and initialize BM pools */
3795 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
3796 sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
3797 if (!priv->bm_pools)
3798 return -ENOMEM;
3799
3800 err = mvpp2_bm_pools_init(pdev, priv);
3801 if (err < 0)
3802 return err;
3803 return 0;
3804}
3805
3806/* Attach long pool to rxq */
3807static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3808 int lrxq, int long_pool)
3809{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003810 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003811 int prxq;
3812
3813 /* Get queue physical ID */
3814 prxq = port->rxqs[lrxq]->id;
3815
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003816 if (port->priv->hw_version == MVPP21)
3817 mask = MVPP21_RXQ_POOL_LONG_MASK;
3818 else
3819 mask = MVPP22_RXQ_POOL_LONG_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003820
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003821 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3822 val &= ~mask;
3823 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003824 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3825}
3826
3827/* Attach short pool to rxq */
3828static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3829 int lrxq, int short_pool)
3830{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003831 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003832 int prxq;
3833
3834 /* Get queue physical ID */
3835 prxq = port->rxqs[lrxq]->id;
3836
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003837 if (port->priv->hw_version == MVPP21)
3838 mask = MVPP21_RXQ_POOL_SHORT_MASK;
3839 else
3840 mask = MVPP22_RXQ_POOL_SHORT_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003841
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003842 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3843 val &= ~mask;
3844 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003845 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3846}
3847
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003848static void *mvpp2_buf_alloc(struct mvpp2_port *port,
3849 struct mvpp2_bm_pool *bm_pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003850 dma_addr_t *buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003851 phys_addr_t *buf_phys_addr,
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003852 gfp_t gfp_mask)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003853{
Thomas Petazzoni20396132017-03-07 16:53:00 +01003854 dma_addr_t dma_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003855 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003856
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003857 data = mvpp2_frag_alloc(bm_pool);
3858 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003859 return NULL;
3860
Thomas Petazzoni20396132017-03-07 16:53:00 +01003861 dma_addr = dma_map_single(port->dev->dev.parent, data,
3862 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3863 DMA_FROM_DEVICE);
3864 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003865 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003866 return NULL;
3867 }
Thomas Petazzoni20396132017-03-07 16:53:00 +01003868 *buf_dma_addr = dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003869 *buf_phys_addr = virt_to_phys(data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003870
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003871 return data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003872}
3873
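/* A BM cookie packs the pool id and the issuing CPU id in two 8-bit fields,
 * so both can be recovered from the value later.
 */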
3874/* Set pool number in a BM cookie */
3875static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
3876{
3877 u32 bm;
3878
3879 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
3880 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
3881
3882 return bm;
3883}
3884
3885/* Get pool number from a BM cookie */
Thomas Petazzonid3158802017-02-21 11:28:13 +01003886static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003887{
3888 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
3889}
3890
3891/* Release buffer to BM */
3892static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003893 dma_addr_t buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003894 phys_addr_t buf_phys_addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003895{
Thomas Petazzonia7868412017-03-07 16:53:13 +01003896 int cpu = smp_processor_id();
3897
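	/* On PPv2.2, publish the upper 32 bits of the DMA and phys addresses
	 * before releasing the buffer pointer itself.
	 */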
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003898 if (port->priv->hw_version == MVPP22) {
3899 u32 val = 0;
3900
3901 if (sizeof(dma_addr_t) == 8)
3902 val |= upper_32_bits(buf_dma_addr) &
3903 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
3904
3905 if (sizeof(phys_addr_t) == 8)
3906 val |= (upper_32_bits(buf_phys_addr)
3907 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
3908 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
3909
Thomas Petazzonia7868412017-03-07 16:53:13 +01003910 mvpp2_percpu_write(port->priv, cpu,
3911 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003912 }
3913
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003914 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
3915 * returned in the "cookie" field of the RX
3916 * descriptor. Instead of storing the virtual address, we
3917 * store the physical address
3918 */
Thomas Petazzonia7868412017-03-07 16:53:13 +01003919 mvpp2_percpu_write(port->priv, cpu,
3920 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
3921 mvpp2_percpu_write(port->priv, cpu,
3922 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003923}
3924
Marcin Wojtas3f518502014-07-10 16:52:13 -03003925/* Refill BM pool */
3926static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003927 dma_addr_t dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003928 phys_addr_t phys_addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003929{
3930 int pool = mvpp2_bm_cookie_pool_get(bm);
3931
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003932 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003933}
3934
3935/* Allocate buffers for the pool */
3936static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3937 struct mvpp2_bm_pool *bm_pool, int buf_num)
3938{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003939 int i, buf_size, total_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01003940 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003941 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003942 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003943
3944 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3945 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3946
3947 if (buf_num < 0 ||
3948 (buf_num + bm_pool->buf_num > bm_pool->size)) {
3949 netdev_err(port->dev,
3950 "cannot allocate %d buffers for pool %d\n",
3951 buf_num, bm_pool->id);
3952 return 0;
3953 }
3954
Marcin Wojtas3f518502014-07-10 16:52:13 -03003955 for (i = 0; i < buf_num; i++) {
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003956 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
3957 &phys_addr, GFP_KERNEL);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003958 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003959 break;
3960
Thomas Petazzoni20396132017-03-07 16:53:00 +01003961 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003962 phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003963 }
3964
3965 /* Update BM driver with number of buffers added to pool */
3966 bm_pool->buf_num += i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003967
3968 netdev_dbg(port->dev,
3969 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
3970 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3971 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
3972
3973 netdev_dbg(port->dev,
3974 "%s pool %d: %d of %d buffers added\n",
3975 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
3976 bm_pool->id, i, buf_num);
3977 return i;
3978}
3979
3980/* Notify the driver that BM pool is being used as specific type and return the
3981 * pool pointer on success
3982 */
3983static struct mvpp2_bm_pool *
3984mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3985 int pkt_size)
3986{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003987 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3988 int num;
3989
3990 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
3991 netdev_err(port->dev, "mixing pool types is forbidden\n");
3992 return NULL;
3993 }
3994
Marcin Wojtas3f518502014-07-10 16:52:13 -03003995 if (new_pool->type == MVPP2_BM_FREE)
3996 new_pool->type = type;
3997
3998 /* Allocate buffers in case BM pool is used as long pool, but packet
3999	 * size doesn't match MTU or BM pool hasn't been used yet
4000 */
4001 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
4002 (new_pool->pkt_size == 0)) {
4003 int pkts_num;
4004
4005 /* Set default buffer number or free all the buffers in case
4006 * the pool is not empty
4007 */
4008 pkts_num = new_pool->buf_num;
4009 if (pkts_num == 0)
4010 pkts_num = type == MVPP2_BM_SWF_LONG ?
4011 MVPP2_BM_LONG_BUF_NUM :
4012 MVPP2_BM_SHORT_BUF_NUM;
4013 else
Marcin Wojtas4229d502015-12-03 15:20:50 +01004014 mvpp2_bm_bufs_free(port->dev->dev.parent,
4015 port->priv, new_pool);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004016
4017 new_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004018 new_pool->frag_size =
4019 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4020 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004021
4022 /* Allocate buffers for this pool */
4023 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4024 if (num != pkts_num) {
4025 WARN(1, "pool %d: %d of %d allocated\n",
4026 new_pool->id, num, pkts_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004027 return NULL;
4028 }
4029 }
4030
4031 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4032 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4033
Marcin Wojtas3f518502014-07-10 16:52:13 -03004034 return new_pool;
4035}
4036
4037/* Initialize pools for swf */
4038static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4039{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004040 int rxq;
4041
4042 if (!port->pool_long) {
4043 port->pool_long =
4044 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
4045 MVPP2_BM_SWF_LONG,
4046 port->pkt_size);
4047 if (!port->pool_long)
4048 return -ENOMEM;
4049
Marcin Wojtas3f518502014-07-10 16:52:13 -03004050 port->pool_long->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004051
4052 for (rxq = 0; rxq < rxq_number; rxq++)
4053 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4054 }
4055
4056 if (!port->pool_short) {
4057 port->pool_short =
4058 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
4059 MVPP2_BM_SWF_SHORT,
4060 MVPP2_BM_SHORT_PKT_SIZE);
4061 if (!port->pool_short)
4062 return -ENOMEM;
4063
Marcin Wojtas3f518502014-07-10 16:52:13 -03004064 port->pool_short->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004065
4066 for (rxq = 0; rxq < rxq_number; rxq++)
4067 mvpp2_rxq_short_pool_set(port, rxq,
4068 port->pool_short->id);
4069 }
4070
4071 return 0;
4072}
4073
4074static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4075{
4076 struct mvpp2_port *port = netdev_priv(dev);
4077 struct mvpp2_bm_pool *port_pool = port->pool_long;
4078 int num, pkts_num = port_pool->buf_num;
4079 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4080
4081 /* Update BM pool with new buffer size */
Marcin Wojtas4229d502015-12-03 15:20:50 +01004082 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03004083 if (port_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004084 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
4085 return -EIO;
4086 }
4087
4088 port_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004089 port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4090 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004091 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
4092 if (num != pkts_num) {
4093 WARN(1, "pool %d: %d of %d allocated\n",
4094 port_pool->id, num, pkts_num);
4095 return -EIO;
4096 }
4097
4098 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
4099 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
4100 dev->mtu = mtu;
4101 netdev_update_features(dev);
4102 return 0;
4103}
4104
4105static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4106{
4107 int cpu, cpu_mask = 0;
4108
4109 for_each_present_cpu(cpu)
4110 cpu_mask |= 1 << cpu;
4111 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4112 MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
4113}
4114
4115static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4116{
4117 int cpu, cpu_mask = 0;
4118
4119 for_each_present_cpu(cpu)
4120 cpu_mask |= 1 << cpu;
4121 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4122 MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
4123}
4124
4125/* Mask the current CPU's Rx/Tx interrupts */
4126static void mvpp2_interrupts_mask(void *arg)
4127{
4128 struct mvpp2_port *port = arg;
4129
Thomas Petazzonia7868412017-03-07 16:53:13 +01004130 mvpp2_percpu_write(port->priv, smp_processor_id(),
4131 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004132}
4133
4134/* Unmask the current CPU's Rx/Tx interrupts */
4135static void mvpp2_interrupts_unmask(void *arg)
4136{
4137 struct mvpp2_port *port = arg;
4138
Thomas Petazzonia7868412017-03-07 16:53:13 +01004139 mvpp2_percpu_write(port->priv, smp_processor_id(),
4140 MVPP2_ISR_RX_TX_MASK_REG(port->id),
4141 (MVPP2_CAUSE_MISC_SUM_MASK |
4142 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004143}
4144
4145/* Port configuration routines */
4146
Thomas Petazzoni26975822017-03-07 16:53:14 +01004147static void mvpp22_port_mii_set(struct mvpp2_port *port)
4148{
4149 u32 val;
4150
4153 /* Only GOP port 0 has an XLG MAC */
4154 if (port->gop_id == 0) {
4155 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
4156 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4157 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4158 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
4159 }
4160
4161 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4162 if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
4163 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4164 else
4165 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4166 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4167 val |= MVPP22_CTRL4_SYNC_BYPASS;
4168 val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4169 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4170}
4171
Marcin Wojtas3f518502014-07-10 16:52:13 -03004172static void mvpp2_port_mii_set(struct mvpp2_port *port)
4173{
Marcin Wojtas08a23752014-07-21 13:48:12 -03004174 u32 val;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004175
Thomas Petazzoni26975822017-03-07 16:53:14 +01004176 if (port->priv->hw_version == MVPP22)
4177 mvpp22_port_mii_set(port);
4178
Marcin Wojtas08a23752014-07-21 13:48:12 -03004179 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004180
Marcin Wojtas08a23752014-07-21 13:48:12 -03004181 switch (port->phy_interface) {
4182 case PHY_INTERFACE_MODE_SGMII:
4183 val |= MVPP2_GMAC_INBAND_AN_MASK;
4184 break;
4185 case PHY_INTERFACE_MODE_RGMII:
4186 val |= MVPP2_GMAC_PORT_RGMII_MASK;
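		/* fall through: non-SGMII modes also clear the PCS enable bit */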
4187 default:
4188 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
4189 }
4190
4191 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4192}
4193
4194static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
4195{
4196 u32 val;
4197
4198 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4199 val |= MVPP2_GMAC_FC_ADV_EN;
4200 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004201}
4202
4203static void mvpp2_port_enable(struct mvpp2_port *port)
4204{
4205 u32 val;
4206
4207 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4208 val |= MVPP2_GMAC_PORT_EN_MASK;
4209 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
4210 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4211}
4212
4213static void mvpp2_port_disable(struct mvpp2_port *port)
4214{
4215 u32 val;
4216
4217 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4218 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
4219 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4220}
4221
4222/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
4223static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
4224{
4225 u32 val;
4226
4227 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
4228 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
4229 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4230}
4231
4232/* Configure loopback port */
4233static void mvpp2_port_loopback_set(struct mvpp2_port *port)
4234{
4235 u32 val;
4236
4237 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4238
4239 if (port->speed == 1000)
4240 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
4241 else
4242 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
4243
4244 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4245 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
4246 else
4247 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
4248
4249 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4250}
4251
4252static void mvpp2_port_reset(struct mvpp2_port *port)
4253{
4254 u32 val;
4255
4256 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4257 ~MVPP2_GMAC_PORT_RESET_MASK;
4258 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4259
4260 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4261 MVPP2_GMAC_PORT_RESET_MASK)
4262 continue;
4263}
4264
4265/* Change maximum receive size of the port */
4266static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
4267{
4268 u32 val;
4269
4270 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4271 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
4272 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
4273 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
4274 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4275}
4276
4277/* Set defaults to the MVPP2 port */
4278static void mvpp2_defaults_set(struct mvpp2_port *port)
4279{
4280 int tx_port_num, val, queue, ptxq, lrxq;
4281
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01004282 if (port->priv->hw_version == MVPP21) {
4283 /* Configure port to loopback if needed */
4284 if (port->flags & MVPP2_F_LOOPBACK)
4285 mvpp2_port_loopback_set(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004286
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01004287 /* Update TX FIFO MIN Threshold */
4288 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4289 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
4290 /* Min. TX threshold must be less than minimal packet length */
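		/* presumably 64 (minimum frame size) minus the 4-byte FCS and
		 * the 2-byte Marvell header
		 */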
4291 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
4292 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4293 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004294
4295 /* Disable Legacy WRR, Disable EJP, Release from reset */
4296 tx_port_num = mvpp2_egress_port(port);
4297 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
4298 tx_port_num);
4299 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
4300
4301 /* Close bandwidth for all queues */
4302 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
4303 ptxq = mvpp2_txq_phys(port->id, queue);
4304 mvpp2_write(port->priv,
4305 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
4306 }
4307
4308 /* Set refill period to 1 usec, refill tokens
4309 * and bucket size to maximum
4310 */
4311 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
4312 port->priv->tclk / USEC_PER_SEC);
4313 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
4314 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
4315 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
4316 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
4317 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
4318 val = MVPP2_TXP_TOKEN_SIZE_MAX;
4319 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4320
4321 /* Set MaximumLowLatencyPacketSize value to 256 */
4322 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
4323 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
4324 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
4325
4326 /* Enable Rx cache snoop */
4327 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4328 queue = port->rxqs[lrxq]->id;
4329 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4330 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
4331 MVPP2_SNOOP_BUF_HDR_MASK;
4332 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4333 }
4334
4335 /* At default, mask all interrupts to all present cpus */
4336 mvpp2_interrupts_disable(port);
4337}
4338
4339/* Enable/disable receiving packets */
4340static void mvpp2_ingress_enable(struct mvpp2_port *port)
4341{
4342 u32 val;
4343 int lrxq, queue;
4344
4345 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4346 queue = port->rxqs[lrxq]->id;
4347 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4348 val &= ~MVPP2_RXQ_DISABLE_MASK;
4349 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4350 }
4351}
4352
4353static void mvpp2_ingress_disable(struct mvpp2_port *port)
4354{
4355 u32 val;
4356 int lrxq, queue;
4357
4358 for (lrxq = 0; lrxq < rxq_number; lrxq++) {
4359 queue = port->rxqs[lrxq]->id;
4360 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4361 val |= MVPP2_RXQ_DISABLE_MASK;
4362 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4363 }
4364}
4365
4366/* Enable transmit via physical egress queue
4367 * - HW starts taking descriptors from DRAM
4368 */
4369static void mvpp2_egress_enable(struct mvpp2_port *port)
4370{
4371 u32 qmap;
4372 int queue;
4373 int tx_port_num = mvpp2_egress_port(port);
4374
4375 /* Enable all initialized TXs. */
4376 qmap = 0;
4377 for (queue = 0; queue < txq_number; queue++) {
4378 struct mvpp2_tx_queue *txq = port->txqs[queue];
4379
4380 if (txq->descs != NULL)
4381 qmap |= (1 << queue);
4382 }
4383
4384 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4385 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4386}
4387
4388/* Disable transmit via physical egress queue
4389 * - HW doesn't take descriptors from DRAM
4390 */
4391static void mvpp2_egress_disable(struct mvpp2_port *port)
4392{
4393 u32 reg_data;
4394 int delay;
4395 int tx_port_num = mvpp2_egress_port(port);
4396
4397 /* Issue stop command for active channels only */
4398 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4399 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4400 MVPP2_TXP_SCHED_ENQ_MASK;
4401 if (reg_data != 0)
4402 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4403 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4404
4405 /* Wait for all Tx activity to terminate. */
4406 delay = 0;
4407 do {
4408 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4409 netdev_warn(port->dev,
4410 "Tx stop timed out, status=0x%08x\n",
4411 reg_data);
4412 break;
4413 }
4414 mdelay(1);
4415 delay++;
4416
4417		/* Check the port TX Command register to verify that all
4418		 * Tx queues have stopped
4419 */
4420 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4421 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4422}
4423
4424/* Rx descriptors helper methods */
4425
4426/* Get number of Rx descriptors occupied by received packets */
4427static inline int
4428mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4429{
4430 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4431
4432 return val & MVPP2_RXQ_OCCUPIED_MASK;
4433}
4434
4435/* Update Rx queue status with the number of occupied and available
4436 * Rx descriptor slots.
4437 */
4438static inline void
4439mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4440 int used_count, int free_count)
4441{
4442	/* Decrement the number of used descriptors and increment
4443	 * the number of free descriptors.
4444 */
4445 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4446
4447 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4448}
4449
4450/* Get pointer to next RX descriptor to be processed by SW */
4451static inline struct mvpp2_rx_desc *
4452mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4453{
4454 int rx_desc = rxq->next_desc_to_proc;
4455
4456 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4457 prefetch(rxq->descs + rxq->next_desc_to_proc);
4458 return rxq->descs + rx_desc;
4459}
4460
4461/* Set rx queue offset */
4462static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4463 int prxq, int offset)
4464{
4465 u32 val;
4466
4467 /* Convert offset from bytes to units of 32 bytes */
4468 offset = offset >> 5;
4469
4470 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4471 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4472
4473	/* Offset is in units of 32 bytes */
4474 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4475 MVPP2_RXQ_PACKET_OFFSET_MASK);
4476
4477 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4478}
4479
4480/* Obtain BM cookie information from descriptor */
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01004481static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
4482 struct mvpp2_rx_desc *rx_desc)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004483{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004484 int cpu = smp_processor_id();
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01004485 int pool;
4486
4487 pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
4488 MVPP2_RXD_BM_POOL_ID_MASK) >>
4489 MVPP2_RXD_BM_POOL_ID_OFFS;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004490
4491 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
4492 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
4493}
4494
4495/* Tx descriptors helper methods */
4496
Marcin Wojtas3f518502014-07-10 16:52:13 -03004497/* Get pointer to next Tx descriptor to be processed (send) by HW */
4498static struct mvpp2_tx_desc *
4499mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4500{
4501 int tx_desc = txq->next_desc_to_proc;
4502
4503 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4504 return txq->descs + tx_desc;
4505}
4506
4507/* Update HW with number of aggregated Tx descriptors to be sent */
4508static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4509{
4510 /* aggregated access - relevant TXQ number is written in TX desc */
Thomas Petazzonia7868412017-03-07 16:53:13 +01004511 mvpp2_percpu_write(port->priv, smp_processor_id(),
4512 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004513}
4514
4515
4516/* Check if there are enough free descriptors in aggregated txq.
4517 * If not, update the number of occupied descriptors and repeat the check.
4518 */
4519static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4520 struct mvpp2_tx_queue *aggr_txq, int num)
4521{
4522 if ((aggr_txq->count + num) > aggr_txq->size) {
4523 /* Update number of occupied aggregated Tx descriptors */
4524 int cpu = smp_processor_id();
4525 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4526
4527 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4528 }
4529
4530 if ((aggr_txq->count + num) > aggr_txq->size)
4531 return -ENOMEM;
4532
4533 return 0;
4534}
4535
4536/* Reserved Tx descriptors allocation request */
4537static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4538 struct mvpp2_tx_queue *txq, int num)
4539{
4540 u32 val;
Thomas Petazzonia7868412017-03-07 16:53:13 +01004541 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03004542
4543 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
Thomas Petazzonia7868412017-03-07 16:53:13 +01004544 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004545
Thomas Petazzonia7868412017-03-07 16:53:13 +01004546 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004547
4548 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4549}
4550
4551/* Check if there are enough reserved descriptors for transmission.
4552 * If not, request chunk of reserved descriptors and check again.
4553 */
4554static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4555 struct mvpp2_tx_queue *txq,
4556 struct mvpp2_txq_pcpu *txq_pcpu,
4557 int num)
4558{
4559 int req, cpu, desc_count;
4560
4561 if (txq_pcpu->reserved_num >= num)
4562 return 0;
4563
4564 /* Not enough descriptors reserved! Update the reserved descriptor
4565 * count and check again.
4566 */
4567
4568 desc_count = 0;
4569 /* Compute total of used descriptors */
4570 for_each_present_cpu(cpu) {
4571 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4572
4573 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4574 desc_count += txq_pcpu_aux->count;
4575 desc_count += txq_pcpu_aux->reserved_num;
4576 }
4577
4578 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4579 desc_count += req;
4580
4581 if (desc_count >
4582 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4583 return -ENOMEM;
4584
4585 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4586
4587	/* OK, the descriptor count has been updated: check again. */
4588 if (txq_pcpu->reserved_num < num)
4589 return -ENOMEM;
4590 return 0;
4591}
4592
4593/* Release the last allocated Tx descriptor. Useful to handle DMA
4594 * mapping failures in the Tx path.
4595 */
4596static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4597{
4598 if (txq->next_desc_to_proc == 0)
4599 txq->next_desc_to_proc = txq->last_desc - 1;
4600 else
4601 txq->next_desc_to_proc--;
4602}
4603
4604/* Set Tx descriptors fields relevant for CSUM calculation */
4605static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4606 int ip_hdr_len, int l4_proto)
4607{
4608 u32 command;
4609
4610 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4611 * G_L4_chk, L4_type required only for checksum calculation
4612 */
4613 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4614 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4615 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4616
4617 if (l3_proto == swab16(ETH_P_IP)) {
4618 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
4619 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
4620 } else {
4621 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
4622 }
4623
4624 if (l4_proto == IPPROTO_TCP) {
4625 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
4626 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4627 } else if (l4_proto == IPPROTO_UDP) {
4628 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
4629 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4630 } else {
4631 command |= MVPP2_TXD_L4_CSUM_NOT;
4632 }
4633
4634 return command;
4635}
4636
4637/* Get number of sent descriptors and decrement counter.
4638 * The number of sent descriptors is returned.
4639 * Per-CPU access
4640 */
4641static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4642 struct mvpp2_tx_queue *txq)
4643{
4644 u32 val;
4645
4646 /* Reading status reg resets transmitted descriptor counter */
Thomas Petazzonia7868412017-03-07 16:53:13 +01004647 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
4648 MVPP2_TXQ_SENT_REG(txq->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004649
4650 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4651 MVPP2_TRANSMITTED_COUNT_OFFSET;
4652}
4653
4654static void mvpp2_txq_sent_counter_clear(void *arg)
4655{
4656 struct mvpp2_port *port = arg;
4657 int queue;
4658
4659 for (queue = 0; queue < txq_number; queue++) {
4660 int id = port->txqs[queue]->id;
4661
Thomas Petazzonia7868412017-03-07 16:53:13 +01004662 mvpp2_percpu_read(port->priv, smp_processor_id(),
4663 MVPP2_TXQ_SENT_REG(id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004664 }
4665}
4666
4667/* Set max sizes for Tx queues */
4668static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4669{
4670 u32 val, size, mtu;
4671 int txq, tx_port_num;
4672
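	/* The scheduler MTU/token registers appear to count in bits, hence
	 * the conversion from bytes below.
	 */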
4673 mtu = port->pkt_size * 8;
4674 if (mtu > MVPP2_TXP_MTU_MAX)
4675 mtu = MVPP2_TXP_MTU_MAX;
4676
4677 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
4678 mtu = 3 * mtu;
4679
4680 /* Indirect access to registers */
4681 tx_port_num = mvpp2_egress_port(port);
4682 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4683
4684 /* Set MTU */
4685 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4686 val &= ~MVPP2_TXP_MTU_MAX;
4687 val |= mtu;
4688 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4689
4690	/* TXP token size and all TXQs token size must be larger than MTU */
4691 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4692 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4693 if (size < mtu) {
4694 size = mtu;
4695 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4696 val |= size;
4697 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4698 }
4699
4700 for (txq = 0; txq < txq_number; txq++) {
4701 val = mvpp2_read(port->priv,
4702 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4703 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4704
4705 if (size < mtu) {
4706 size = mtu;
4707 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4708 val |= size;
4709 mvpp2_write(port->priv,
4710 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4711 val);
4712 }
4713 }
4714}
4715
4716/* Set the number of packets that will be received before Rx interrupt
4717 * will be generated by HW.
4718 */
4719static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01004720 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004721{
Thomas Petazzonia7868412017-03-07 16:53:13 +01004722 int cpu = smp_processor_id();
4723
Thomas Petazzonif8b0d5f2017-02-21 11:28:03 +01004724 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4725 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004726
Thomas Petazzonia7868412017-03-07 16:53:13 +01004727 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4728 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
4729 rxq->pkts_coal);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004730}
4731
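/* Helpers to convert between usecs and tclk cycles for the Rx coalescing
 * registers, saturating at U32_MAX.
 */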
Thomas Petazzoniab426762017-02-21 11:28:04 +01004732static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
4733{
4734 u64 tmp = (u64)clk_hz * usec;
4735
4736 do_div(tmp, USEC_PER_SEC);
4737
4738 return tmp > U32_MAX ? U32_MAX : tmp;
4739}
4740
4741static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
4742{
4743 u64 tmp = (u64)cycles * USEC_PER_SEC;
4744
4745 do_div(tmp, clk_hz);
4746
4747 return tmp > U32_MAX ? U32_MAX : tmp;
4748}
4749
Marcin Wojtas3f518502014-07-10 16:52:13 -03004750/* Set the time delay in usec before Rx interrupt */
4751static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01004752 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004753{
Thomas Petazzoniab426762017-02-21 11:28:04 +01004754 unsigned long freq = port->priv->tclk;
4755 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004756
Thomas Petazzoniab426762017-02-21 11:28:04 +01004757 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
4758 rxq->time_coal =
4759 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
4760
4761 /* re-evaluate to get actual register value */
4762 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4763 }
4764
Marcin Wojtas3f518502014-07-10 16:52:13 -03004765 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004766}
4767
Marcin Wojtas3f518502014-07-10 16:52:13 -03004768/* Free Tx queue skbuffs */
4769static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4770 struct mvpp2_tx_queue *txq,
4771 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4772{
4773 int i;
4774
4775 for (i = 0; i < num; i++) {
Thomas Petazzoni83544912016-12-21 11:28:49 +01004776 struct mvpp2_txq_pcpu_buf *tx_buf =
4777 txq_pcpu->buffs + txq_pcpu->txq_get_index;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004778
Thomas Petazzoni20396132017-03-07 16:53:00 +01004779 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
Thomas Petazzoni83544912016-12-21 11:28:49 +01004780 tx_buf->size, DMA_TO_DEVICE);
Thomas Petazzoni36fb7432017-02-21 11:28:05 +01004781 if (tx_buf->skb)
4782 dev_kfree_skb_any(tx_buf->skb);
4783
4784 mvpp2_txq_inc_get(txq_pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004785 }
4786}
4787
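/* Map an interrupt cause bitmap to the highest-numbered queue whose bit is
 * set.
 */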
4788static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4789 u32 cause)
4790{
4791 int queue = fls(cause) - 1;
4792
4793 return port->rxqs[queue];
4794}
4795
4796static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4797 u32 cause)
4798{
Marcin Wojtasedc660f2015-08-06 19:00:30 +02004799 int queue = fls(cause) - 1;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004800
4801 return port->txqs[queue];
4802}
4803
4804/* Handle end of transmission */
4805static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4806 struct mvpp2_txq_pcpu *txq_pcpu)
4807{
4808 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4809 int tx_done;
4810
4811 if (txq_pcpu->cpu != smp_processor_id())
4812 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
4813
4814 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4815 if (!tx_done)
4816 return;
4817 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4818
4819 txq_pcpu->count -= tx_done;
4820
4821 if (netif_tx_queue_stopped(nq))
4822 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4823 netif_tx_wake_queue(nq);
4824}
4825
Marcin Wojtasedc660f2015-08-06 19:00:30 +02004826static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4827{
4828 struct mvpp2_tx_queue *txq;
4829 struct mvpp2_txq_pcpu *txq_pcpu;
4830 unsigned int tx_todo = 0;
4831
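	/* "cause" is a bitmap of Tx queues with pending work: service each
	 * queue and clear its bit.
	 */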
4832 while (cause) {
4833 txq = mvpp2_get_tx_queue(port, cause);
4834 if (!txq)
4835 break;
4836
4837 txq_pcpu = this_cpu_ptr(txq->pcpu);
4838
4839 if (txq_pcpu->count) {
4840 mvpp2_txq_done(port, txq, txq_pcpu);
4841 tx_todo += txq_pcpu->count;
4842 }
4843
4844 cause &= ~(1 << txq->log_id);
4845 }
4846 return tx_todo;
4847}
4848
Marcin Wojtas3f518502014-07-10 16:52:13 -03004849/* Rx/Tx queue initialization/cleanup methods */
4850
4851/* Allocate and initialize descriptors for aggr TXQ */
4852static int mvpp2_aggr_txq_init(struct platform_device *pdev,
4853 struct mvpp2_tx_queue *aggr_txq,
4854 int desc_num, int cpu,
4855 struct mvpp2 *priv)
4856{
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01004857 u32 txq_dma;
4858
Marcin Wojtas3f518502014-07-10 16:52:13 -03004859 /* Allocate memory for TX descriptors */
4860 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
4861 desc_num * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004862 &aggr_txq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004863 if (!aggr_txq->descs)
4864 return -ENOMEM;
4865
Marcin Wojtas3f518502014-07-10 16:52:13 -03004866 aggr_txq->last_desc = aggr_txq->size - 1;
4867
4868 	/* Workaround: the aggregated TXQ is not reset, so resync next_desc_to_proc from the HW index */
4869 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
4870 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
4871
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01004872 /* Set Tx descriptors queue starting address indirect
4873 * access
4874 */
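	/* PPv2.1 takes the full 32-bit DMA address; PPv2.2 takes it shifted
	 * right by MVPP22_AGGR_TXQ_DESC_ADDR_OFFS, so that DMA addresses
	 * wider than 32 bits fit in the register.
	 */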
4875 if (priv->hw_version == MVPP21)
4876 txq_dma = aggr_txq->descs_dma;
4877 else
4878 txq_dma = aggr_txq->descs_dma >>
4879 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
4880
4881 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004882 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
4883
4884 return 0;
4885}
4886
4887/* Create a specified Rx queue */
4888static int mvpp2_rxq_init(struct mvpp2_port *port,
4889 struct mvpp2_rx_queue *rxq)
4890
4891{
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01004892 u32 rxq_dma;
Thomas Petazzonia7868412017-03-07 16:53:13 +01004893 int cpu;
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01004894
Marcin Wojtas3f518502014-07-10 16:52:13 -03004895 rxq->size = port->rx_ring_size;
4896
4897 /* Allocate memory for RX descriptors */
4898 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
4899 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004900 &rxq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004901 if (!rxq->descs)
4902 return -ENOMEM;
4903
Marcin Wojtas3f518502014-07-10 16:52:13 -03004904 rxq->last_desc = rxq->size - 1;
4905
4906 /* Zero occupied and non-occupied counters - direct access */
4907 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
4908
4909 /* Set Rx descriptors queue starting address - indirect access */
Thomas Petazzonia7868412017-03-07 16:53:13 +01004910 cpu = smp_processor_id();
4911 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01004912 if (port->priv->hw_version == MVPP21)
4913 rxq_dma = rxq->descs_dma;
4914 else
4915 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
Thomas Petazzonia7868412017-03-07 16:53:13 +01004916 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
4917 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
4918 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004919
4920 /* Set Offset */
4921 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
4922
4923 /* Set coalescing pkts and time */
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01004924 mvpp2_rx_pkts_coal_set(port, rxq);
4925 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004926
4927 /* Add number of descriptors ready for receiving packets */
4928 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
4929
4930 return 0;
4931}
4932
4933/* Push packets received by the RXQ to BM pool */
4934static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
4935 struct mvpp2_rx_queue *rxq)
4936{
4937 int rx_received, i;
4938
4939 rx_received = mvpp2_rxq_received(port, rxq->id);
4940 if (!rx_received)
4941 return;
4942
4943 for (i = 0; i < rx_received; i++) {
4944 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01004945 u32 bm = mvpp2_bm_cookie_build(port, rx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004946
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01004947 mvpp2_pool_refill(port, bm,
4948 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
4949 mvpp2_rxdesc_cookie_get(port, rx_desc));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004950 }
4951 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
4952}
4953
4954/* Cleanup Rx queue */
4955static void mvpp2_rxq_deinit(struct mvpp2_port *port,
4956 struct mvpp2_rx_queue *rxq)
4957{
Thomas Petazzonia7868412017-03-07 16:53:13 +01004958 int cpu;
4959
Marcin Wojtas3f518502014-07-10 16:52:13 -03004960 mvpp2_rxq_drop_pkts(port, rxq);
4961
4962 if (rxq->descs)
4963 dma_free_coherent(port->dev->dev.parent,
4964 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
4965 rxq->descs,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004966 rxq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004967
4968 rxq->descs = NULL;
4969 rxq->last_desc = 0;
4970 rxq->next_desc_to_proc = 0;
Thomas Petazzoni20396132017-03-07 16:53:00 +01004971 rxq->descs_dma = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004972
4973 /* Clear Rx descriptors queue starting address and size;
4974 * free descriptor number
4975 */
4976 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01004977 cpu = smp_processor_id();
4978 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4979 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
4980 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004981}
4982
4983/* Create and initialize a Tx queue */
4984static int mvpp2_txq_init(struct mvpp2_port *port,
4985 struct mvpp2_tx_queue *txq)
4986{
4987 u32 val;
4988 int cpu, desc, desc_per_txq, tx_port_num;
4989 struct mvpp2_txq_pcpu *txq_pcpu;
4990
4991 txq->size = port->tx_ring_size;
4992
4993 /* Allocate memory for Tx descriptors */
4994 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
4995 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004996 &txq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004997 if (!txq->descs)
4998 return -ENOMEM;
4999
Marcin Wojtas3f518502014-07-10 16:52:13 -03005000 txq->last_desc = txq->size - 1;
5001
5002 /* Set Tx descriptors queue starting address - indirect access */
Thomas Petazzonia7868412017-03-07 16:53:13 +01005003 cpu = smp_processor_id();
5004 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5005 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
5006 txq->descs_dma);
5007 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
5008 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
5009 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
5010 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
5011 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
5012 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005013 val &= ~MVPP2_TXQ_PENDING_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005014 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005015
5016 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
5017 * for each existing TXQ.
5018 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
5019 	 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
5020 */
5021 desc_per_txq = 16;
5022 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
5023 (txq->log_id * desc_per_txq);
5024
Thomas Petazzonia7868412017-03-07 16:53:13 +01005025 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
5026 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
5027 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005028
5029 /* WRR / EJP configuration - indirect access */
5030 tx_port_num = mvpp2_egress_port(port);
5031 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5032
5033 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
5034 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
5035 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
5036 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
5037 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
5038
5039 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
5040 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
5041 val);
5042
5043 for_each_present_cpu(cpu) {
5044 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5045 txq_pcpu->size = txq->size;
Thomas Petazzoni83544912016-12-21 11:28:49 +01005046 txq_pcpu->buffs = kmalloc(txq_pcpu->size *
5047 sizeof(struct mvpp2_txq_pcpu_buf),
5048 GFP_KERNEL);
5049 if (!txq_pcpu->buffs)
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005050 goto error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005051
5052 txq_pcpu->count = 0;
5053 txq_pcpu->reserved_num = 0;
5054 txq_pcpu->txq_put_index = 0;
5055 txq_pcpu->txq_get_index = 0;
5056 }
5057
5058 return 0;
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005059
5060error:
5061 for_each_present_cpu(cpu) {
5062 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Thomas Petazzoni83544912016-12-21 11:28:49 +01005063 kfree(txq_pcpu->buffs);
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005064 }
5065
5066 dma_free_coherent(port->dev->dev.parent,
5067 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005068 txq->descs, txq->descs_dma);
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005069
5070 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005071}
5072
5073/* Free allocated TXQ resources */
5074static void mvpp2_txq_deinit(struct mvpp2_port *port,
5075 struct mvpp2_tx_queue *txq)
5076{
5077 struct mvpp2_txq_pcpu *txq_pcpu;
5078 int cpu;
5079
5080 for_each_present_cpu(cpu) {
5081 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Thomas Petazzoni83544912016-12-21 11:28:49 +01005082 kfree(txq_pcpu->buffs);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005083 }
5084
5085 if (txq->descs)
5086 dma_free_coherent(port->dev->dev.parent,
5087 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005088 txq->descs, txq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005089
5090 txq->descs = NULL;
5091 txq->last_desc = 0;
5092 txq->next_desc_to_proc = 0;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005093 txq->descs_dma = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005094
5095 /* Set minimum bandwidth for disabled TXQs */
5096 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
5097
5098 /* Set Tx descriptors queue starting address and size */
Thomas Petazzonia7868412017-03-07 16:53:13 +01005099 cpu = smp_processor_id();
5100 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5101 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
5102 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005103}
5104
5105/* Drain pending packets and clean up one Tx queue */
5106static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5107{
5108 struct mvpp2_txq_pcpu *txq_pcpu;
5109 int delay, pending, cpu;
5110 u32 val;
5111
Thomas Petazzonia7868412017-03-07 16:53:13 +01005112 cpu = smp_processor_id();
5113 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5114 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005115 val |= MVPP2_TXQ_DRAIN_EN_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005116 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005117
5118 /* The napi queue has been stopped so wait for all packets
5119 * to be transmitted.
5120 */
5121 delay = 0;
5122 do {
5123 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
5124 netdev_warn(port->dev,
5125 "port %d: cleaning queue %d timed out\n",
5126 port->id, txq->log_id);
5127 break;
5128 }
5129 mdelay(1);
5130 delay++;
5131
Thomas Petazzonia7868412017-03-07 16:53:13 +01005132 pending = mvpp2_percpu_read(port->priv, cpu,
5133 MVPP2_TXQ_PENDING_REG);
5134 pending &= MVPP2_TXQ_PENDING_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005135 } while (pending);
5136
5137 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005138 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005139
5140 for_each_present_cpu(cpu) {
5141 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5142
5143 /* Release all packets */
5144 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
5145
5146 /* Reset queue */
5147 txq_pcpu->count = 0;
5148 txq_pcpu->txq_put_index = 0;
5149 txq_pcpu->txq_get_index = 0;
5150 }
5151}
5152
5153/* Cleanup all Tx queues */
5154static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
5155{
5156 struct mvpp2_tx_queue *txq;
5157 int queue;
5158 u32 val;
5159
5160 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
5161
5162 /* Reset Tx ports and delete Tx queues */
5163 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
5164 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5165
5166 for (queue = 0; queue < txq_number; queue++) {
5167 txq = port->txqs[queue];
5168 mvpp2_txq_clean(port, txq);
5169 mvpp2_txq_deinit(port, txq);
5170 }
5171
5172 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5173
5174 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
5175 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5176}
5177
5178/* Cleanup all Rx queues */
5179static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
5180{
5181 int queue;
5182
5183 for (queue = 0; queue < rxq_number; queue++)
5184 mvpp2_rxq_deinit(port, port->rxqs[queue]);
5185}
5186
5187/* Init all Rx queues for port */
5188static int mvpp2_setup_rxqs(struct mvpp2_port *port)
5189{
5190 int queue, err;
5191
5192 for (queue = 0; queue < rxq_number; queue++) {
5193 err = mvpp2_rxq_init(port, port->rxqs[queue]);
5194 if (err)
5195 goto err_cleanup;
5196 }
5197 return 0;
5198
5199err_cleanup:
5200 mvpp2_cleanup_rxqs(port);
5201 return err;
5202}
5203
5204/* Init all tx queues for port */
5205static int mvpp2_setup_txqs(struct mvpp2_port *port)
5206{
5207 struct mvpp2_tx_queue *txq;
5208 int queue, err;
5209
5210 for (queue = 0; queue < txq_number; queue++) {
5211 txq = port->txqs[queue];
5212 err = mvpp2_txq_init(port, txq);
5213 if (err)
5214 goto err_cleanup;
5215 }
5216
Marcin Wojtas3f518502014-07-10 16:52:13 -03005217 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5218 return 0;
5219
5220err_cleanup:
5221 mvpp2_cleanup_txqs(port);
5222 return err;
5223}
5224
5225/* The callback for per-port interrupt */
5226static irqreturn_t mvpp2_isr(int irq, void *dev_id)
5227{
5228 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
5229
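	/* Mask further interrupts from this port; they are re-enabled from
	 * mvpp2_poll() once the NAPI run completes.
	 */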
5230 mvpp2_interrupts_disable(port);
5231
5232 napi_schedule(&port->napi);
5233
5234 return IRQ_HANDLED;
5235}
5236
5237/* Adjust link */
5238static void mvpp2_link_event(struct net_device *dev)
5239{
5240 struct mvpp2_port *port = netdev_priv(dev);
Philippe Reynes8e072692016-06-28 00:08:11 +02005241 struct phy_device *phydev = dev->phydev;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005242 int status_change = 0;
5243 u32 val;
5244
5245 if (phydev->link) {
5246 if ((port->speed != phydev->speed) ||
5247 (port->duplex != phydev->duplex)) {
5248 u32 val;
5249
5250 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5251 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
5252 MVPP2_GMAC_CONFIG_GMII_SPEED |
5253 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
5254 MVPP2_GMAC_AN_SPEED_EN |
5255 MVPP2_GMAC_AN_DUPLEX_EN);
5256
5257 if (phydev->duplex)
5258 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5259
5260 if (phydev->speed == SPEED_1000)
5261 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
Thomas Petazzoni2add5112014-07-27 23:21:35 +02005262 else if (phydev->speed == SPEED_100)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005263 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
5264
5265 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5266
5267 port->duplex = phydev->duplex;
5268 port->speed = phydev->speed;
5269 }
5270 }
5271
5272 if (phydev->link != port->link) {
5273 if (!phydev->link) {
5274 port->duplex = -1;
5275 port->speed = 0;
5276 }
5277
5278 port->link = phydev->link;
5279 status_change = 1;
5280 }
5281
5282 if (status_change) {
5283 if (phydev->link) {
5284 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5285 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
5286 MVPP2_GMAC_FORCE_LINK_DOWN);
5287 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5288 mvpp2_egress_enable(port);
5289 mvpp2_ingress_enable(port);
5290 } else {
5291 mvpp2_ingress_disable(port);
5292 mvpp2_egress_disable(port);
5293 }
5294 phy_print_status(phydev);
5295 }
5296}
5297
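/* Deferred Tx completion: when packets remain after transmit, a pinned
 * hrtimer is armed; its callback schedules a tasklet which runs
 * mvpp2_tx_proc_cb() to reap the remaining descriptors.
 */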
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005298static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
5299{
5300 ktime_t interval;
5301
5302 if (!port_pcpu->timer_scheduled) {
5303 port_pcpu->timer_scheduled = true;
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01005304 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005305 hrtimer_start(&port_pcpu->tx_done_timer, interval,
5306 HRTIMER_MODE_REL_PINNED);
5307 }
5308}
5309
5310static void mvpp2_tx_proc_cb(unsigned long data)
5311{
5312 struct net_device *dev = (struct net_device *)data;
5313 struct mvpp2_port *port = netdev_priv(dev);
5314 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5315 unsigned int tx_todo, cause;
5316
5317 if (!netif_running(dev))
5318 return;
5319 port_pcpu->timer_scheduled = false;
5320
5321 /* Process all the Tx queues */
5322 cause = (1 << txq_number) - 1;
5323 tx_todo = mvpp2_tx_done(port, cause);
5324
5325 /* Set the timer in case not all the packets were processed */
5326 if (tx_todo)
5327 mvpp2_timer_set(port_pcpu);
5328}
5329
5330static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
5331{
5332 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
5333 struct mvpp2_port_pcpu,
5334 tx_done_timer);
5335
5336 tasklet_schedule(&port_pcpu->tx_done_tasklet);
5337
5338 return HRTIMER_NORESTART;
5339}
5340
Marcin Wojtas3f518502014-07-10 16:52:13 -03005341/* Main RX/TX processing routines */
5342
5343/* Display more error info */
5344static void mvpp2_rx_error(struct mvpp2_port *port,
5345 struct mvpp2_rx_desc *rx_desc)
5346{
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005347 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5348 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005349
5350 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
5351 case MVPP2_RXD_ERR_CRC:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005352 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
5353 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005354 break;
5355 case MVPP2_RXD_ERR_OVERRUN:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005356 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
5357 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005358 break;
5359 case MVPP2_RXD_ERR_RESOURCE:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005360 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
5361 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005362 break;
5363 }
5364}
5365
5366/* Handle RX checksum offload */
5367static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
5368 struct sk_buff *skb)
5369{
5370 if (((status & MVPP2_RXD_L3_IP4) &&
5371 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
5372 (status & MVPP2_RXD_L3_IP6))
5373 if (((status & MVPP2_RXD_L4_UDP) ||
5374 (status & MVPP2_RXD_L4_TCP)) &&
5375 (status & MVPP2_RXD_L4_CSUM_OK)) {
5376 skb->csum = 0;
5377 skb->ip_summed = CHECKSUM_UNNECESSARY;
5378 return;
5379 }
5380
5381 skb->ip_summed = CHECKSUM_NONE;
5382}
5383
5384/* Allocate a new buffer and add it to the BM pool */
5385static int mvpp2_rx_refill(struct mvpp2_port *port,
Thomas Petazzoni7ef7e1d2017-02-21 11:28:07 +01005386 struct mvpp2_bm_pool *bm_pool, u32 bm)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005387{
Thomas Petazzoni20396132017-03-07 16:53:00 +01005388 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01005389 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005390 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005391
Marcin Wojtas3f518502014-07-10 16:52:13 -03005392	/* No recycling here: always allocate a new buffer for the BM pool */
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01005393 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
5394 GFP_ATOMIC);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005395 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005396 return -ENOMEM;
5397
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01005398 mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
Thomas Petazzoni7ef7e1d2017-02-21 11:28:07 +01005399
Marcin Wojtas3f518502014-07-10 16:52:13 -03005400 return 0;
5401}
5402
5403/* Handle tx checksum */
5404static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5405{
5406 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5407 int ip_hdr_len = 0;
5408 u8 l4_proto;
5409
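		/* ip_hdr_len is expressed in 32-bit words: ihl for IPv4,
		 * network header length / 4 for IPv6.
		 */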
5410 if (skb->protocol == htons(ETH_P_IP)) {
5411 struct iphdr *ip4h = ip_hdr(skb);
5412
5413 /* Calculate IPv4 checksum and L4 checksum */
5414 ip_hdr_len = ip4h->ihl;
5415 l4_proto = ip4h->protocol;
5416 } else if (skb->protocol == htons(ETH_P_IPV6)) {
5417 struct ipv6hdr *ip6h = ipv6_hdr(skb);
5418
5419 /* Read l4_protocol from one of IPv6 extra headers */
5420 if (skb_network_header_len(skb) > 0)
5421 ip_hdr_len = (skb_network_header_len(skb) >> 2);
5422 l4_proto = ip6h->nexthdr;
5423 } else {
5424 return MVPP2_TXD_L4_CSUM_NOT;
5425 }
5426
5427 return mvpp2_txq_desc_csum(skb_network_offset(skb),
5428 skb->protocol, ip_hdr_len, l4_proto);
5429 }
5430
5431 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5432}
5433
Marcin Wojtas3f518502014-07-10 16:52:13 -03005434/* Main rx processing */
5435static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
5436 struct mvpp2_rx_queue *rxq)
5437{
5438 struct net_device *dev = port->dev;
Marcin Wojtasb5015852015-12-03 15:20:51 +01005439 int rx_received;
5440 int rx_done = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005441 u32 rcvd_pkts = 0;
5442 u32 rcvd_bytes = 0;
5443
5444 	/* Get the number of received packets and clamp rx_todo to it */
5445 rx_received = mvpp2_rxq_received(port, rxq->id);
5446 if (rx_todo > rx_received)
5447 rx_todo = rx_received;
5448
Marcin Wojtasb5015852015-12-03 15:20:51 +01005449 while (rx_done < rx_todo) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005450 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5451 struct mvpp2_bm_pool *bm_pool;
5452 struct sk_buff *skb;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005453 unsigned int frag_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005454 dma_addr_t dma_addr;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005455 phys_addr_t phys_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005456 u32 bm, rx_status;
5457 int pool, rx_bytes, err;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005458 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005459
Marcin Wojtasb5015852015-12-03 15:20:51 +01005460 rx_done++;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005461 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
5462 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
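		/* The Marvell header (MH) prepended by the hardware is not
		 * part of the payload: subtract it from the length here and
		 * skip it with skb_reserve() below.
		 */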
5463 rx_bytes -= MVPP2_MH_SIZE;
5464 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
5465 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
5466 data = (void *)phys_to_virt(phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005467
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005468 bm = mvpp2_bm_cookie_build(port, rx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005469 pool = mvpp2_bm_cookie_pool_get(bm);
5470 bm_pool = &port->priv->bm_pools[pool];
Marcin Wojtas3f518502014-07-10 16:52:13 -03005471
5472 		/* In case of an error, return the buffer to the Buffer
5473 		 * Manager. This release is handled by the hardware, and the
5474 		 * buffer address and cookie are taken from the RX
5475 		 * descriptor.
5476 		 */
5477 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
Marcin Wojtasb5015852015-12-03 15:20:51 +01005478 err_drop_frame:
Marcin Wojtas3f518502014-07-10 16:52:13 -03005479 dev->stats.rx_errors++;
5480 mvpp2_rx_error(port, rx_desc);
Marcin Wojtasb5015852015-12-03 15:20:51 +01005481 /* Return the buffer to the pool */
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005482 mvpp2_pool_refill(port, bm, dma_addr, phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005483 continue;
5484 }
5485
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005486 if (bm_pool->frag_size > PAGE_SIZE)
5487 frag_size = 0;
5488 else
5489 frag_size = bm_pool->frag_size;
5490
5491 skb = build_skb(data, frag_size);
5492 if (!skb) {
5493 netdev_warn(port->dev, "skb build failed\n");
5494 goto err_drop_frame;
5495 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005496
Thomas Petazzoni7ef7e1d2017-02-21 11:28:07 +01005497 err = mvpp2_rx_refill(port, bm_pool, bm);
Marcin Wojtasb5015852015-12-03 15:20:51 +01005498 if (err) {
5499 netdev_err(port->dev, "failed to refill BM pools\n");
5500 goto err_drop_frame;
5501 }
5502
Thomas Petazzoni20396132017-03-07 16:53:00 +01005503 dma_unmap_single(dev->dev.parent, dma_addr,
Marcin Wojtas4229d502015-12-03 15:20:50 +01005504 bm_pool->buf_size, DMA_FROM_DEVICE);
5505
Marcin Wojtas3f518502014-07-10 16:52:13 -03005506 rcvd_pkts++;
5507 rcvd_bytes += rx_bytes;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005508
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005509 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005510 skb_put(skb, rx_bytes);
5511 skb->protocol = eth_type_trans(skb, dev);
5512 mvpp2_rx_csum(port, rx_status, skb);
5513
5514 napi_gro_receive(&port->napi, skb);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005515 }
5516
5517 if (rcvd_pkts) {
5518 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5519
5520 u64_stats_update_begin(&stats->syncp);
5521 stats->rx_packets += rcvd_pkts;
5522 stats->rx_bytes += rcvd_bytes;
5523 u64_stats_update_end(&stats->syncp);
5524 }
5525
5526 /* Update Rx queue management counters */
5527 wmb();
Marcin Wojtasb5015852015-12-03 15:20:51 +01005528 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005529
5530 return rx_todo;
5531}
5532
5533static inline void
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005534tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
Marcin Wojtas3f518502014-07-10 16:52:13 -03005535 struct mvpp2_tx_desc *desc)
5536{
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005537 dma_addr_t buf_dma_addr =
5538 mvpp2_txdesc_dma_addr_get(port, desc);
5539 size_t buf_sz =
5540 mvpp2_txdesc_size_get(port, desc);
5541 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
5542 buf_sz, DMA_TO_DEVICE);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005543 mvpp2_txq_desc_put(txq);
5544}
5545
5546/* Handle tx fragmentation processing */
5547static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5548 struct mvpp2_tx_queue *aggr_txq,
5549 struct mvpp2_tx_queue *txq)
5550{
5551 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5552 struct mvpp2_tx_desc *tx_desc;
5553 int i;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005554 dma_addr_t buf_dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005555
5556 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5557 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5558 void *addr = page_address(frag->page.p) + frag->page_offset;
5559
5560 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005561 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5562 mvpp2_txdesc_size_set(port, tx_desc, frag->size);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005563
Thomas Petazzoni20396132017-03-07 16:53:00 +01005564 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005565 frag->size,
5566 DMA_TO_DEVICE);
Thomas Petazzoni20396132017-03-07 16:53:00 +01005567 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005568 mvpp2_txq_desc_put(txq);
5569 goto error;
5570 }
5571
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005572 mvpp2_txdesc_offset_set(port, tx_desc,
5573 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5574 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5575 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005576
5577 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5578 /* Last descriptor */
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005579 mvpp2_txdesc_cmd_set(port, tx_desc,
5580 MVPP2_TXD_L_DESC);
5581 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005582 } else {
5583 /* Descriptor in the middle: Not First, Not Last */
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005584 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
5585 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005586 }
5587 }
5588
5589 return 0;
5590
5591error:
5592 /* Release all descriptors that were used to map fragments of
5593 * this packet, as well as the corresponding DMA mappings
5594 */
5595 for (i = i - 1; i >= 0; i--) {
5596 tx_desc = txq->descs + i;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005597 tx_desc_unmap_put(port, txq, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005598 }
5599
5600 return -ENOMEM;
5601}
5602
5603/* Main tx processing */
5604static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5605{
5606 struct mvpp2_port *port = netdev_priv(dev);
5607 struct mvpp2_tx_queue *txq, *aggr_txq;
5608 struct mvpp2_txq_pcpu *txq_pcpu;
5609 struct mvpp2_tx_desc *tx_desc;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005610 dma_addr_t buf_dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005611 int frags = 0;
5612 u16 txq_id;
5613 u32 tx_cmd;
5614
5615 txq_id = skb_get_queue_mapping(skb);
5616 txq = port->txqs[txq_id];
5617 txq_pcpu = this_cpu_ptr(txq->pcpu);
5618 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
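	/* Descriptors are staged in this CPU's aggregated TXQ; the hardware
	 * then dispatches them to the per-port TXQ chosen by
	 * mvpp2_txdesc_txq_set().
	 */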
5619
5620 frags = skb_shinfo(skb)->nr_frags + 1;
5621
5622 /* Check number of available descriptors */
5623 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5624 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5625 txq_pcpu, frags)) {
5626 frags = 0;
5627 goto out;
5628 }
5629
5630 /* Get a descriptor for the first part of the packet */
5631 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005632 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5633 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005634
Thomas Petazzoni20396132017-03-07 16:53:00 +01005635 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005636 skb_headlen(skb), DMA_TO_DEVICE);
Thomas Petazzoni20396132017-03-07 16:53:00 +01005637 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005638 mvpp2_txq_desc_put(txq);
5639 frags = 0;
5640 goto out;
5641 }
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005642
5643 mvpp2_txdesc_offset_set(port, tx_desc,
5644 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5645 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5646 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005647
5648 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5649
5650 if (frags == 1) {
5651 /* First and Last descriptor */
5652 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005653 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5654 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005655 } else {
5656 /* First but not Last */
5657 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005658 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5659 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005660
5661 /* Continue with other skb fragments */
5662 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005663 tx_desc_unmap_put(port, txq, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005664 frags = 0;
5665 goto out;
5666 }
5667 }
5668
5669 txq_pcpu->reserved_num -= frags;
5670 txq_pcpu->count += frags;
5671 aggr_txq->count += frags;
5672
5673 /* Enable transmit */
5674 wmb();
5675 mvpp2_aggr_txq_pend_desc_add(port, frags);
5676
5677 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5678 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5679
5680 netif_tx_stop_queue(nq);
5681 }
5682out:
5683 if (frags > 0) {
5684 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5685
5686 u64_stats_update_begin(&stats->syncp);
5687 stats->tx_packets++;
5688 stats->tx_bytes += skb->len;
5689 u64_stats_update_end(&stats->syncp);
5690 } else {
5691 dev->stats.tx_dropped++;
5692 dev_kfree_skb_any(skb);
5693 }
5694
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005695 /* Finalize TX processing */
5696 if (txq_pcpu->count >= txq->done_pkts_coal)
5697 mvpp2_txq_done(port, txq, txq_pcpu);
5698
5699 /* Set the timer in case not all frags were processed */
5700 if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5701 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5702
5703 mvpp2_timer_set(port_pcpu);
5704 }
5705
Marcin Wojtas3f518502014-07-10 16:52:13 -03005706 return NETDEV_TX_OK;
5707}
5708
5709static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5710{
5711 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5712 netdev_err(dev, "FCS error\n");
5713 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5714 netdev_err(dev, "rx fifo overrun error\n");
5715 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5716 netdev_err(dev, "tx fifo underrun error\n");
5717}
5718
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005719static int mvpp2_poll(struct napi_struct *napi, int budget)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005720{
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005721 u32 cause_rx_tx, cause_rx, cause_misc;
5722 int rx_done = 0;
5723 struct mvpp2_port *port = netdev_priv(napi->dev);
Thomas Petazzonia7868412017-03-07 16:53:13 +01005724 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005725
5726 /* Rx/Tx cause register
5727 *
5728 * Bits 0-15: each bit indicates received packets on the Rx queue
5729 * (bit 0 is for Rx queue 0).
5730 *
5731 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5732 * (bit 16 is for Tx queue 0).
5733 *
5734 * Each CPU has its own Rx/Tx cause register
5735 */
Thomas Petazzonia7868412017-03-07 16:53:13 +01005736 cause_rx_tx = mvpp2_percpu_read(port->priv, cpu,
5737 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005738 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005739 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5740
5741 if (cause_misc) {
5742 mvpp2_cause_error(port->dev, cause_misc);
5743
5744 /* Clear the cause register */
5745 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01005746 mvpp2_percpu_write(port->priv, cpu,
5747 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5748 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005749 }
5750
Marcin Wojtas3f518502014-07-10 16:52:13 -03005751 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5752
5753 /* Process RX packets */
5754 cause_rx |= port->pending_cause_rx;
5755 while (cause_rx && budget > 0) {
5756 int count;
5757 struct mvpp2_rx_queue *rxq;
5758
5759 rxq = mvpp2_get_rx_queue(port, cause_rx);
5760 if (!rxq)
5761 break;
5762
5763 count = mvpp2_rx(port, budget, rxq);
5764 rx_done += count;
5765 budget -= count;
5766 if (budget > 0) {
5767 /* Clear the bit associated to this Rx queue
5768 * so that next iteration will continue from
5769 * the next Rx queue.
5770 */
5771 cause_rx &= ~(1 << rxq->logic_rxq);
5772 }
5773 }
5774
5775 if (budget > 0) {
5776 cause_rx = 0;
Eric Dumazet6ad20162017-01-30 08:22:01 -08005777 napi_complete_done(napi, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005778
5779 mvpp2_interrupts_enable(port);
5780 }
5781 port->pending_cause_rx = cause_rx;
5782 return rx_done;
5783}
5784
5785/* Set hw internals when starting port */
5786static void mvpp2_start_dev(struct mvpp2_port *port)
5787{
Philippe Reynes8e072692016-06-28 00:08:11 +02005788 struct net_device *ndev = port->dev;
5789
Marcin Wojtas3f518502014-07-10 16:52:13 -03005790 mvpp2_gmac_max_rx_size_set(port);
5791 mvpp2_txp_max_tx_size_set(port);
5792
5793 napi_enable(&port->napi);
5794
5795 /* Enable interrupts on all CPUs */
5796 mvpp2_interrupts_enable(port);
5797
5798 mvpp2_port_enable(port);
Philippe Reynes8e072692016-06-28 00:08:11 +02005799 phy_start(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005800 netif_tx_start_all_queues(port->dev);
5801}
5802
5803/* Set hw internals when stopping port */
5804static void mvpp2_stop_dev(struct mvpp2_port *port)
5805{
Philippe Reynes8e072692016-06-28 00:08:11 +02005806 struct net_device *ndev = port->dev;
5807
Marcin Wojtas3f518502014-07-10 16:52:13 -03005808 /* Stop new packets from arriving to RXQs */
5809 mvpp2_ingress_disable(port);
5810
5811 mdelay(10);
5812
5813 /* Disable interrupts on all CPUs */
5814 mvpp2_interrupts_disable(port);
5815
5816 napi_disable(&port->napi);
5817
5818 netif_carrier_off(port->dev);
5819 netif_tx_stop_all_queues(port->dev);
5820
5821 mvpp2_egress_disable(port);
5822 mvpp2_port_disable(port);
Philippe Reynes8e072692016-06-28 00:08:11 +02005823 phy_stop(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005824}
5825
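/* Validate the requested ring sizes: cap them to the hardware maxima and
 * round Rx up to a multiple of 16 and Tx to a multiple of 32 descriptors.
 */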
Marcin Wojtas3f518502014-07-10 16:52:13 -03005826static int mvpp2_check_ringparam_valid(struct net_device *dev,
5827 struct ethtool_ringparam *ring)
5828{
5829 u16 new_rx_pending = ring->rx_pending;
5830 u16 new_tx_pending = ring->tx_pending;
5831
5832 if (ring->rx_pending == 0 || ring->tx_pending == 0)
5833 return -EINVAL;
5834
5835 if (ring->rx_pending > MVPP2_MAX_RXD)
5836 new_rx_pending = MVPP2_MAX_RXD;
5837 else if (!IS_ALIGNED(ring->rx_pending, 16))
5838 new_rx_pending = ALIGN(ring->rx_pending, 16);
5839
5840 if (ring->tx_pending > MVPP2_MAX_TXD)
5841 new_tx_pending = MVPP2_MAX_TXD;
5842 else if (!IS_ALIGNED(ring->tx_pending, 32))
5843 new_tx_pending = ALIGN(ring->tx_pending, 32);
5844
5845 if (ring->rx_pending != new_rx_pending) {
5846 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
5847 ring->rx_pending, new_rx_pending);
5848 ring->rx_pending = new_rx_pending;
5849 }
5850
5851 if (ring->tx_pending != new_tx_pending) {
5852 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
5853 ring->tx_pending, new_tx_pending);
5854 ring->tx_pending = new_tx_pending;
5855 }
5856
5857 return 0;
5858}
5859
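/* Reassemble the port MAC address from the PPv2.1 GMAC and LMS
 * source-address registers.
 */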
Thomas Petazzoni26975822017-03-07 16:53:14 +01005860static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005861{
5862 u32 mac_addr_l, mac_addr_m, mac_addr_h;
5863
5864 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5865 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
5866 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
5867 addr[0] = (mac_addr_h >> 24) & 0xFF;
5868 addr[1] = (mac_addr_h >> 16) & 0xFF;
5869 addr[2] = (mac_addr_h >> 8) & 0xFF;
5870 addr[3] = mac_addr_h & 0xFF;
5871 addr[4] = mac_addr_m & 0xFF;
5872 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
5873}
5874
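/* Connect to the PHY described in the device tree and restrict the
 * advertised features to gigabit and below.
 */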
5875static int mvpp2_phy_connect(struct mvpp2_port *port)
5876{
5877 struct phy_device *phy_dev;
5878
5879 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
5880 port->phy_interface);
5881 if (!phy_dev) {
5882 netdev_err(port->dev, "cannot connect to phy\n");
5883 return -ENODEV;
5884 }
5885 phy_dev->supported &= PHY_GBIT_FEATURES;
5886 phy_dev->advertising = phy_dev->supported;
5887
Marcin Wojtas3f518502014-07-10 16:52:13 -03005888 port->link = 0;
5889 port->duplex = 0;
5890 port->speed = 0;
5891
5892 return 0;
5893}
5894
5895static void mvpp2_phy_disconnect(struct mvpp2_port *port)
5896{
Philippe Reynes8e072692016-06-28 00:08:11 +02005897 struct net_device *ndev = port->dev;
5898
5899 phy_disconnect(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005900}
5901
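/* ndo_open: install the parser entries needed by this port (broadcast and
 * interface MAC addresses, MH tag mode, default flow), allocate the Rx/Tx
 * queues, request the port interrupt, connect the PHY and start the port.
 */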
5902static int mvpp2_open(struct net_device *dev)
5903{
5904 struct mvpp2_port *port = netdev_priv(dev);
5905 unsigned char mac_bcast[ETH_ALEN] = {
5906 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5907 int err;
5908
5909 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
5910 if (err) {
5911 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
5912 return err;
5913 }
5914 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
5915 dev->dev_addr, true);
5916 if (err) {
5917 		netdev_err(dev, "mvpp2_prs_mac_da_accept own MAC failed\n");
5918 return err;
5919 }
5920 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
5921 if (err) {
5922 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
5923 return err;
5924 }
5925 err = mvpp2_prs_def_flow(port);
5926 if (err) {
5927 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
5928 return err;
5929 }
5930
5931 /* Allocate the Rx/Tx queues */
5932 err = mvpp2_setup_rxqs(port);
5933 if (err) {
5934 netdev_err(port->dev, "cannot allocate Rx queues\n");
5935 return err;
5936 }
5937
5938 err = mvpp2_setup_txqs(port);
5939 if (err) {
5940 netdev_err(port->dev, "cannot allocate Tx queues\n");
5941 goto err_cleanup_rxqs;
5942 }
5943
5944 err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
5945 if (err) {
5946 netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
5947 goto err_cleanup_txqs;
5948 }
5949
5950 	/* The link is down by default */
5951 netif_carrier_off(port->dev);
5952
5953 err = mvpp2_phy_connect(port);
5954 if (err < 0)
5955 goto err_free_irq;
5956
5957 /* Unmask interrupts on all CPUs */
5958 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
5959
5960 mvpp2_start_dev(port);
5961
5962 return 0;
5963
5964err_free_irq:
5965 free_irq(port->irq, port);
5966err_cleanup_txqs:
5967 mvpp2_cleanup_txqs(port);
5968err_cleanup_rxqs:
5969 mvpp2_cleanup_rxqs(port);
5970 return err;
5971}
5972
5973static int mvpp2_stop(struct net_device *dev)
5974{
5975 struct mvpp2_port *port = netdev_priv(dev);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005976 struct mvpp2_port_pcpu *port_pcpu;
5977 int cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005978
5979 mvpp2_stop_dev(port);
5980 mvpp2_phy_disconnect(port);
5981
5982 /* Mask interrupts on all CPUs */
5983 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5984
5985 free_irq(port->irq, port);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005986 for_each_present_cpu(cpu) {
5987 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
5988
5989 hrtimer_cancel(&port_pcpu->tx_done_timer);
5990 port_pcpu->timer_scheduled = false;
5991 tasklet_kill(&port_pcpu->tx_done_tasklet);
5992 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005993 mvpp2_cleanup_rxqs(port);
5994 mvpp2_cleanup_txqs(port);
5995
5996 return 0;
5997}
5998
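/* ndo_set_rx_mode: update the parser for promiscuous / all-multicast mode,
 * drop the port's old multicast entries and re-add the addresses currently
 * in the device's multicast list.
 */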
5999static void mvpp2_set_rx_mode(struct net_device *dev)
6000{
6001 struct mvpp2_port *port = netdev_priv(dev);
6002 struct mvpp2 *priv = port->priv;
6003 struct netdev_hw_addr *ha;
6004 int id = port->id;
6005 bool allmulti = dev->flags & IFF_ALLMULTI;
6006
6007 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
6008 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
6009 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
6010
6011 	/* Remove all of this port's mcast entries */
6012 mvpp2_prs_mcast_del_all(priv, id);
6013
6014 if (allmulti && !netdev_mc_empty(dev)) {
6015 netdev_for_each_mc_addr(ha, dev)
6016 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
6017 }
6018}
6019
6020static int mvpp2_set_mac_address(struct net_device *dev, void *p)
6021{
6022 struct mvpp2_port *port = netdev_priv(dev);
6023 const struct sockaddr *addr = p;
6024 int err;
6025
6026 if (!is_valid_ether_addr(addr->sa_data)) {
6027 err = -EADDRNOTAVAIL;
6028 goto error;
6029 }
6030
6031 if (!netif_running(dev)) {
6032 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6033 if (!err)
6034 return 0;
6035 /* Reconfigure parser to accept the original MAC address */
6036 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6037 if (err)
6038 goto error;
6039 }
6040
6041 mvpp2_stop_dev(port);
6042
6043 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6044 if (!err)
6045 goto out_start;
6046
6047 	/* Reconfigure parser to accept the original MAC address */
6048 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6049 if (err)
6050 goto error;
6051out_start:
6052 mvpp2_start_dev(port);
6053 mvpp2_egress_enable(port);
6054 mvpp2_ingress_enable(port);
6055 return 0;
6056
6057error:
6058 	netdev_err(dev, "failed to change MAC address\n");
6059 return err;
6060}
6061
6062static int mvpp2_change_mtu(struct net_device *dev, int mtu)
6063{
6064 struct mvpp2_port *port = netdev_priv(dev);
6065 int err;
6066
Jarod Wilson57779872016-10-17 15:54:06 -04006067 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
6068 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
6069 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
6070 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006071 }
6072
6073 if (!netif_running(dev)) {
6074 err = mvpp2_bm_update_mtu(dev, mtu);
6075 if (!err) {
6076 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6077 return 0;
6078 }
6079
6080 /* Reconfigure BM to the original MTU */
6081 err = mvpp2_bm_update_mtu(dev, dev->mtu);
6082 if (err)
6083 goto error;
6084 }
6085
6086 mvpp2_stop_dev(port);
6087
6088 err = mvpp2_bm_update_mtu(dev, mtu);
6089 if (!err) {
6090 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6091 goto out_start;
6092 }
6093
6094 /* Reconfigure BM to the original MTU */
6095 err = mvpp2_bm_update_mtu(dev, dev->mtu);
6096 if (err)
6097 goto error;
6098
6099out_start:
6100 mvpp2_start_dev(port);
6101 mvpp2_egress_enable(port);
6102 mvpp2_ingress_enable(port);
6103
6104 return 0;
6105
6106error:
6107 	netdev_err(dev, "failed to change MTU\n");
6108 return err;
6109}
6110
stephen hemmingerbc1f4472017-01-06 19:12:52 -08006111static void
Marcin Wojtas3f518502014-07-10 16:52:13 -03006112mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6113{
6114 struct mvpp2_port *port = netdev_priv(dev);
6115 unsigned int start;
6116 int cpu;
6117
6118 for_each_possible_cpu(cpu) {
6119 struct mvpp2_pcpu_stats *cpu_stats;
6120 u64 rx_packets;
6121 u64 rx_bytes;
6122 u64 tx_packets;
6123 u64 tx_bytes;
6124
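		/* Snapshot this CPU's counters, retrying if a concurrent
		 * writer updated them in between (u64_stats syncp).
		 */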
6125 cpu_stats = per_cpu_ptr(port->stats, cpu);
6126 do {
6127 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
6128 rx_packets = cpu_stats->rx_packets;
6129 rx_bytes = cpu_stats->rx_bytes;
6130 tx_packets = cpu_stats->tx_packets;
6131 tx_bytes = cpu_stats->tx_bytes;
6132 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
6133
6134 stats->rx_packets += rx_packets;
6135 stats->rx_bytes += rx_bytes;
6136 stats->tx_packets += tx_packets;
6137 stats->tx_bytes += tx_bytes;
6138 }
6139
6140 stats->rx_errors = dev->stats.rx_errors;
6141 stats->rx_dropped = dev->stats.rx_dropped;
6142 stats->tx_dropped = dev->stats.tx_dropped;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006143}
6144
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006145static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6146{
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006147 int ret;
6148
Philippe Reynes8e072692016-06-28 00:08:11 +02006149 if (!dev->phydev)
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006150 return -ENOTSUPP;
6151
Philippe Reynes8e072692016-06-28 00:08:11 +02006152 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006153 if (!ret)
6154 mvpp2_link_event(dev);
6155
6156 return ret;
6157}
6158
Marcin Wojtas3f518502014-07-10 16:52:13 -03006159/* Ethtool methods */
6160
Marcin Wojtas3f518502014-07-10 16:52:13 -03006161/* Set interrupt coalescing for ethtools */
6162static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
6163 struct ethtool_coalesce *c)
6164{
6165 struct mvpp2_port *port = netdev_priv(dev);
6166 int queue;
6167
6168 for (queue = 0; queue < rxq_number; queue++) {
6169 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6170
6171 rxq->time_coal = c->rx_coalesce_usecs;
6172 rxq->pkts_coal = c->rx_max_coalesced_frames;
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01006173 mvpp2_rx_pkts_coal_set(port, rxq);
6174 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006175 }
6176
6177 for (queue = 0; queue < txq_number; queue++) {
6178 struct mvpp2_tx_queue *txq = port->txqs[queue];
6179
6180 txq->done_pkts_coal = c->tx_max_coalesced_frames;
6181 }
6182
Marcin Wojtas3f518502014-07-10 16:52:13 -03006183 return 0;
6184}
6185
6186/* get coalescing for ethtools */
6187static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
6188 struct ethtool_coalesce *c)
6189{
6190 struct mvpp2_port *port = netdev_priv(dev);
6191
6192 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
6193 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
6194 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
6195 return 0;
6196}
6197
6198static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
6199 struct ethtool_drvinfo *drvinfo)
6200{
6201 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
6202 sizeof(drvinfo->driver));
6203 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
6204 sizeof(drvinfo->version));
6205 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
6206 sizeof(drvinfo->bus_info));
6207}
6208
6209static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
6210 struct ethtool_ringparam *ring)
6211{
6212 struct mvpp2_port *port = netdev_priv(dev);
6213
6214 ring->rx_max_pending = MVPP2_MAX_RXD;
6215 ring->tx_max_pending = MVPP2_MAX_TXD;
6216 ring->rx_pending = port->rx_ring_size;
6217 ring->tx_pending = port->tx_ring_size;
6218}
6219
6220static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
6221 struct ethtool_ringparam *ring)
6222{
6223 struct mvpp2_port *port = netdev_priv(dev);
6224 u16 prev_rx_ring_size = port->rx_ring_size;
6225 u16 prev_tx_ring_size = port->tx_ring_size;
6226 int err;
6227
6228 err = mvpp2_check_ringparam_valid(dev, ring);
6229 if (err)
6230 return err;
6231
6232 if (!netif_running(dev)) {
6233 port->rx_ring_size = ring->rx_pending;
6234 port->tx_ring_size = ring->tx_pending;
6235 return 0;
6236 }
6237
6238 /* The interface is running, so we have to force a
6239 * reallocation of the queues
6240 */
6241 mvpp2_stop_dev(port);
6242 mvpp2_cleanup_rxqs(port);
6243 mvpp2_cleanup_txqs(port);
6244
6245 port->rx_ring_size = ring->rx_pending;
6246 port->tx_ring_size = ring->tx_pending;
6247
6248 err = mvpp2_setup_rxqs(port);
6249 if (err) {
6250 /* Reallocate Rx queues with the original ring size */
6251 port->rx_ring_size = prev_rx_ring_size;
6252 ring->rx_pending = prev_rx_ring_size;
6253 err = mvpp2_setup_rxqs(port);
6254 if (err)
6255 goto err_out;
6256 }
6257 err = mvpp2_setup_txqs(port);
6258 if (err) {
6259 /* Reallocate Tx queues with the original ring size */
6260 port->tx_ring_size = prev_tx_ring_size;
6261 ring->tx_pending = prev_tx_ring_size;
6262 err = mvpp2_setup_txqs(port);
6263 if (err)
6264 goto err_clean_rxqs;
6265 }
6266
6267 mvpp2_start_dev(port);
6268 mvpp2_egress_enable(port);
6269 mvpp2_ingress_enable(port);
6270
6271 return 0;
6272
6273err_clean_rxqs:
6274 mvpp2_cleanup_rxqs(port);
6275err_out:
6276 	netdev_err(dev, "failed to change ring parameters\n");
6277 return err;
6278}
6279
6280/* Device ops */
6281
6282static const struct net_device_ops mvpp2_netdev_ops = {
6283 .ndo_open = mvpp2_open,
6284 .ndo_stop = mvpp2_stop,
6285 .ndo_start_xmit = mvpp2_tx,
6286 .ndo_set_rx_mode = mvpp2_set_rx_mode,
6287 .ndo_set_mac_address = mvpp2_set_mac_address,
6288 .ndo_change_mtu = mvpp2_change_mtu,
6289 .ndo_get_stats64 = mvpp2_get_stats64,
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006290 .ndo_do_ioctl = mvpp2_ioctl,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006291};
6292
6293static const struct ethtool_ops mvpp2_eth_tool_ops = {
Florian Fainelli00606c42016-11-15 11:19:48 -08006294 .nway_reset = phy_ethtool_nway_reset,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006295 .get_link = ethtool_op_get_link,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006296 .set_coalesce = mvpp2_ethtool_set_coalesce,
6297 .get_coalesce = mvpp2_ethtool_get_coalesce,
6298 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
6299 .get_ringparam = mvpp2_ethtool_get_ringparam,
6300 .set_ringparam = mvpp2_ethtool_set_ringparam,
Philippe Reynesfb773e92016-06-28 00:08:12 +02006301 .get_link_ksettings = phy_ethtool_get_link_ksettings,
6302 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006303};
6304
Marcin Wojtas3f518502014-07-10 16:52:13 -03006305/* Initialize port HW */
6306static int mvpp2_port_init(struct mvpp2_port *port)
6307{
6308 struct device *dev = port->dev->dev.parent;
6309 struct mvpp2 *priv = port->priv;
6310 struct mvpp2_txq_pcpu *txq_pcpu;
6311 int queue, cpu, err;
6312
6313 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
6314 return -EINVAL;
6315
6316 /* Disable port */
6317 mvpp2_egress_disable(port);
6318 mvpp2_port_disable(port);
6319
6320 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
6321 GFP_KERNEL);
6322 if (!port->txqs)
6323 return -ENOMEM;
6324
6325 /* Associate physical Tx queues to this port and initialize.
6326 * The mapping is predefined.
6327 */
6328 for (queue = 0; queue < txq_number; queue++) {
6329 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
6330 struct mvpp2_tx_queue *txq;
6331
6332 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
Christophe Jaillet177c8d12017-02-19 10:19:57 +01006333 if (!txq) {
6334 err = -ENOMEM;
6335 goto err_free_percpu;
6336 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006337
6338 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
6339 if (!txq->pcpu) {
6340 err = -ENOMEM;
6341 goto err_free_percpu;
6342 }
6343
6344 txq->id = queue_phy_id;
6345 txq->log_id = queue;
6346 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
6347 for_each_present_cpu(cpu) {
6348 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6349 txq_pcpu->cpu = cpu;
6350 }
6351
6352 port->txqs[queue] = txq;
6353 }
6354
6355 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
6356 GFP_KERNEL);
6357 if (!port->rxqs) {
6358 err = -ENOMEM;
6359 goto err_free_percpu;
6360 }
6361
6362 /* Allocate and initialize Rx queue for this port */
6363 for (queue = 0; queue < rxq_number; queue++) {
6364 struct mvpp2_rx_queue *rxq;
6365
6366 /* Map physical Rx queue to port's logical Rx queue */
6367 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08006368 if (!rxq) {
6369 err = -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006370 goto err_free_percpu;
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08006371 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006372 /* Map this Rx queue to a physical queue */
6373 rxq->id = port->first_rxq + queue;
6374 rxq->port = port->id;
6375 rxq->logic_rxq = queue;
6376
6377 port->rxqs[queue] = rxq;
6378 }
6379
6380 /* Configure Rx queue group interrupt for this port */
6381 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
6382
6383 /* Create Rx descriptor rings */
6384 for (queue = 0; queue < rxq_number; queue++) {
6385 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6386
6387 rxq->size = port->rx_ring_size;
6388 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6389 rxq->time_coal = MVPP2_RX_COAL_USEC;
6390 }
6391
6392 mvpp2_ingress_disable(port);
6393
6394 /* Port default configuration */
6395 mvpp2_defaults_set(port);
6396
6397 /* Port's classifier configuration */
6398 mvpp2_cls_oversize_rxq_set(port);
6399 mvpp2_cls_port_config(port);
6400
6401 /* Provide an initial Rx packet size */
6402 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6403
6404 /* Initialize pools for swf */
6405 err = mvpp2_swf_bm_pool_init(port);
6406 if (err)
6407 goto err_free_percpu;
6408
6409 return 0;
6410
6411err_free_percpu:
6412 for (queue = 0; queue < txq_number; queue++) {
6413 if (!port->txqs[queue])
6414 continue;
6415 free_percpu(port->txqs[queue]->pcpu);
6416 }
6417 return err;
6418}
6419
6420/* Ports initialization */
6421static int mvpp2_port_probe(struct platform_device *pdev,
6422 struct device_node *port_node,
6423 struct mvpp2 *priv,
6424 int *next_first_rxq)
6425{
6426 struct device_node *phy_node;
6427 struct mvpp2_port *port;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006428 struct mvpp2_port_pcpu *port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006429 struct net_device *dev;
6430 struct resource *res;
6431 const char *dt_mac_addr;
6432 const char *mac_from;
6433 char hw_mac_addr[ETH_ALEN];
6434 u32 id;
6435 int features;
6436 int phy_mode;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006437 int err, i, cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006438
6439 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6440 rxq_number);
6441 if (!dev)
6442 return -ENOMEM;
6443
6444 phy_node = of_parse_phandle(port_node, "phy", 0);
6445 if (!phy_node) {
6446 dev_err(&pdev->dev, "missing phy\n");
6447 err = -ENODEV;
6448 goto err_free_netdev;
6449 }
6450
6451 phy_mode = of_get_phy_mode(port_node);
6452 if (phy_mode < 0) {
6453 dev_err(&pdev->dev, "incorrect phy mode\n");
6454 err = phy_mode;
6455 goto err_free_netdev;
6456 }
6457
6458 if (of_property_read_u32(port_node, "port-id", &id)) {
6459 err = -EINVAL;
6460 dev_err(&pdev->dev, "missing port-id value\n");
6461 goto err_free_netdev;
6462 }
6463
6464 dev->tx_queue_len = MVPP2_MAX_TXD;
6465 dev->watchdog_timeo = 5 * HZ;
6466 dev->netdev_ops = &mvpp2_netdev_ops;
6467 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6468
6469 port = netdev_priv(dev);
6470
6471 port->irq = irq_of_parse_and_map(port_node, 0);
6472 if (port->irq <= 0) {
6473 err = -EINVAL;
6474 goto err_free_netdev;
6475 }
6476
6477 if (of_property_read_bool(port_node, "marvell,loopback"))
6478 port->flags |= MVPP2_F_LOOPBACK;
6479
6480 port->priv = priv;
6481 port->id = id;
6482 port->first_rxq = *next_first_rxq;
6483 port->phy_node = phy_node;
6484 port->phy_interface = phy_mode;
6485
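	/* PPv2.1 exposes one register region per port via DT resources; on
	 * PPv2.2 the per-port GMAC registers sit at a fixed offset inside the
	 * common interface register region.
	 */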
Thomas Petazzonia7868412017-03-07 16:53:13 +01006486 if (priv->hw_version == MVPP21) {
6487 res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
6488 port->base = devm_ioremap_resource(&pdev->dev, res);
6489 if (IS_ERR(port->base)) {
6490 err = PTR_ERR(port->base);
6491 goto err_free_irq;
6492 }
6493 } else {
6494 if (of_property_read_u32(port_node, "gop-port-id",
6495 &port->gop_id)) {
6496 err = -EINVAL;
6497 dev_err(&pdev->dev, "missing gop-port-id value\n");
6498 goto err_free_irq;
6499 }
6500
6501 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006502 }
6503
6504 /* Alloc per-cpu stats */
6505 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6506 if (!port->stats) {
6507 err = -ENOMEM;
6508 goto err_free_irq;
6509 }
6510
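	/* MAC address selection: device tree first, then the address read
	 * back from the hardware (PPv2.1 only), otherwise a random address.
	 */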
6511 dt_mac_addr = of_get_mac_address(port_node);
6512 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6513 mac_from = "device tree";
6514 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6515 } else {
Thomas Petazzoni26975822017-03-07 16:53:14 +01006516 if (priv->hw_version == MVPP21)
6517 mvpp21_get_mac_address(port, hw_mac_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006518 if (is_valid_ether_addr(hw_mac_addr)) {
6519 mac_from = "hardware";
6520 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6521 } else {
6522 mac_from = "random";
6523 eth_hw_addr_random(dev);
6524 }
6525 }
6526
6527 port->tx_ring_size = MVPP2_MAX_TXD;
6528 port->rx_ring_size = MVPP2_MAX_RXD;
6529 port->dev = dev;
6530 SET_NETDEV_DEV(dev, &pdev->dev);
6531
6532 err = mvpp2_port_init(port);
6533 if (err < 0) {
6534 dev_err(&pdev->dev, "failed to init port %d\n", id);
6535 goto err_free_stats;
6536 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01006537
6538 mvpp2_port_mii_set(port);
6539 mvpp2_port_periodic_xon_disable(port);
6540
6541 if (priv->hw_version == MVPP21)
6542 mvpp2_port_fc_adv_enable(port);
6543
6544 mvpp2_port_reset(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006545
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006546 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6547 if (!port->pcpu) {
6548 err = -ENOMEM;
6549 goto err_free_txq_pcpu;
6550 }
6551
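	/* Initialize the per-CPU TX-done hrtimer and tasklet */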
6552 for_each_present_cpu(cpu) {
6553 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6554
6555 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6556 HRTIMER_MODE_REL_PINNED);
6557 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6558 port_pcpu->timer_scheduled = false;
6559
6560 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6561 (unsigned long)dev);
6562 }
6563
Marcin Wojtas3f518502014-07-10 16:52:13 -03006564 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6565 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6566 dev->features = features | NETIF_F_RXCSUM;
6567 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6568 dev->vlan_features |= features;
6569
Jarod Wilson57779872016-10-17 15:54:06 -04006570 /* MTU range: 68 - 9676 */
6571 dev->min_mtu = ETH_MIN_MTU;
6572 /* 9676 == 9700 - 20 and rounding to 8 */
6573 dev->max_mtu = 9676;
6574
Marcin Wojtas3f518502014-07-10 16:52:13 -03006575 err = register_netdev(dev);
6576 if (err < 0) {
6577 dev_err(&pdev->dev, "failed to register netdev\n");
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006578 goto err_free_port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006579 }
 6580	netdev_info(dev, "Using %s MAC address %pM\n", mac_from, dev->dev_addr);
6581
6582 /* Increment the first Rx queue number to be used by the next port */
6583 *next_first_rxq += rxq_number;
6584 priv->port_list[id] = port;
6585 return 0;
6586
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006587err_free_port_pcpu:
6588 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006589err_free_txq_pcpu:
6590 for (i = 0; i < txq_number; i++)
6591 free_percpu(port->txqs[i]->pcpu);
6592err_free_stats:
6593 free_percpu(port->stats);
6594err_free_irq:
6595 irq_dispose_mapping(port->irq);
6596err_free_netdev:
Peter Chenccb80392016-08-01 15:02:37 +08006597 of_node_put(phy_node);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006598 free_netdev(dev);
6599 return err;
6600}
6601
6602/* Port removal routine */
6603static void mvpp2_port_remove(struct mvpp2_port *port)
6604{
6605 int i;
6606
6607 unregister_netdev(port->dev);
Peter Chenccb80392016-08-01 15:02:37 +08006608 of_node_put(port->phy_node);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006609 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006610 free_percpu(port->stats);
6611 for (i = 0; i < txq_number; i++)
6612 free_percpu(port->txqs[i]->pcpu);
6613 irq_dispose_mapping(port->irq);
6614 free_netdev(port->dev);
6615}
6616
6617/* Initialize decoding windows */
6618static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6619 struct mvpp2 *priv)
6620{
6621 u32 win_enable;
6622 int i;
6623
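	/* Clear all six address decoding windows; only the first four have
	 * remap registers.
	 */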
6624 for (i = 0; i < 6; i++) {
6625 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6626 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6627
6628 if (i < 4)
6629 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6630 }
6631
6632 win_enable = 0;
6633
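	/* Set up one decoding window per DRAM chip select and accumulate the
	 * window enable mask.
	 */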
6634 for (i = 0; i < dram->num_cs; i++) {
6635 const struct mbus_dram_window *cs = dram->cs + i;
6636
6637 mvpp2_write(priv, MVPP2_WIN_BASE(i),
6638 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6639 dram->mbus_dram_target_id);
6640
6641 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6642 (cs->size - 1) & 0xffff0000);
6643
6644 win_enable |= (1 << i);
6645 }
6646
6647 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
6648}
6649
6650/* Initialize Rx FIFOs */
6651static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6652{
6653 int port;
6654
6655 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6656 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6657 MVPP2_RX_FIFO_PORT_DATA_SIZE);
6658 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6659 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
6660 }
6661
6662 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6663 MVPP2_RX_FIFO_PORT_MIN_PKT);
6664 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6665}
6666
6667/* Initialize the network controller HW parts common to all ports */
6668static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6669{
6670 const struct mbus_dram_target_info *dram_target_info;
6671 int err, i;
Marcin Wojtas08a23752014-07-21 13:48:12 -03006672 u32 val;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006673
 6674	/* Check hardware constraints */
6675 if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
6676 (txq_number > MVPP2_MAX_TXQ)) {
6677 dev_err(&pdev->dev, "invalid queue size parameter\n");
6678 return -EINVAL;
6679 }
6680
6681 /* MBUS windows configuration */
6682 dram_target_info = mv_mbus_dram_info();
6683 if (dram_target_info)
6684 mvpp2_conf_mbus_windows(dram_target_info, priv);
6685
Marcin Wojtas08a23752014-07-21 13:48:12 -03006686 /* Disable HW PHY polling */
Thomas Petazzoni26975822017-03-07 16:53:14 +01006687 if (priv->hw_version == MVPP21) {
6688 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6689 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6690 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6691 } else {
6692 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6693 val &= ~MVPP22_SMI_POLLING_EN;
6694 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6695 }
Marcin Wojtas08a23752014-07-21 13:48:12 -03006696
Marcin Wojtas3f518502014-07-10 16:52:13 -03006697 /* Allocate and initialize aggregated TXQs */
6698 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
6699 sizeof(struct mvpp2_tx_queue),
6700 GFP_KERNEL);
6701 if (!priv->aggr_txqs)
6702 return -ENOMEM;
6703
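	/* One aggregated TX queue per present CPU */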
6704 for_each_present_cpu(i) {
6705 priv->aggr_txqs[i].id = i;
6706 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6707 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
6708 MVPP2_AGGR_TXQ_SIZE, i, priv);
6709 if (err < 0)
6710 return err;
6711 }
6712
6713 /* Rx Fifo Init */
6714 mvpp2_rx_fifo_init(priv);
6715
6716 /* Reset Rx queue group interrupt configuration */
6717 for (i = 0; i < MVPP2_MAX_PORTS; i++)
6718 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
6719
Thomas Petazzoni26975822017-03-07 16:53:14 +01006720 if (priv->hw_version == MVPP21)
6721 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6722 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006723
 6724	/* Allow cache snoop when transmitting packets */
6725 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6726
6727 /* Buffer Manager initialization */
6728 err = mvpp2_bm_init(pdev, priv);
6729 if (err < 0)
6730 return err;
6731
6732 /* Parser default initialization */
6733 err = mvpp2_prs_default_init(pdev, priv);
6734 if (err < 0)
6735 return err;
6736
6737 /* Classifier default initialization */
6738 mvpp2_cls_init(priv);
6739
6740 return 0;
6741}
6742
6743static int mvpp2_probe(struct platform_device *pdev)
6744{
6745 struct device_node *dn = pdev->dev.of_node;
6746 struct device_node *port_node;
6747 struct mvpp2 *priv;
6748 struct resource *res;
Thomas Petazzonia7868412017-03-07 16:53:13 +01006749 void __iomem *base;
6750 int port_count, first_rxq, cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006751 int err;
6752
6753 priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
6754 if (!priv)
6755 return -ENOMEM;
6756
Thomas Petazzonifaca9242017-03-07 16:53:06 +01006757 priv->hw_version =
6758 (unsigned long)of_device_get_match_data(&pdev->dev);
6759
Marcin Wojtas3f518502014-07-10 16:52:13 -03006760 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01006761 base = devm_ioremap_resource(&pdev->dev, res);
6762 if (IS_ERR(base))
6763 return PTR_ERR(base);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006764
Thomas Petazzonia7868412017-03-07 16:53:13 +01006765 if (priv->hw_version == MVPP21) {
6766 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6767 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
6768 if (IS_ERR(priv->lms_base))
6769 return PTR_ERR(priv->lms_base);
6770 } else {
6771 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6772 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
6773 if (IS_ERR(priv->iface_base))
6774 return PTR_ERR(priv->iface_base);
6775 }
6776
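	/* Give each present CPU its own view of the packet processor
	 * registers; the per-CPU address space stride depends on the PPv2
	 * revision.
	 */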
6777 for_each_present_cpu(cpu) {
6778 u32 addr_space_sz;
6779
6780 addr_space_sz = (priv->hw_version == MVPP21 ?
6781 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
6782 priv->cpu_base[cpu] = base + cpu * addr_space_sz;
6783 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006784
6785 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6786 if (IS_ERR(priv->pp_clk))
6787 return PTR_ERR(priv->pp_clk);
6788 err = clk_prepare_enable(priv->pp_clk);
6789 if (err < 0)
6790 return err;
6791
6792 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
6793 if (IS_ERR(priv->gop_clk)) {
6794 err = PTR_ERR(priv->gop_clk);
6795 goto err_pp_clk;
6796 }
6797 err = clk_prepare_enable(priv->gop_clk);
6798 if (err < 0)
6799 goto err_pp_clk;
6800
6801 /* Get system's tclk rate */
6802 priv->tclk = clk_get_rate(priv->pp_clk);
6803
6804 /* Initialize network controller */
6805 err = mvpp2_init(pdev, priv);
6806 if (err < 0) {
6807 dev_err(&pdev->dev, "failed to initialize controller\n");
6808 goto err_gop_clk;
6809 }
6810
6811 port_count = of_get_available_child_count(dn);
6812 if (port_count == 0) {
6813 dev_err(&pdev->dev, "no ports enabled\n");
Wei Yongjun575a1932014-07-20 22:02:43 +08006814 err = -ENODEV;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006815 goto err_gop_clk;
6816 }
6817
6818 priv->port_list = devm_kcalloc(&pdev->dev, port_count,
6819 sizeof(struct mvpp2_port *),
6820 GFP_KERNEL);
6821 if (!priv->port_list) {
6822 err = -ENOMEM;
6823 goto err_gop_clk;
6824 }
6825
6826 /* Initialize ports */
6827 first_rxq = 0;
6828 for_each_available_child_of_node(dn, port_node) {
6829 err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
6830 if (err < 0)
6831 goto err_gop_clk;
6832 }
6833
6834 platform_set_drvdata(pdev, priv);
6835 return 0;
6836
6837err_gop_clk:
6838 clk_disable_unprepare(priv->gop_clk);
6839err_pp_clk:
6840 clk_disable_unprepare(priv->pp_clk);
6841 return err;
6842}
6843
6844static int mvpp2_remove(struct platform_device *pdev)
6845{
6846 struct mvpp2 *priv = platform_get_drvdata(pdev);
6847 struct device_node *dn = pdev->dev.of_node;
6848 struct device_node *port_node;
6849 int i = 0;
6850
6851 for_each_available_child_of_node(dn, port_node) {
6852 if (priv->port_list[i])
6853 mvpp2_port_remove(priv->port_list[i]);
6854 i++;
6855 }
6856
6857 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
6858 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
6859
6860 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
6861 }
6862
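	/* Free the per-CPU aggregated TX queue descriptor rings */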
6863 for_each_present_cpu(i) {
6864 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
6865
6866 dma_free_coherent(&pdev->dev,
6867 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
6868 aggr_txq->descs,
Thomas Petazzoni20396132017-03-07 16:53:00 +01006869 aggr_txq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006870 }
6871
6872 clk_disable_unprepare(priv->pp_clk);
6873 clk_disable_unprepare(priv->gop_clk);
6874
6875 return 0;
6876}
6877
6878static const struct of_device_id mvpp2_match[] = {
Thomas Petazzonifaca9242017-03-07 16:53:06 +01006879 {
6880 .compatible = "marvell,armada-375-pp2",
6881 .data = (void *)MVPP21,
6882 },
Marcin Wojtas3f518502014-07-10 16:52:13 -03006883 { }
6884};
6885MODULE_DEVICE_TABLE(of, mvpp2_match);
6886
6887static struct platform_driver mvpp2_driver = {
6888 .probe = mvpp2_probe,
6889 .remove = mvpp2_remove,
6890 .driver = {
6891 .name = MVPP2_DRIVER_NAME,
6892 .of_match_table = mvpp2_match,
6893 },
6894};
6895
6896module_platform_driver(mvpp2_driver);
6897
6898MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
6899MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
Ezequiel Garciac6340992014-07-14 10:34:47 -03006900MODULE_LICENSE("GPL v2");