Marcin Wojtas3f518502014-07-10 16:52:13 -03001/*
2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Marcin Wojtas <mw@semihalf.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/platform_device.h>
17#include <linux/skbuff.h>
18#include <linux/inetdevice.h>
19#include <linux/mbus.h>
20#include <linux/module.h>
21#include <linux/interrupt.h>
22#include <linux/cpumask.h>
23#include <linux/of.h>
24#include <linux/of_irq.h>
25#include <linux/of_mdio.h>
26#include <linux/of_net.h>
27#include <linux/of_address.h>
Thomas Petazzonifaca9242017-03-07 16:53:06 +010028#include <linux/of_device.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030029#include <linux/phy.h>
30#include <linux/clk.h>
Marcin Wojtasedc660f2015-08-06 19:00:30 +020031#include <linux/hrtimer.h>
32#include <linux/ktime.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030033#include <uapi/linux/ppp_defs.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36
37/* RX Fifo Registers */
38#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
39#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
40#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
41#define MVPP2_RX_FIFO_INIT_REG 0x64
42
43/* RX DMA Top Registers */
44#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
45#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
46#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
47#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
48#define MVPP2_POOL_BUF_SIZE_OFFSET 5
49#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
50#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
51#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
52#define MVPP2_RXQ_POOL_SHORT_OFFS 20
Thomas Petazzoni5eac8922017-03-07 16:53:10 +010053#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
54#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
Marcin Wojtas3f518502014-07-10 16:52:13 -030055#define MVPP2_RXQ_POOL_LONG_OFFS 24
Thomas Petazzoni5eac8922017-03-07 16:53:10 +010056#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
57#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
Marcin Wojtas3f518502014-07-10 16:52:13 -030058#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
59#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
60#define MVPP2_RXQ_DISABLE_MASK BIT(31)
61
62/* Parser Registers */
63#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
64#define MVPP2_PRS_PORT_LU_MAX 0xf
65#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
66#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
67#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
68#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
69#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
70#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
71#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
72#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
73#define MVPP2_PRS_TCAM_IDX_REG 0x1100
74#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
75#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
76#define MVPP2_PRS_SRAM_IDX_REG 0x1200
77#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
78#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
79#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
80
81/* Classifier Registers */
82#define MVPP2_CLS_MODE_REG 0x1800
83#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
84#define MVPP2_CLS_PORT_WAY_REG 0x1810
85#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
86#define MVPP2_CLS_LKP_INDEX_REG 0x1814
87#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
88#define MVPP2_CLS_LKP_TBL_REG 0x1818
89#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
90#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
91#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
92#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
93#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
94#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
95#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
96#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
97#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
98#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
99#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
100#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
101
102/* Descriptor Manager Top Registers */
103#define MVPP2_RXQ_NUM_REG 0x2040
104#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
Thomas Petazzonib02f31f2017-03-07 16:53:12 +0100105#define MVPP22_DESC_ADDR_OFFS 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300106#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
107#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
108#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
109#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
110#define MVPP2_RXQ_NUM_NEW_OFFSET 16
111#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
112#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
113#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
114#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
115#define MVPP2_RXQ_THRESH_REG 0x204c
116#define MVPP2_OCCUPIED_THRESH_OFFSET 0
117#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
118#define MVPP2_RXQ_INDEX_REG 0x2050
119#define MVPP2_TXQ_NUM_REG 0x2080
120#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
121#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
122#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200123#define MVPP2_TXQ_THRESH_REG 0x2094
124#define MVPP2_TXQ_THRESH_OFFSET 16
125#define MVPP2_TXQ_THRESH_MASK 0x3fff
Marcin Wojtas3f518502014-07-10 16:52:13 -0300126#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
Marcin Wojtas3f518502014-07-10 16:52:13 -0300127#define MVPP2_TXQ_INDEX_REG 0x2098
128#define MVPP2_TXQ_PREF_BUF_REG 0x209c
129#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
130#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
131#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
132#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
133#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
134#define MVPP2_TXQ_PENDING_REG 0x20a0
135#define MVPP2_TXQ_PENDING_MASK 0x3fff
136#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
137#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
138#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
139#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
140#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
141#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
142#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
143#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
144#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
145#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
146#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
Thomas Petazzonib02f31f2017-03-07 16:53:12 +0100147#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300148#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
149#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
150#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
151#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
152#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
153
154/* MBUS bridge registers */
155#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
156#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
157#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
158#define MVPP2_BASE_ADDR_ENABLE 0x4060
159
Thomas Petazzoni6763ce32017-03-07 16:53:15 +0100160/* AXI Bridge Registers */
161#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
162#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
163#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
164#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
165#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
166#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
167#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
168#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
169#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
170#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
171#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
172#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
173
174/* Values for AXI Bridge registers */
175#define MVPP22_AXI_ATTR_CACHE_OFFS 0
176#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
177
178#define MVPP22_AXI_CODE_CACHE_OFFS 0
179#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
180
181#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
182#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
183#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
184
185#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
186#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
187
Marcin Wojtas3f518502014-07-10 16:52:13 -0300188/* Interrupt Cause and Mask registers */
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200189#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
190#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0
191
Marcin Wojtas3f518502014-07-10 16:52:13 -0300192#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
Thomas Petazzoniab426762017-02-21 11:28:04 +0100193#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
Thomas Petazzonieb1e93a2017-08-03 10:41:55 +0200194#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port))
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100195
196#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
197#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
198#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
199#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
200
204#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
205#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
206#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
207#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
208
Marcin Wojtas3f518502014-07-10 16:52:13 -0300209#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
210#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
211#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
212#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
213#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
214#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200215#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
Marcin Wojtas3f518502014-07-10 16:52:13 -0300216#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
217#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
218#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
219#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
220#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
221#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
222#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
223#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
224#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
225#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
226#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
227#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
228
229/* Buffer Manager registers */
230#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
231#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
232#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
233#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
234#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
235#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
236#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
237#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
238#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
239#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
240#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
241#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
242#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
243#define MVPP2_BM_START_MASK BIT(0)
244#define MVPP2_BM_STOP_MASK BIT(1)
245#define MVPP2_BM_STATE_MASK BIT(4)
246#define MVPP2_BM_LOW_THRESH_OFFS 8
247#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
248#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
249 MVPP2_BM_LOW_THRESH_OFFS)
250#define MVPP2_BM_HIGH_THRESH_OFFS 16
251#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
252#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
253 MVPP2_BM_HIGH_THRESH_OFFS)
254#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
255#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
256#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
257#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
258#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
259#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
260#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
261#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
262#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
263#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100264#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
265#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
266#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
267#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300268#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
269#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
270#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
271#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
272#define MVPP2_BM_VIRT_RLS_REG 0x64c0
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100273#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
274#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
275#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
276#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300277
278/* TX Scheduler registers */
279#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
280#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
281#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
282#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
283#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
284#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
285#define MVPP2_TXP_SCHED_MTU_REG 0x801c
286#define MVPP2_TXP_MTU_MAX 0x7FFFF
287#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
288#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
289#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
290#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
291#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
292#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
293#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
294#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
295#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
296#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
297#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
298#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
299#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
300#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
301
302/* TX general registers */
303#define MVPP2_TX_SNOOP_REG 0x8800
304#define MVPP2_TX_PORT_FLUSH_REG 0x8810
305#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
306
307/* LMS registers */
308#define MVPP2_SRC_ADDR_MIDDLE 0x24
309#define MVPP2_SRC_ADDR_HIGH 0x28
Marcin Wojtas08a23752014-07-21 13:48:12 -0300310#define MVPP2_PHY_AN_CFG0_REG 0x34
311#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300312#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
Thomas Petazzoni31d76772017-02-21 11:28:10 +0100313#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
Marcin Wojtas3f518502014-07-10 16:52:13 -0300314
315/* Per-port registers */
316#define MVPP2_GMAC_CTRL_0_REG 0x0
317#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
318#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
319#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
320#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
321#define MVPP2_GMAC_CTRL_1_REG 0x4
Marcin Wojtasb5c0a802014-07-21 13:48:11 -0300322#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300323#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
324#define MVPP2_GMAC_PCS_LB_EN_BIT 6
325#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
326#define MVPP2_GMAC_SA_LOW_OFFS 7
327#define MVPP2_GMAC_CTRL_2_REG 0x8
328#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
329#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
330#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
331#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
332#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
333#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
334#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
335#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
336#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
337#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
Marcin Wojtas08a23752014-07-21 13:48:12 -0300338#define MVPP2_GMAC_FC_ADV_EN BIT(9)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300339#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
340#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
341#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
342#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
343#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
344#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
345 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
Thomas Petazzoni26975822017-03-07 16:53:14 +0100346#define MVPP22_GMAC_CTRL_4_REG 0x90
347#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
348#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
349#define MVPP22_CTRL4_SYNC_BYPASS BIT(6)
350#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
351
352/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
353 * relative to port->base.
354 */
Antoine Ténart725757a2017-06-12 16:01:39 +0200355#define MVPP22_XLG_CTRL0_REG 0x100
356#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
357#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
358#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
359
Thomas Petazzoni26975822017-03-07 16:53:14 +0100360#define MVPP22_XLG_CTRL3_REG 0x11c
361#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
362#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
Antoine Ténart725757a2017-06-12 16:01:39 +0200363#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
Thomas Petazzoni26975822017-03-07 16:53:14 +0100364
365/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
366#define MVPP22_SMI_MISC_CFG_REG 0x1204
367#define MVPP22_SMI_POLLING_EN BIT(10)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300368
Thomas Petazzonia7868412017-03-07 16:53:13 +0100369#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
370
Marcin Wojtas3f518502014-07-10 16:52:13 -0300371#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
372
373/* Descriptor ring Macros */
374#define MVPP2_QUEUE_NEXT_DESC(q, index) \
375 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
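/* Illustrative example (not part of the original source): for a ring of 128
 * descriptors, last_desc is 127, so MVPP2_QUEUE_NEXT_DESC(q, 126) evaluates
 * to 127 and MVPP2_QUEUE_NEXT_DESC(q, 127) wraps back to 0; the macro is a
 * pure index computation and never touches the hardware.
 */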
376
377/* Various constants */
378
379/* Coalescing */
380#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200381#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200382#define MVPP2_TXDONE_COAL_USEC 1000
Marcin Wojtas3f518502014-07-10 16:52:13 -0300383#define MVPP2_RX_COAL_PKTS 32
384#define MVPP2_RX_COAL_USEC 100
385
386/* The two-byte Marvell header. It either contains a special value used
387 * by Marvell switches when a specific hardware mode is enabled (not
388 * supported by this driver) or is automatically filled with zeroes on
389 * the RX side. Since those two bytes sit at the front of the Ethernet
390 * header, they leave the IP header aligned on a 4-byte boundary
391 * automatically: the hardware skips those two bytes on its
392 * own.
393 */
394#define MVPP2_MH_SIZE 2
395#define MVPP2_ETH_TYPE_LEN 2
396#define MVPP2_PPPOE_HDR_SIZE 8
397#define MVPP2_VLAN_TAG_LEN 4
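/* Worked example of the alignment described above (illustrative, not part of
 * the original source): the 2-byte Marvell header plus the 14-byte Ethernet
 * header (ETH_HLEN) add up to 16 bytes, so the IP header that follows starts
 * on a 4-byte boundary without any software padding or copying.
 */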
398
399/* Lbtd 802.3 type */
400#define MVPP2_IP_LBDT_TYPE 0xfffa
401
Marcin Wojtas3f518502014-07-10 16:52:13 -0300402#define MVPP2_TX_CSUM_MAX_SIZE 9800
403
404/* Timeout constants */
405#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
406#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
407
408#define MVPP2_TX_MTU_MAX 0x7ffff
409
410/* Maximum number of T-CONTs of PON port */
411#define MVPP2_MAX_TCONT 16
412
413/* Maximum number of supported ports */
414#define MVPP2_MAX_PORTS 4
415
416/* Maximum number of TXQs used by single port */
417#define MVPP2_MAX_TXQ 8
418
Marcin Wojtas3f518502014-07-10 16:52:13 -0300419/* Default number of RXQs in use */
420#define MVPP2_DEFAULT_RXQ 4
421
Marcin Wojtas3f518502014-07-10 16:52:13 -0300422/* Max number of Rx descriptors */
423#define MVPP2_MAX_RXD 128
424
425/* Max number of Tx descriptors */
426#define MVPP2_MAX_TXD 1024
427
428/* Amount of Tx descriptors that can be reserved at once by CPU */
429#define MVPP2_CPU_DESC_CHUNK 64
430
431/* Max number of Tx descriptors in each aggregated queue */
432#define MVPP2_AGGR_TXQ_SIZE 256
433
434/* Descriptor aligned size */
435#define MVPP2_DESC_ALIGNED_SIZE 32
436
437/* Descriptor alignment mask */
438#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
439
440/* RX FIFO constants */
441#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
442#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
443#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
444
445/* RX buffer constants */
446#define MVPP2_SKB_SHINFO_SIZE \
447 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
448
449#define MVPP2_RX_PKT_SIZE(mtu) \
450 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
Jisheng Zhang4a0a12d2016-04-01 17:11:05 +0800451 ETH_HLEN + ETH_FCS_LEN, cache_line_size())
Marcin Wojtas3f518502014-07-10 16:52:13 -0300452
453#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
454#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
455#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
456 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
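/* Worked example (illustrative, assuming a 64-byte cache line and the usual
 * NET_SKB_PAD of 64 bytes): for an MTU of 1500,
 *
 *	MVPP2_RX_PKT_SIZE(1500)   = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536
 *	MVPP2_RX_BUF_SIZE(1536)   = 1536 + NET_SKB_PAD = 1600
 *	MVPP2_RX_TOTAL_SIZE(1600) = 1600 + MVPP2_SKB_SHINFO_SIZE
 *
 * and MVPP2_RX_MAX_PKT_SIZE() simply undoes the last two additions.
 */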
457
458#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
459
460/* IPv6 max L3 address size */
461#define MVPP2_MAX_L3_ADDR_SIZE 16
462
463/* Port flags */
464#define MVPP2_F_LOOPBACK BIT(0)
465
466/* Marvell tag types */
467enum mvpp2_tag_type {
468 MVPP2_TAG_TYPE_NONE = 0,
469 MVPP2_TAG_TYPE_MH = 1,
470 MVPP2_TAG_TYPE_DSA = 2,
471 MVPP2_TAG_TYPE_EDSA = 3,
472 MVPP2_TAG_TYPE_VLAN = 4,
473 MVPP2_TAG_TYPE_LAST = 5
474};
475
476/* Parser constants */
477#define MVPP2_PRS_TCAM_SRAM_SIZE 256
478#define MVPP2_PRS_TCAM_WORDS 6
479#define MVPP2_PRS_SRAM_WORDS 4
480#define MVPP2_PRS_FLOW_ID_SIZE 64
481#define MVPP2_PRS_FLOW_ID_MASK 0x3f
482#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
483#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
484#define MVPP2_PRS_IPV4_HEAD 0x40
485#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
486#define MVPP2_PRS_IPV4_MC 0xe0
487#define MVPP2_PRS_IPV4_MC_MASK 0xf0
488#define MVPP2_PRS_IPV4_BC_MASK 0xff
489#define MVPP2_PRS_IPV4_IHL 0x5
490#define MVPP2_PRS_IPV4_IHL_MASK 0xf
491#define MVPP2_PRS_IPV6_MC 0xff
492#define MVPP2_PRS_IPV6_MC_MASK 0xff
493#define MVPP2_PRS_IPV6_HOP_MASK 0xff
494#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
495#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
496#define MVPP2_PRS_DBL_VLANS_MAX 100
497
498/* Tcam structure:
499 * - lookup ID - 4 bits
500 * - port ID - 1 byte
501 * - additional information - 1 byte
502 * - header data - 8 bytes
503 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
504 */
505#define MVPP2_PRS_AI_BITS 8
506#define MVPP2_PRS_PORT_MASK 0xff
507#define MVPP2_PRS_LU_MASK 0xf
508#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
509 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
510#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
511 (((offs) * 2) - ((offs) % 2) + 2)
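/* Illustrative mapping of the two macros above (not in the original code):
 * header-data offsets 0..5 land in tcam.byte[] as follows:
 * data bytes 0,1 go to byte[0],byte[1] with enable masks in byte[2],byte[3];
 * data bytes 2,3 go to byte[4],byte[5] with enable masks in byte[6],byte[7];
 * data bytes 4,5 go to byte[8],byte[9] with enable masks in byte[10],byte[11].
 * Each 16-bit half-word of header data is thus followed by its enable bytes.
 */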
512#define MVPP2_PRS_TCAM_AI_BYTE 16
513#define MVPP2_PRS_TCAM_PORT_BYTE 17
514#define MVPP2_PRS_TCAM_LU_BYTE 20
515#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
516#define MVPP2_PRS_TCAM_INV_WORD 5
517/* Tcam entries ID */
518#define MVPP2_PE_DROP_ALL 0
519#define MVPP2_PE_FIRST_FREE_TID 1
520#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
521#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
522#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
523#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
524#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
525#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
526#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
527#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
528#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
529#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
530#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
531#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
532#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
533#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
534#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
535#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
536#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
537#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
538#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
539#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
540#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
541#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
542#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
543#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
544#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
545
546/* Sram structure
547 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
548 */
549#define MVPP2_PRS_SRAM_RI_OFFS 0
550#define MVPP2_PRS_SRAM_RI_WORD 0
551#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
552#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
553#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
554#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
555#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
556#define MVPP2_PRS_SRAM_UDF_OFFS 73
557#define MVPP2_PRS_SRAM_UDF_BITS 8
558#define MVPP2_PRS_SRAM_UDF_MASK 0xff
559#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
560#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
561#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
562#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
563#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
564#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
565#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
566#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
567#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
568#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
569#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
570#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
571#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
572#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
573#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
574#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
575#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
576#define MVPP2_PRS_SRAM_AI_OFFS 90
577#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
578#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
579#define MVPP2_PRS_SRAM_AI_MASK 0xff
580#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
581#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
582#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
583#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
584
585/* Sram result info bits assignment */
586#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
587#define MVPP2_PRS_RI_DSA_MASK 0x2
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100588#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
589#define MVPP2_PRS_RI_VLAN_NONE 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300590#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
591#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
592#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
593#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
594#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100595#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
596#define MVPP2_PRS_RI_L2_UCAST 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300597#define MVPP2_PRS_RI_L2_MCAST BIT(9)
598#define MVPP2_PRS_RI_L2_BCAST BIT(10)
599#define MVPP2_PRS_RI_PPPOE_MASK 0x800
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100600#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
601#define MVPP2_PRS_RI_L3_UN 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300602#define MVPP2_PRS_RI_L3_IP4 BIT(12)
603#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
604#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
605#define MVPP2_PRS_RI_L3_IP6 BIT(14)
606#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
607#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100608#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
609#define MVPP2_PRS_RI_L3_UCAST 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300610#define MVPP2_PRS_RI_L3_MCAST BIT(15)
611#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
612#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
613#define MVPP2_PRS_RI_UDF3_MASK 0x300000
614#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
615#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
616#define MVPP2_PRS_RI_L4_TCP BIT(22)
617#define MVPP2_PRS_RI_L4_UDP BIT(23)
618#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
619#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
620#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
621#define MVPP2_PRS_RI_DROP_MASK 0x80000000
622
623/* Sram additional info bits assignment */
624#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
625#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
626#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
627#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
628#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
629#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
630#define MVPP2_PRS_SINGLE_VLAN_AI 0
631#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
632
633/* DSA/EDSA type */
634#define MVPP2_PRS_TAGGED true
635#define MVPP2_PRS_UNTAGGED false
636#define MVPP2_PRS_EDSA true
637#define MVPP2_PRS_DSA false
638
639/* MAC entries, shadow udf */
640enum mvpp2_prs_udf {
641 MVPP2_PRS_UDF_MAC_DEF,
642 MVPP2_PRS_UDF_MAC_RANGE,
643 MVPP2_PRS_UDF_L2_DEF,
644 MVPP2_PRS_UDF_L2_DEF_COPY,
645 MVPP2_PRS_UDF_L2_USER,
646};
647
648/* Lookup ID */
649enum mvpp2_prs_lookup {
650 MVPP2_PRS_LU_MH,
651 MVPP2_PRS_LU_MAC,
652 MVPP2_PRS_LU_DSA,
653 MVPP2_PRS_LU_VLAN,
654 MVPP2_PRS_LU_L2,
655 MVPP2_PRS_LU_PPPOE,
656 MVPP2_PRS_LU_IP4,
657 MVPP2_PRS_LU_IP6,
658 MVPP2_PRS_LU_FLOWS,
659 MVPP2_PRS_LU_LAST,
660};
661
662/* L3 cast enum */
663enum mvpp2_prs_l3_cast {
664 MVPP2_PRS_L3_UNI_CAST,
665 MVPP2_PRS_L3_MULTI_CAST,
666 MVPP2_PRS_L3_BROAD_CAST
667};
668
669/* Classifier constants */
670#define MVPP2_CLS_FLOWS_TBL_SIZE 512
671#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
672#define MVPP2_CLS_LKP_TBL_SIZE 64
673
674/* BM constants */
675#define MVPP2_BM_POOLS_NUM 8
676#define MVPP2_BM_LONG_BUF_NUM 1024
677#define MVPP2_BM_SHORT_BUF_NUM 2048
678#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
679#define MVPP2_BM_POOL_PTR_ALIGN 128
680#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
681#define MVPP2_BM_SWF_SHORT_POOL 3
682
683/* BM cookie (32 bits) definition */
684#define MVPP2_BM_COOKIE_POOL_OFFS 8
685#define MVPP2_BM_COOKIE_CPU_OFFS 24
686
687/* BM short pool packet size
688 * These value assure that for SWF the total number
689 * of bytes allocated for each buffer will be 512
690 */
691#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
692
Thomas Petazzonia7868412017-03-07 16:53:13 +0100693#define MVPP21_ADDR_SPACE_SZ 0
694#define MVPP22_ADDR_SPACE_SZ SZ_64K
695
Thomas Petazzonidf089aa2017-08-03 10:41:58 +0200696#define MVPP2_MAX_THREADS 8
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +0200697#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS
Thomas Petazzonia7868412017-03-07 16:53:13 +0100698
Marcin Wojtas3f518502014-07-10 16:52:13 -0300699enum mvpp2_bm_type {
700 MVPP2_BM_FREE,
701 MVPP2_BM_SWF_LONG,
702 MVPP2_BM_SWF_SHORT
703};
704
705/* Definitions */
706
707/* Shared Packet Processor resources */
708struct mvpp2 {
709 /* Shared registers' base addresses */
Marcin Wojtas3f518502014-07-10 16:52:13 -0300710 void __iomem *lms_base;
Thomas Petazzonia7868412017-03-07 16:53:13 +0100711 void __iomem *iface_base;
712
Thomas Petazzonidf089aa2017-08-03 10:41:58 +0200713 /* On PPv2.2, each "software thread" can access the base
714 * register through a separate address space, each 64 KB apart
715 * from each other. Typically, such address spaces will be
716 * used per CPU.
Thomas Petazzonia7868412017-03-07 16:53:13 +0100717 */
Thomas Petazzonidf089aa2017-08-03 10:41:58 +0200718 void __iomem *swth_base[MVPP2_MAX_THREADS];
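	/* Sketch of how this array is expected to be filled (the probe code
	 * doing so is outside this excerpt): on PPv2.2, swth_base[n] would
	 * point at the common base plus n * MVPP22_ADDR_SPACE_SZ, while on
	 * PPv2.1 every entry aliases the same single register window.
	 */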
Marcin Wojtas3f518502014-07-10 16:52:13 -0300719
720 /* Common clocks */
721 struct clk *pp_clk;
722 struct clk *gop_clk;
Thomas Petazzonifceb55d2017-03-07 16:53:18 +0100723 struct clk *mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300724
725 /* List of pointers to port structures */
726 struct mvpp2_port **port_list;
727
728 /* Aggregated TXQs */
729 struct mvpp2_tx_queue *aggr_txqs;
730
731 /* BM pools */
732 struct mvpp2_bm_pool *bm_pools;
733
734 /* PRS shadow table */
735 struct mvpp2_prs_shadow *prs_shadow;
736 /* PRS auxiliary table for double vlan entries control */
737 bool *prs_double_vlans;
738
739 /* Tclk value */
740 u32 tclk;
Thomas Petazzonifaca9242017-03-07 16:53:06 +0100741
742 /* HW version */
743 enum { MVPP21, MVPP22 } hw_version;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +0100744
745 /* Maximum number of RXQs per port */
746 unsigned int max_port_rxqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300747};
748
749struct mvpp2_pcpu_stats {
750 struct u64_stats_sync syncp;
751 u64 rx_packets;
752 u64 rx_bytes;
753 u64 tx_packets;
754 u64 tx_bytes;
755};
756
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200757/* Per-CPU port control */
758struct mvpp2_port_pcpu {
759 struct hrtimer tx_done_timer;
760 bool timer_scheduled;
761 /* Tasklet for egress finalization */
762 struct tasklet_struct tx_done_tasklet;
763};
764
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +0200765struct mvpp2_queue_vector {
766 int irq;
767 struct napi_struct napi;
768 enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
769 int sw_thread_id;
770 u16 sw_thread_mask;
771 int first_rxq;
772 int nrxqs;
773 u32 pending_cause_rx;
774 struct mvpp2_port *port;
775};
776
Marcin Wojtas3f518502014-07-10 16:52:13 -0300777struct mvpp2_port {
778 u8 id;
779
Thomas Petazzonia7868412017-03-07 16:53:13 +0100780 /* Index of the port from the "group of ports" complex point
781 * of view
782 */
783 int gop_id;
784
Marcin Wojtas3f518502014-07-10 16:52:13 -0300785 struct mvpp2 *priv;
786
787 /* Per-port registers' base address */
788 void __iomem *base;
789
790 struct mvpp2_rx_queue **rxqs;
Thomas Petazzoni09f83972017-08-03 10:41:57 +0200791 unsigned int nrxqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300792 struct mvpp2_tx_queue **txqs;
Thomas Petazzoni09f83972017-08-03 10:41:57 +0200793 unsigned int ntxqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300794 struct net_device *dev;
795
796 int pkt_size;
797
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200798 /* Per-CPU port control */
799 struct mvpp2_port_pcpu __percpu *pcpu;
800
Marcin Wojtas3f518502014-07-10 16:52:13 -0300801 /* Flags */
802 unsigned long flags;
803
804 u16 tx_ring_size;
805 u16 rx_ring_size;
806 struct mvpp2_pcpu_stats __percpu *stats;
807
Marcin Wojtas3f518502014-07-10 16:52:13 -0300808 phy_interface_t phy_interface;
809 struct device_node *phy_node;
810 unsigned int link;
811 unsigned int duplex;
812 unsigned int speed;
813
814 struct mvpp2_bm_pool *pool_long;
815 struct mvpp2_bm_pool *pool_short;
816
817 /* Index of first port's physical RXQ */
818 u8 first_rxq;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +0200819
820 struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
821 unsigned int nqvecs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200822 bool has_tx_irqs;
823
824 u32 tx_time_coal;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300825};
826
827/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
828 * layout of the transmit and receive DMA descriptors, and their
829 * layout is therefore defined by the hardware design
830 */
831
832#define MVPP2_TXD_L3_OFF_SHIFT 0
833#define MVPP2_TXD_IP_HLEN_SHIFT 8
834#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
835#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
836#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
837#define MVPP2_TXD_PADDING_DISABLE BIT(23)
838#define MVPP2_TXD_L4_UDP BIT(24)
839#define MVPP2_TXD_L3_IP6 BIT(26)
840#define MVPP2_TXD_L_DESC BIT(28)
841#define MVPP2_TXD_F_DESC BIT(29)
842
843#define MVPP2_RXD_ERR_SUMMARY BIT(15)
844#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
845#define MVPP2_RXD_ERR_CRC 0x0
846#define MVPP2_RXD_ERR_OVERRUN BIT(13)
847#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
848#define MVPP2_RXD_BM_POOL_ID_OFFS 16
849#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
850#define MVPP2_RXD_HWF_SYNC BIT(21)
851#define MVPP2_RXD_L4_CSUM_OK BIT(22)
852#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
853#define MVPP2_RXD_L4_TCP BIT(25)
854#define MVPP2_RXD_L4_UDP BIT(26)
855#define MVPP2_RXD_L3_IP4 BIT(28)
856#define MVPP2_RXD_L3_IP6 BIT(30)
857#define MVPP2_RXD_BUF_HDR BIT(31)
858
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100859/* HW TX descriptor for PPv2.1 */
860struct mvpp21_tx_desc {
Marcin Wojtas3f518502014-07-10 16:52:13 -0300861 u32 command; /* Options used by HW for packet transmitting.*/
862 u8 packet_offset; /* the offset from the buffer beginning */
863 u8 phys_txq; /* destination queue ID */
864 u16 data_size; /* data size of transmitted packet in bytes */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100865 u32 buf_dma_addr; /* physical addr of transmitted buffer */
Marcin Wojtas3f518502014-07-10 16:52:13 -0300866 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
867 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
868 u32 reserved2; /* reserved (for future use) */
869};
870
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100871/* HW RX descriptor for PPv2.1 */
872struct mvpp21_rx_desc {
Marcin Wojtas3f518502014-07-10 16:52:13 -0300873 u32 status; /* info about received packet */
874 u16 reserved1; /* parser_info (for future use, PnC) */
875 u16 data_size; /* size of received packet in bytes */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100876 u32 buf_dma_addr; /* physical address of the buffer */
Marcin Wojtas3f518502014-07-10 16:52:13 -0300877 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
878 u16 reserved2; /* gem_port_id (for future use, PON) */
879 u16 reserved3; /* csum_l4 (for future use, PnC) */
880 u8 reserved4; /* bm_qset (for future use, BM) */
881 u8 reserved5;
882 u16 reserved6; /* classify_info (for future use, PnC) */
883 u32 reserved7; /* flow_id (for future use, PnC) */
884 u32 reserved8;
885};
886
Thomas Petazzonie7c53592017-03-07 16:53:08 +0100887/* HW TX descriptor for PPv2.2 */
888struct mvpp22_tx_desc {
889 u32 command;
890 u8 packet_offset;
891 u8 phys_txq;
892 u16 data_size;
893 u64 reserved1;
894 u64 buf_dma_addr_ptp;
895 u64 buf_cookie_misc;
896};
897
898/* HW RX descriptor for PPv2.2 */
899struct mvpp22_rx_desc {
900 u32 status;
901 u16 reserved1;
902 u16 data_size;
903 u32 reserved2;
904 u32 reserved3;
905 u64 buf_dma_addr_key_hash;
906 u64 buf_cookie_misc;
907};
908
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100909/* Opaque type used by the driver to manipulate the HW TX and RX
910 * descriptors
911 */
912struct mvpp2_tx_desc {
913 union {
914 struct mvpp21_tx_desc pp21;
Thomas Petazzonie7c53592017-03-07 16:53:08 +0100915 struct mvpp22_tx_desc pp22;
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100916 };
917};
918
919struct mvpp2_rx_desc {
920 union {
921 struct mvpp21_rx_desc pp21;
Thomas Petazzonie7c53592017-03-07 16:53:08 +0100922 struct mvpp22_rx_desc pp22;
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100923 };
924};
925
Thomas Petazzoni83544912016-12-21 11:28:49 +0100926struct mvpp2_txq_pcpu_buf {
927 /* Transmitted SKB */
928 struct sk_buff *skb;
929
930 /* Physical address of transmitted buffer */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100931 dma_addr_t dma;
Thomas Petazzoni83544912016-12-21 11:28:49 +0100932
933 /* Size transmitted */
934 size_t size;
935};
936
Marcin Wojtas3f518502014-07-10 16:52:13 -0300937/* Per-CPU Tx queue control */
938struct mvpp2_txq_pcpu {
939 int cpu;
940
941 /* Number of Tx DMA descriptors in the descriptor ring */
942 int size;
943
944 /* Number of currently used Tx DMA descriptor in the
945 * descriptor ring
946 */
947 int count;
948
949 /* Number of Tx DMA descriptors reserved for each CPU */
950 int reserved_num;
951
Thomas Petazzoni83544912016-12-21 11:28:49 +0100952 /* Infos about transmitted buffers */
953 struct mvpp2_txq_pcpu_buf *buffs;
Marcin Wojtas71ce3912015-08-06 19:00:29 +0200954
Marcin Wojtas3f518502014-07-10 16:52:13 -0300955 /* Index of last TX DMA descriptor that was inserted */
956 int txq_put_index;
957
958 /* Index of the TX DMA descriptor to be cleaned up */
959 int txq_get_index;
960};
961
962struct mvpp2_tx_queue {
963 /* Physical number of this Tx queue */
964 u8 id;
965
966 /* Logical number of this Tx queue */
967 u8 log_id;
968
969 /* Number of Tx DMA descriptors in the descriptor ring */
970 int size;
971
972 /* Number of currently used Tx DMA descriptor in the descriptor ring */
973 int count;
974
975 /* Per-CPU control of physical Tx queues */
976 struct mvpp2_txq_pcpu __percpu *pcpu;
977
Marcin Wojtas3f518502014-07-10 16:52:13 -0300978 u32 done_pkts_coal;
979
980	/* Virtual address of the Tx DMA descriptors array */
981 struct mvpp2_tx_desc *descs;
982
983 /* DMA address of the Tx DMA descriptors array */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100984 dma_addr_t descs_dma;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300985
986 /* Index of the last Tx DMA descriptor */
987 int last_desc;
988
989 /* Index of the next Tx DMA descriptor to process */
990 int next_desc_to_proc;
991};
992
993struct mvpp2_rx_queue {
994 /* RX queue number, in the range 0-31 for physical RXQs */
995 u8 id;
996
997 /* Num of rx descriptors in the rx descriptor ring */
998 int size;
999
1000 u32 pkts_coal;
1001 u32 time_coal;
1002
1003 /* Virtual address of the RX DMA descriptors array */
1004 struct mvpp2_rx_desc *descs;
1005
1006 /* DMA address of the RX DMA descriptors array */
Thomas Petazzoni20396132017-03-07 16:53:00 +01001007 dma_addr_t descs_dma;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001008
1009 /* Index of the last RX DMA descriptor */
1010 int last_desc;
1011
1012 /* Index of the next RX DMA descriptor to process */
1013 int next_desc_to_proc;
1014
1015 /* ID of port to which physical RXQ is mapped */
1016 int port;
1017
1018 /* Port's logic RXQ number to which physical RXQ is mapped */
1019 int logic_rxq;
1020};
1021
1022union mvpp2_prs_tcam_entry {
1023 u32 word[MVPP2_PRS_TCAM_WORDS];
1024 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
1025};
1026
1027union mvpp2_prs_sram_entry {
1028 u32 word[MVPP2_PRS_SRAM_WORDS];
1029 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
1030};
1031
1032struct mvpp2_prs_entry {
1033 u32 index;
1034 union mvpp2_prs_tcam_entry tcam;
1035 union mvpp2_prs_sram_entry sram;
1036};
1037
1038struct mvpp2_prs_shadow {
1039 bool valid;
1040 bool finish;
1041
1042 /* Lookup ID */
1043 int lu;
1044
1045 /* User defined offset */
1046 int udf;
1047
1048 /* Result info */
1049 u32 ri;
1050 u32 ri_mask;
1051};
1052
1053struct mvpp2_cls_flow_entry {
1054 u32 index;
1055 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
1056};
1057
1058struct mvpp2_cls_lookup_entry {
1059 u32 lkpid;
1060 u32 way;
1061 u32 data;
1062};
1063
1064struct mvpp2_bm_pool {
1065 /* Pool number in the range 0-7 */
1066 int id;
1067 enum mvpp2_bm_type type;
1068
1069 /* Buffer Pointers Pool External (BPPE) size */
1070 int size;
Thomas Petazzonid01524d2017-03-07 16:53:09 +01001071 /* BPPE size in bytes */
1072 int size_bytes;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001073 /* Number of buffers for this pool */
1074 int buf_num;
1075 /* Pool buffer size */
1076 int buf_size;
1077 /* Packet size */
1078 int pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01001079 int frag_size;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001080
1081 /* BPPE virtual base address */
1082 u32 *virt_addr;
Thomas Petazzoni20396132017-03-07 16:53:00 +01001083 /* BPPE DMA base address */
1084 dma_addr_t dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001085
1086 /* Ports using BM pool */
1087 u32 port_map;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001088};
1089
Thomas Petazzoni213f4282017-08-03 10:42:00 +02001090/* Queue modes */
1091#define MVPP2_QDIST_SINGLE_MODE 0
1092#define MVPP2_QDIST_MULTI_MODE 1
1093
1094static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
1095
1096module_param(queue_mode, int, 0444);
1097MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
1098
Marcin Wojtas3f518502014-07-10 16:52:13 -03001099#define MVPP2_DRIVER_NAME "mvpp2"
1100#define MVPP2_DRIVER_VERSION "1.0"
1101
1102/* Utility/helper methods */
1103
1104static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1105{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001106 writel(data, priv->swth_base[0] + offset);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001107}
1108
1109static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1110{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001111 return readl(priv->swth_base[0] + offset);
Thomas Petazzonia7868412017-03-07 16:53:13 +01001112}
1113
1114/* These accessors should be used to access:
1115 *
1116 * - per-CPU registers, where each CPU has its own copy of the
1117 * register.
1118 *
1119 * MVPP2_BM_VIRT_ALLOC_REG
1120 * MVPP2_BM_ADDR_HIGH_ALLOC
1121 * MVPP22_BM_ADDR_HIGH_RLS_REG
1122 * MVPP2_BM_VIRT_RLS_REG
1123 * MVPP2_ISR_RX_TX_CAUSE_REG
1124 * MVPP2_ISR_RX_TX_MASK_REG
1125 * MVPP2_TXQ_NUM_REG
1126 * MVPP2_AGGR_TXQ_UPDATE_REG
1127 * MVPP2_TXQ_RSVD_REQ_REG
1128 * MVPP2_TXQ_RSVD_RSLT_REG
1129 * MVPP2_TXQ_SENT_REG
1130 * MVPP2_RXQ_NUM_REG
1131 *
1132 * - global registers that must be accessed through a specific CPU
1133 * window, because they are related to an access to a per-CPU
1134 * register
1135 *
1136 * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
1137 * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
1138 * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
1139 * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
1140 * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
1141 * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
1142 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1143 * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
1144 * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
1145 * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
1147 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1149 */
1150static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
1151 u32 offset, u32 data)
1152{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001153 writel(data, priv->swth_base[cpu] + offset);
Thomas Petazzonia7868412017-03-07 16:53:13 +01001154}
1155
1156static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
1157 u32 offset)
1158{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001159 return readl(priv->swth_base[cpu] + offset);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001160}
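/* Usage sketch for the per-CPU accessors above (illustrative only, built from
 * names defined earlier in this file): reading the per-CPU "sent descriptors"
 * counter of a TX queue would look roughly like
 *
 *	u32 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
 *				    MVPP2_TXQ_SENT_REG(txq->id));
 *	int sent = (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
 *		   MVPP2_TRANSMITTED_COUNT_OFFSET;
 *
 * so that each CPU goes through its own register window instead of the shared
 * window used by mvpp2_read()/mvpp2_write().
 */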
1161
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001162static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1163 struct mvpp2_tx_desc *tx_desc)
1164{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001165 if (port->priv->hw_version == MVPP21)
1166 return tx_desc->pp21.buf_dma_addr;
1167 else
1168 return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001169}
1170
1171static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1172 struct mvpp2_tx_desc *tx_desc,
1173 dma_addr_t dma_addr)
1174{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001175 if (port->priv->hw_version == MVPP21) {
1176 tx_desc->pp21.buf_dma_addr = dma_addr;
1177 } else {
1178 u64 val = (u64)dma_addr;
1179
1180 tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
1181 tx_desc->pp22.buf_dma_addr_ptp |= val;
1182 }
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001183}
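/* Note on the PPv2.2 accessors above (illustrative, inferred only from the
 * field names): buf_dma_addr_ptp is a 64-bit field whose low bits
 * (GENMASK_ULL(40, 0)) hold the DMA address while the upper bits apparently
 * carry other descriptor state, which is why the setter clears that mask and
 * ORs in the new address instead of assigning the whole field.
 */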
1184
1185static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
1186 struct mvpp2_tx_desc *tx_desc)
1187{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001188 if (port->priv->hw_version == MVPP21)
1189 return tx_desc->pp21.data_size;
1190 else
1191 return tx_desc->pp22.data_size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001192}
1193
1194static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1195 struct mvpp2_tx_desc *tx_desc,
1196 size_t size)
1197{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001198 if (port->priv->hw_version == MVPP21)
1199 tx_desc->pp21.data_size = size;
1200 else
1201 tx_desc->pp22.data_size = size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001202}
1203
1204static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1205 struct mvpp2_tx_desc *tx_desc,
1206 unsigned int txq)
1207{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001208 if (port->priv->hw_version == MVPP21)
1209 tx_desc->pp21.phys_txq = txq;
1210 else
1211 tx_desc->pp22.phys_txq = txq;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001212}
1213
1214static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1215 struct mvpp2_tx_desc *tx_desc,
1216 unsigned int command)
1217{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001218 if (port->priv->hw_version == MVPP21)
1219 tx_desc->pp21.command = command;
1220 else
1221 tx_desc->pp22.command = command;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001222}
1223
1224static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
1225 struct mvpp2_tx_desc *tx_desc,
1226 unsigned int offset)
1227{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001228 if (port->priv->hw_version == MVPP21)
1229 tx_desc->pp21.packet_offset = offset;
1230 else
1231 tx_desc->pp22.packet_offset = offset;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001232}
1233
1234static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
1235 struct mvpp2_tx_desc *tx_desc)
1236{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001237 if (port->priv->hw_version == MVPP21)
1238 return tx_desc->pp21.packet_offset;
1239 else
1240 return tx_desc->pp22.packet_offset;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001241}
1242
1243static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1244 struct mvpp2_rx_desc *rx_desc)
1245{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001246 if (port->priv->hw_version == MVPP21)
1247 return rx_desc->pp21.buf_dma_addr;
1248 else
1249 return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001250}
1251
1252static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1253 struct mvpp2_rx_desc *rx_desc)
1254{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001255 if (port->priv->hw_version == MVPP21)
1256 return rx_desc->pp21.buf_cookie;
1257 else
1258 return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001259}
1260
1261static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1262 struct mvpp2_rx_desc *rx_desc)
1263{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001264 if (port->priv->hw_version == MVPP21)
1265 return rx_desc->pp21.data_size;
1266 else
1267 return rx_desc->pp22.data_size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001268}
1269
1270static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1271 struct mvpp2_rx_desc *rx_desc)
1272{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001273 if (port->priv->hw_version == MVPP21)
1274 return rx_desc->pp21.status;
1275 else
1276 return rx_desc->pp22.status;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001277}
1278
Marcin Wojtas3f518502014-07-10 16:52:13 -03001279static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1280{
1281 txq_pcpu->txq_get_index++;
1282 if (txq_pcpu->txq_get_index == txq_pcpu->size)
1283 txq_pcpu->txq_get_index = 0;
1284}
1285
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001286static void mvpp2_txq_inc_put(struct mvpp2_port *port,
1287 struct mvpp2_txq_pcpu *txq_pcpu,
Marcin Wojtas71ce3912015-08-06 19:00:29 +02001288 struct sk_buff *skb,
1289 struct mvpp2_tx_desc *tx_desc)
Marcin Wojtas3f518502014-07-10 16:52:13 -03001290{
Thomas Petazzoni83544912016-12-21 11:28:49 +01001291 struct mvpp2_txq_pcpu_buf *tx_buf =
1292 txq_pcpu->buffs + txq_pcpu->txq_put_index;
1293 tx_buf->skb = skb;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001294 tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
1295 tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
1296 mvpp2_txdesc_offset_get(port, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001297 txq_pcpu->txq_put_index++;
1298 if (txq_pcpu->txq_put_index == txq_pcpu->size)
1299 txq_pcpu->txq_put_index = 0;
1300}
1301
1302/* Get number of physical egress port */
1303static inline int mvpp2_egress_port(struct mvpp2_port *port)
1304{
1305 return MVPP2_MAX_TCONT + port->id;
1306}
1307
1308/* Get number of physical TXQ */
1309static inline int mvpp2_txq_phys(int port, int txq)
1310{
1311 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1312}
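/* Illustrative example for the two helpers above (not in the original
 * source): with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8, the egress port
 * of Ethernet port 1 is 16 + 1 = 17, and its logical TX queue 3 maps to
 * physical TX queue (16 + 1) * 8 + 3 = 139.
 */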
1313
1314/* Parser configuration routines */
1315
1316/* Update parser tcam and sram hw entries */
1317static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1318{
1319 int i;
1320
1321 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1322 return -EINVAL;
1323
1324 /* Clear entry invalidation bit */
1325 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1326
1327 /* Write tcam index - indirect access */
1328 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1329 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1330 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1331
1332 /* Write sram index - indirect access */
1333 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1334 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1335 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1336
1337 return 0;
1338}
1339
1340/* Read tcam entry from hw */
1341static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1342{
1343 int i;
1344
1345 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1346 return -EINVAL;
1347
1348 /* Write tcam index - indirect access */
1349 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1350
1351 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1352 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1353 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1354 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1355
1356 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1357 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1358
1359 /* Write sram index - indirect access */
1360 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1361 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1362 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1363
1364 return 0;
1365}
1366
1367/* Invalidate tcam hw entry */
1368static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1369{
1370 /* Write index - indirect access */
1371 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1372 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1373 MVPP2_PRS_TCAM_INV_MASK);
1374}
1375
1376/* Enable shadow table entry and set its lookup ID */
1377static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1378{
1379 priv->prs_shadow[index].valid = true;
1380 priv->prs_shadow[index].lu = lu;
1381}
1382
1383/* Update ri fields in shadow table entry */
1384static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1385 unsigned int ri, unsigned int ri_mask)
1386{
1387 priv->prs_shadow[index].ri_mask = ri_mask;
1388 priv->prs_shadow[index].ri = ri;
1389}
1390
1391/* Update lookup field in tcam sw entry */
1392static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1393{
1394 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1395
1396 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1397 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1398}
1399
1400/* Update mask for single port in tcam sw entry */
1401static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1402 unsigned int port, bool add)
1403{
1404 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1405
1406 if (add)
1407 pe->tcam.byte[enable_off] &= ~(1 << port);
1408 else
1409 pe->tcam.byte[enable_off] |= 1 << port;
1410}
1411
1412/* Update port map in tcam sw entry */
1413static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1414 unsigned int ports)
1415{
1416 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1417 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1418
1419 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1420 pe->tcam.byte[enable_off] &= ~port_mask;
1421 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1422}
1423
1424/* Obtain port map from tcam sw entry */
1425static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1426{
1427 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1428
1429 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1430}
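/* Worked example of the inverted port-map encoding used by the helpers above
 * (illustrative only): mvpp2_prs_tcam_port_map_set(pe, BIT(0)) stores
 * ~BIT(0) & 0xff = 0xfe in the enable byte, and
 * mvpp2_prs_tcam_port_map_get() then recovers ~0xfe & 0xff = 0x01, so a
 * cleared enable bit means "this port takes part in the lookup".
 */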
1431
1432/* Set byte of data and its enable bits in tcam sw entry */
1433static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1434 unsigned int offs, unsigned char byte,
1435 unsigned char enable)
1436{
1437 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1438 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1439}
1440
1441/* Get byte of data and its enable bits from tcam sw entry */
1442static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1443 unsigned int offs, unsigned char *byte,
1444 unsigned char *enable)
1445{
1446 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1447 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1448}
1449
1450/* Compare tcam data bytes with a pattern */
1451static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1452 u16 data)
1453{
1454 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1455 u16 tcam_data;
1456
 1457	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1458 if (tcam_data != data)
1459 return false;
1460 return true;
1461}
1462
1463/* Update ai bits in tcam sw entry */
1464static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1465 unsigned int bits, unsigned int enable)
1466{
1467 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1468
1469 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1470
1471 if (!(enable & BIT(i)))
1472 continue;
1473
1474 if (bits & BIT(i))
1475 pe->tcam.byte[ai_idx] |= 1 << i;
1476 else
1477 pe->tcam.byte[ai_idx] &= ~(1 << i);
1478 }
1479
1480 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1481}
1482
1483/* Get ai bits from tcam sw entry */
1484static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1485{
1486 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1487}
1488
1489/* Set ethertype in tcam sw entry */
1490static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1491 unsigned short ethertype)
1492{
1493 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1494 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1495}
1496
1497/* Set bits in sram sw entry */
1498static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1499 int val)
1500{
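    /* Only the byte containing bit_num is touched; a caller setting a
     * multi-bit field that crosses a byte boundary (e.g.
     * mvpp2_prs_sram_offset_set() below) updates the spill-over byte
     * itself.
     */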
1501 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1502}
1503
1504/* Clear bits in sram sw entry */
1505static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1506 int val)
1507{
1508 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1509}
1510
1511/* Update ri bits in sram sw entry */
1512static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1513 unsigned int bits, unsigned int mask)
1514{
1515 unsigned int i;
1516
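    /* Every result-info bit is paired with a control bit: the RI bit
     * carries the value and the RI_CTRL bit marks it as valid, so only
     * the bits selected by the mask are updated.
     */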
1517 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1518 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1519
1520 if (!(mask & BIT(i)))
1521 continue;
1522
1523 if (bits & BIT(i))
1524 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1525 else
1526 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1527
1528 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1529 }
1530}
1531
1532/* Obtain ri bits from sram sw entry */
1533static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1534{
1535 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1536}
1537
1538/* Update ai bits in sram sw entry */
1539static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1540 unsigned int bits, unsigned int mask)
1541{
1542 unsigned int i;
1543 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1544
1545 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1546
1547 if (!(mask & BIT(i)))
1548 continue;
1549
1550 if (bits & BIT(i))
1551 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1552 else
1553 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1554
1555 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1556 }
1557}
1558
1559/* Read ai bits from sram sw entry */
1560static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1561{
1562 u8 bits;
1563 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1564 int ai_en_off = ai_off + 1;
1565 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1566
1567 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1568 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1569
1570 return bits;
1571}
1572
1573/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 1574 * lookup iteration
1575 */
1576static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1577 unsigned int lu)
1578{
1579 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1580
1581 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1582 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1583 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1584}
1585
1586/* In the sram sw entry set sign and value of the next lookup offset
1587 * and the offset value generated to the classifier
1588 */
1589static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1590 unsigned int op)
1591{
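    /* The shift is encoded as sign + magnitude: a dedicated sign bit
     * plus an unsigned byte, rather than a two's-complement field.
     */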
1592 /* Set sign */
1593 if (shift < 0) {
1594 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1595 shift = 0 - shift;
1596 } else {
1597 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1598 }
1599
1600 /* Set value */
1601 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1602 (unsigned char)shift;
1603
1604 /* Reset and set operation */
1605 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1606 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1607 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1608
1609 /* Set base offset as current */
1610 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1611}
1612
1613/* In the sram sw entry set sign and value of the user defined offset
1614 * generated to the classifier
1615 */
1616static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1617 unsigned int type, int offset,
1618 unsigned int op)
1619{
1620 /* Set sign */
1621 if (offset < 0) {
1622 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1623 offset = 0 - offset;
1624 } else {
1625 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1626 }
1627
1628 /* Set value */
1629 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1630 MVPP2_PRS_SRAM_UDF_MASK);
1631 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1632 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1633 MVPP2_PRS_SRAM_UDF_BITS)] &=
1634 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1635 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1636 MVPP2_PRS_SRAM_UDF_BITS)] |=
1637 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1638
1639 /* Set offset type */
1640 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1641 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1642 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1643
1644 /* Set offset operation */
1645 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1646 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1647 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1648
1649 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1650 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1651 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1652 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1653
1654 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1655 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1656 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1657
1658 /* Set base offset as current */
1659 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1660}
1661
1662/* Find parser flow entry */
1663static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1664{
1665 struct mvpp2_prs_entry *pe;
1666 int tid;
1667
1668 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1669 if (!pe)
1670 return NULL;
1671 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1672
 1673	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
1674 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1675 u8 bits;
1676
1677 if (!priv->prs_shadow[tid].valid ||
1678 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1679 continue;
1680
1681 pe->index = tid;
1682 mvpp2_prs_hw_read(priv, pe);
1683 bits = mvpp2_prs_sram_ai_get(pe);
1684
 1685		/* Sram stores the classification lookup ID in AI bits [5:0] */
1686 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1687 return pe;
1688 }
1689 kfree(pe);
1690
1691 return NULL;
1692}
1693
1694/* Return first free tcam index, seeking from start to end */
1695static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1696 unsigned char end)
1697{
1698 int tid;
1699
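    /* The scan always walks upwards; swapping keeps the helper correct
     * when a caller passes the bounds in reverse order.
     */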
1700 if (start > end)
1701 swap(start, end);
1702
1703 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1704 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1705
1706 for (tid = start; tid <= end; tid++) {
1707 if (!priv->prs_shadow[tid].valid)
1708 return tid;
1709 }
1710
1711 return -EINVAL;
1712}
1713
1714/* Enable/disable dropping all mac da's */
1715static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1716{
1717 struct mvpp2_prs_entry pe;
1718
1719 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
 1720		/* Entry exists - update port only */
1721 pe.index = MVPP2_PE_DROP_ALL;
1722 mvpp2_prs_hw_read(priv, &pe);
1723 } else {
1724 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001725 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001726 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1727 pe.index = MVPP2_PE_DROP_ALL;
1728
1729 /* Non-promiscuous mode for all ports - DROP unknown packets */
1730 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1731 MVPP2_PRS_RI_DROP_MASK);
1732
1733 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1734 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1735
1736 /* Update shadow table */
1737 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1738
1739 /* Mask all ports */
1740 mvpp2_prs_tcam_port_map_set(&pe, 0);
1741 }
1742
1743 /* Update port mask */
1744 mvpp2_prs_tcam_port_set(&pe, port, add);
1745
1746 mvpp2_prs_hw_write(priv, &pe);
1747}
1748
1749/* Set port to promiscuous mode */
1750static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1751{
1752 struct mvpp2_prs_entry pe;
1753
Joe Perchesdbedd442015-03-06 20:49:12 -08001754 /* Promiscuous mode - Accept unknown packets */
Marcin Wojtas3f518502014-07-10 16:52:13 -03001755
1756 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
 1757		/* Entry exists - update port only */
1758 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1759 mvpp2_prs_hw_read(priv, &pe);
1760 } else {
1761 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001762 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001763 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1764 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1765
1766 /* Continue - set next lookup */
1767 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1768
1769 /* Set result info bits */
1770 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1771 MVPP2_PRS_RI_L2_CAST_MASK);
1772
1773 /* Shift to ethertype */
1774 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1775 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1776
1777 /* Mask all ports */
1778 mvpp2_prs_tcam_port_map_set(&pe, 0);
1779
1780 /* Update shadow table */
1781 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1782 }
1783
1784 /* Update port mask */
1785 mvpp2_prs_tcam_port_set(&pe, port, add);
1786
1787 mvpp2_prs_hw_write(priv, &pe);
1788}
1789
1790/* Accept multicast */
1791static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1792 bool add)
1793{
1794 struct mvpp2_prs_entry pe;
1795 unsigned char da_mc;
1796
1797 /* Ethernet multicast address first byte is
1798 * 0x01 for IPv4 and 0x33 for IPv6
1799 */
1800 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1801
1802 if (priv->prs_shadow[index].valid) {
 1803		/* Entry exists - update port only */
1804 pe.index = index;
1805 mvpp2_prs_hw_read(priv, &pe);
1806 } else {
1807 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001808 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001809 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1810 pe.index = index;
1811
1812 /* Continue - set next lookup */
1813 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1814
1815 /* Set result info bits */
1816 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1817 MVPP2_PRS_RI_L2_CAST_MASK);
1818
1819 /* Update tcam entry data first byte */
1820 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1821
1822 /* Shift to ethertype */
1823 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1824 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1825
1826 /* Mask all ports */
1827 mvpp2_prs_tcam_port_map_set(&pe, 0);
1828
1829 /* Update shadow table */
1830 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1831 }
1832
1833 /* Update port mask */
1834 mvpp2_prs_tcam_port_set(&pe, port, add);
1835
1836 mvpp2_prs_hw_write(priv, &pe);
1837}
1838
1839/* Set entry for dsa packets */
1840static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1841 bool tagged, bool extend)
1842{
1843 struct mvpp2_prs_entry pe;
1844 int tid, shift;
1845
1846 if (extend) {
1847 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1848 shift = 8;
1849 } else {
1850 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1851 shift = 4;
1852 }
1853
1854 if (priv->prs_shadow[tid].valid) {
 1855		/* Entry exists - update port only */
1856 pe.index = tid;
1857 mvpp2_prs_hw_read(priv, &pe);
1858 } else {
1859 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001860 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001861 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1862 pe.index = tid;
1863
 1864		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
1865 mvpp2_prs_sram_shift_set(&pe, shift,
1866 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1867
1868 /* Update shadow table */
1869 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1870
1871 if (tagged) {
1872 /* Set tagged bit in DSA tag */
1873 mvpp2_prs_tcam_data_byte_set(&pe, 0,
1874 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1875 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1876 /* Clear all ai bits for next iteration */
1877 mvpp2_prs_sram_ai_update(&pe, 0,
1878 MVPP2_PRS_SRAM_AI_MASK);
1879 /* If packet is tagged continue check vlans */
1880 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1881 } else {
1882 /* Set result info bits to 'no vlans' */
1883 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1884 MVPP2_PRS_RI_VLAN_MASK);
1885 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1886 }
1887
1888 /* Mask all ports */
1889 mvpp2_prs_tcam_port_map_set(&pe, 0);
1890 }
1891
1892 /* Update port mask */
1893 mvpp2_prs_tcam_port_set(&pe, port, add);
1894
1895 mvpp2_prs_hw_write(priv, &pe);
1896}
1897
1898/* Set entry for dsa ethertype */
1899static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1900 bool add, bool tagged, bool extend)
1901{
1902 struct mvpp2_prs_entry pe;
1903 int tid, shift, port_mask;
1904
1905 if (extend) {
1906 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1907 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1908 port_mask = 0;
1909 shift = 8;
1910 } else {
1911 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1912 MVPP2_PE_ETYPE_DSA_UNTAGGED;
1913 port_mask = MVPP2_PRS_PORT_MASK;
1914 shift = 4;
1915 }
1916
1917 if (priv->prs_shadow[tid].valid) {
 1918		/* Entry exists - update port only */
1919 pe.index = tid;
1920 mvpp2_prs_hw_read(priv, &pe);
1921 } else {
1922 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001923 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001924 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1925 pe.index = tid;
1926
1927 /* Set ethertype */
1928 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
1929 mvpp2_prs_match_etype(&pe, 2, 0);
1930
1931 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
1932 MVPP2_PRS_RI_DSA_MASK);
 1933		/* Shift ethertype + 2 reserved bytes + tag */
1934 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
1935 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1936
1937 /* Update shadow table */
1938 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1939
1940 if (tagged) {
1941 /* Set tagged bit in DSA tag */
1942 mvpp2_prs_tcam_data_byte_set(&pe,
1943 MVPP2_ETH_TYPE_LEN + 2 + 3,
1944 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1945 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1946 /* Clear all ai bits for next iteration */
1947 mvpp2_prs_sram_ai_update(&pe, 0,
1948 MVPP2_PRS_SRAM_AI_MASK);
1949 /* If packet is tagged continue check vlans */
1950 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1951 } else {
1952 /* Set result info bits to 'no vlans' */
1953 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1954 MVPP2_PRS_RI_VLAN_MASK);
1955 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1956 }
1957 /* Mask/unmask all ports, depending on dsa type */
1958 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
1959 }
1960
1961 /* Update port mask */
1962 mvpp2_prs_tcam_port_set(&pe, port, add);
1963
1964 mvpp2_prs_hw_write(priv, &pe);
1965}
1966
1967/* Search for existing single/triple vlan entry */
1968static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
1969 unsigned short tpid, int ai)
1970{
1971 struct mvpp2_prs_entry *pe;
1972 int tid;
1973
1974 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1975 if (!pe)
1976 return NULL;
1977 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1978
 1979	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
1980 for (tid = MVPP2_PE_FIRST_FREE_TID;
1981 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1982 unsigned int ri_bits, ai_bits;
1983 bool match;
1984
1985 if (!priv->prs_shadow[tid].valid ||
1986 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1987 continue;
1988
1989 pe->index = tid;
1990
1991 mvpp2_prs_hw_read(priv, pe);
1992 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
1993 if (!match)
1994 continue;
1995
1996 /* Get vlan type */
1997 ri_bits = mvpp2_prs_sram_ri_get(pe);
1998 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
1999
2000 /* Get current ai value from tcam */
2001 ai_bits = mvpp2_prs_tcam_ai_get(pe);
2002 /* Clear double vlan bit */
2003 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
2004
2005 if (ai != ai_bits)
2006 continue;
2007
2008 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2009 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2010 return pe;
2011 }
2012 kfree(pe);
2013
2014 return NULL;
2015}
2016
2017/* Add/update single/triple vlan entry */
2018static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
2019 unsigned int port_map)
2020{
2021 struct mvpp2_prs_entry *pe;
2022 int tid_aux, tid;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302023 int ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002024
2025 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
2026
2027 if (!pe) {
2028 /* Create new tcam entry */
2029 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
2030 MVPP2_PE_FIRST_FREE_TID);
2031 if (tid < 0)
2032 return tid;
2033
2034 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2035 if (!pe)
2036 return -ENOMEM;
2037
2038 /* Get last double vlan tid */
2039 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
2040 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
2041 unsigned int ri_bits;
2042
2043 if (!priv->prs_shadow[tid_aux].valid ||
2044 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2045 continue;
2046
2047 pe->index = tid_aux;
2048 mvpp2_prs_hw_read(priv, pe);
2049 ri_bits = mvpp2_prs_sram_ri_get(pe);
2050 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
2051 MVPP2_PRS_RI_VLAN_DOUBLE)
2052 break;
2053 }
2054
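        /* A new single/triple VLAN entry must land at a higher TID than
         * every existing double VLAN entry, presumably so that the more
         * specific double VLAN match is reached first.
         */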
Sudip Mukherjee43737472014-11-01 16:59:34 +05302055 if (tid <= tid_aux) {
2056 ret = -EINVAL;
Markus Elfringf9fd0e32017-04-17 13:50:35 +02002057 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302058 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002059
Markus Elfringbd6aaf52017-04-17 10:40:32 +02002060 memset(pe, 0, sizeof(*pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002061 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2062 pe->index = tid;
2063
2064 mvpp2_prs_match_etype(pe, 0, tpid);
2065
2066 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
2067 /* Shift 4 bytes - skip 1 vlan tag */
2068 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
2069 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2070 /* Clear all ai bits for next iteration */
2071 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2072
2073 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2074 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
2075 MVPP2_PRS_RI_VLAN_MASK);
2076 } else {
2077 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2078 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2079 MVPP2_PRS_RI_VLAN_MASK);
2080 }
2081 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2082
2083 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2084 }
2085 /* Update ports' mask */
2086 mvpp2_prs_tcam_port_map_set(pe, port_map);
2087
2088 mvpp2_prs_hw_write(priv, pe);
Markus Elfringf9fd0e32017-04-17 13:50:35 +02002089free_pe:
Marcin Wojtas3f518502014-07-10 16:52:13 -03002090 kfree(pe);
2091
Sudip Mukherjee43737472014-11-01 16:59:34 +05302092 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002093}
2094
2095/* Get first free double vlan ai number */
2096static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2097{
2098 int i;
2099
2100 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2101 if (!priv->prs_double_vlans[i])
2102 return i;
2103 }
2104
2105 return -EINVAL;
2106}
2107
2108/* Search for existing double vlan entry */
2109static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
2110 unsigned short tpid1,
2111 unsigned short tpid2)
2112{
2113 struct mvpp2_prs_entry *pe;
2114 int tid;
2115
2116 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2117 if (!pe)
2118 return NULL;
2119 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2120
 2121	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
2122 for (tid = MVPP2_PE_FIRST_FREE_TID;
2123 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2124 unsigned int ri_mask;
2125 bool match;
2126
2127 if (!priv->prs_shadow[tid].valid ||
2128 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2129 continue;
2130
2131 pe->index = tid;
2132 mvpp2_prs_hw_read(priv, pe);
2133
2134 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
2135 && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
2136
2137 if (!match)
2138 continue;
2139
2140 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
2141 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2142 return pe;
2143 }
2144 kfree(pe);
2145
2146 return NULL;
2147}
2148
2149/* Add or update double vlan entry */
2150static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2151 unsigned short tpid2,
2152 unsigned int port_map)
2153{
2154 struct mvpp2_prs_entry *pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302155 int tid_aux, tid, ai, ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002156
2157 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2158
2159 if (!pe) {
2160 /* Create new tcam entry */
2161 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2162 MVPP2_PE_LAST_FREE_TID);
2163 if (tid < 0)
2164 return tid;
2165
2166 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2167 if (!pe)
2168 return -ENOMEM;
2169
2170 /* Set ai value for new double vlan entry */
2171 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
Sudip Mukherjee43737472014-11-01 16:59:34 +05302172 if (ai < 0) {
2173 ret = ai;
Markus Elfringc9a7e122017-04-17 13:03:49 +02002174 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302175 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002176
2177 /* Get first single/triple vlan tid */
2178 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2179 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2180 unsigned int ri_bits;
2181
2182 if (!priv->prs_shadow[tid_aux].valid ||
2183 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2184 continue;
2185
2186 pe->index = tid_aux;
2187 mvpp2_prs_hw_read(priv, pe);
2188 ri_bits = mvpp2_prs_sram_ri_get(pe);
2189 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2190 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2191 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2192 break;
2193 }
2194
Sudip Mukherjee43737472014-11-01 16:59:34 +05302195 if (tid >= tid_aux) {
2196 ret = -ERANGE;
Markus Elfringc9a7e122017-04-17 13:03:49 +02002197 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302198 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002199
Markus Elfringbd6aaf52017-04-17 10:40:32 +02002200 memset(pe, 0, sizeof(*pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002201 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2202 pe->index = tid;
2203
2204 priv->prs_double_vlans[ai] = true;
2205
2206 mvpp2_prs_match_etype(pe, 0, tpid1);
2207 mvpp2_prs_match_etype(pe, 4, tpid2);
2208
2209 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
2210 /* Shift 8 bytes - skip 2 vlan tags */
2211 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
2212 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2213 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2214 MVPP2_PRS_RI_VLAN_MASK);
2215 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2216 MVPP2_PRS_SRAM_AI_MASK);
2217
2218 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2219 }
2220
2221 /* Update ports' mask */
2222 mvpp2_prs_tcam_port_map_set(pe, port_map);
2223 mvpp2_prs_hw_write(priv, pe);
Markus Elfringc9a7e122017-04-17 13:03:49 +02002224free_pe:
Marcin Wojtas3f518502014-07-10 16:52:13 -03002225 kfree(pe);
Sudip Mukherjee43737472014-11-01 16:59:34 +05302226 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002227}
2228
2229/* IPv4 header parsing for fragmentation and L4 offset */
2230static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2231 unsigned int ri, unsigned int ri_mask)
2232{
2233 struct mvpp2_prs_entry pe;
2234 int tid;
2235
2236 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2237 (proto != IPPROTO_IGMP))
2238 return -EINVAL;
2239
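    /* Two entries are installed per protocol: a catch-all one that also
     * flags the packet as fragmented, and a more specific one below for
     * packets whose fragmentation fields are zero.
     */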
2240 /* Fragmented packet */
2241 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2242 MVPP2_PE_LAST_FREE_TID);
2243 if (tid < 0)
2244 return tid;
2245
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002246 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002247 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2248 pe.index = tid;
2249
2250 /* Set next lu to IPv4 */
2251 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2252 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2253 /* Set L4 offset */
2254 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2255 sizeof(struct iphdr) - 4,
2256 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2257 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2258 MVPP2_PRS_IPV4_DIP_AI_BIT);
2259 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
2260 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2261
2262 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2263 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2264 /* Unmask all ports */
2265 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2266
2267 /* Update shadow table and hw entry */
2268 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2269 mvpp2_prs_hw_write(priv, &pe);
2270
2271 /* Not fragmented packet */
2272 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2273 MVPP2_PE_LAST_FREE_TID);
2274 if (tid < 0)
2275 return tid;
2276
2277 pe.index = tid;
2278 /* Clear ri before updating */
2279 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2280 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2281 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2282
2283 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
2284 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
2285
2286 /* Update shadow table and hw entry */
2287 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2288 mvpp2_prs_hw_write(priv, &pe);
2289
2290 return 0;
2291}
2292
2293/* IPv4 L3 multicast or broadcast */
2294static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2295{
2296 struct mvpp2_prs_entry pe;
2297 int mask, tid;
2298
2299 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2300 MVPP2_PE_LAST_FREE_TID);
2301 if (tid < 0)
2302 return tid;
2303
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002304 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002305 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2306 pe.index = tid;
2307
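    /* Multicast is matched on the 224.0.0.0/4 prefix of the first DIP
     * byte; broadcast requires all four DIP bytes to be 0xff.
     */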
2308 switch (l3_cast) {
2309 case MVPP2_PRS_L3_MULTI_CAST:
2310 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2311 MVPP2_PRS_IPV4_MC_MASK);
2312 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2313 MVPP2_PRS_RI_L3_ADDR_MASK);
2314 break;
2315 case MVPP2_PRS_L3_BROAD_CAST:
2316 mask = MVPP2_PRS_IPV4_BC_MASK;
2317 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2318 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2319 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2320 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2321 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2322 MVPP2_PRS_RI_L3_ADDR_MASK);
2323 break;
2324 default:
2325 return -EINVAL;
2326 }
2327
2328 /* Finished: go to flowid generation */
2329 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2330 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2331
2332 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2333 MVPP2_PRS_IPV4_DIP_AI_BIT);
2334 /* Unmask all ports */
2335 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2336
2337 /* Update shadow table and hw entry */
2338 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2339 mvpp2_prs_hw_write(priv, &pe);
2340
2341 return 0;
2342}
2343
2344/* Set entries for protocols over IPv6 */
2345static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2346 unsigned int ri, unsigned int ri_mask)
2347{
2348 struct mvpp2_prs_entry pe;
2349 int tid;
2350
2351 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2352 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2353 return -EINVAL;
2354
2355 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2356 MVPP2_PE_LAST_FREE_TID);
2357 if (tid < 0)
2358 return tid;
2359
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002360 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002361 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2362 pe.index = tid;
2363
2364 /* Finished: go to flowid generation */
2365 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2366 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2367 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2368 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2369 sizeof(struct ipv6hdr) - 6,
2370 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2371
2372 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2373 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2374 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2375 /* Unmask all ports */
2376 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2377
2378 /* Write HW */
2379 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2380 mvpp2_prs_hw_write(priv, &pe);
2381
2382 return 0;
2383}
2384
2385/* IPv6 L3 multicast entry */
2386static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2387{
2388 struct mvpp2_prs_entry pe;
2389 int tid;
2390
2391 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2392 return -EINVAL;
2393
2394 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2395 MVPP2_PE_LAST_FREE_TID);
2396 if (tid < 0)
2397 return tid;
2398
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002399 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002400 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2401 pe.index = tid;
2402
2403 /* Finished: go to flowid generation */
2404 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2405 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2406 MVPP2_PRS_RI_L3_ADDR_MASK);
2407 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2408 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2409 /* Shift back to IPv6 NH */
2410 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2411
2412 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2413 MVPP2_PRS_IPV6_MC_MASK);
2414 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2415 /* Unmask all ports */
2416 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2417
2418 /* Update shadow table and hw entry */
2419 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2420 mvpp2_prs_hw_write(priv, &pe);
2421
2422 return 0;
2423}
2424
2425/* Parser per-port initialization */
2426static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2427 int lu_max, int offset)
2428{
2429 u32 val;
2430
2431 /* Set lookup ID */
2432 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2433 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2434 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2435 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2436
2437 /* Set maximum number of loops for packet received from port */
2438 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2439 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2440 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2441 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2442
2443 /* Set initial offset for packet header extraction for the first
2444 * searching loop
2445 */
2446 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2447 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2448 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2449 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2450}
2451
2452/* Default flow entries initialization for all ports */
2453static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2454{
2455 struct mvpp2_prs_entry pe;
2456 int port;
2457
2458 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
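        /* One entry per port: the TCAM index counts down from the first
         * default-flow slot and the port number doubles as the flow ID.
         */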
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002459 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002460 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2461 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2462
2463 /* Mask all ports */
2464 mvpp2_prs_tcam_port_map_set(&pe, 0);
2465
2466 /* Set flow ID*/
2467 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2468 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2469
2470 /* Update shadow table and hw entry */
2471 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2472 mvpp2_prs_hw_write(priv, &pe);
2473 }
2474}
2475
2476/* Set default entry for Marvell Header field */
2477static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2478{
2479 struct mvpp2_prs_entry pe;
2480
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002481 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002482
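    /* Default for all ports: skip the Marvell header (MVPP2_MH_SIZE
     * bytes) and continue with the MAC lookup.
     */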
2483 pe.index = MVPP2_PE_MH_DEFAULT;
2484 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2485 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2486 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2487 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2488
2489 /* Unmask all ports */
2490 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2491
2492 /* Update shadow table and hw entry */
2493 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2494 mvpp2_prs_hw_write(priv, &pe);
2495}
2496
 2497/* Set default entries (placeholders) for promiscuous, non-promiscuous and
2498 * multicast MAC addresses
2499 */
2500static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2501{
2502 struct mvpp2_prs_entry pe;
2503
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002504 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002505
2506 /* Non-promiscuous mode for all ports - DROP unknown packets */
2507 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2508 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2509
2510 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2511 MVPP2_PRS_RI_DROP_MASK);
2512 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2513 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2514
2515 /* Unmask all ports */
2516 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2517
2518 /* Update shadow table and hw entry */
2519 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2520 mvpp2_prs_hw_write(priv, &pe);
2521
 2522	/* placeholders only - no ports */
2523 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2524 mvpp2_prs_mac_promisc_set(priv, 0, false);
2525 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2526 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2527}
2528
2529/* Set default entries for various types of dsa packets */
2530static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2531{
2532 struct mvpp2_prs_entry pe;
2533
 2534	/* Non-tagged EDSA entry - placeholder */
2535 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2536 MVPP2_PRS_EDSA);
2537
 2538	/* Tagged EDSA entry - placeholder */
2539 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2540
 2541	/* Non-tagged DSA entry - placeholder */
2542 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2543 MVPP2_PRS_DSA);
2544
 2545	/* Tagged DSA entry - placeholder */
2546 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2547
 2548	/* Non-tagged EDSA ethertype entry - placeholder */
2549 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2550 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2551
 2552	/* Tagged EDSA ethertype entry - placeholder */
2553 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2554 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2555
 2556	/* Non-tagged DSA ethertype entry */
2557 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2558 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2559
2560 /* Tagged DSA ethertype entry */
2561 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2562 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2563
2564 /* Set default entry, in case DSA or EDSA tag not found */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002565 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002566 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2567 pe.index = MVPP2_PE_DSA_DEFAULT;
2568 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2569
2570 /* Shift 0 bytes */
2571 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2572 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2573
2574 /* Clear all sram ai bits for next iteration */
2575 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2576
2577 /* Unmask all ports */
2578 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2579
2580 mvpp2_prs_hw_write(priv, &pe);
2581}
2582
2583/* Match basic ethertypes */
2584static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2585{
2586 struct mvpp2_prs_entry pe;
2587 int tid;
2588
2589 /* Ethertype: PPPoE */
2590 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2591 MVPP2_PE_LAST_FREE_TID);
2592 if (tid < 0)
2593 return tid;
2594
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002595 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002596 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2597 pe.index = tid;
2598
2599 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2600
2601 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2602 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2603 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2604 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2605 MVPP2_PRS_RI_PPPOE_MASK);
2606
2607 /* Update shadow table and hw entry */
2608 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2609 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2610 priv->prs_shadow[pe.index].finish = false;
2611 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2612 MVPP2_PRS_RI_PPPOE_MASK);
2613 mvpp2_prs_hw_write(priv, &pe);
2614
2615 /* Ethertype: ARP */
2616 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2617 MVPP2_PE_LAST_FREE_TID);
2618 if (tid < 0)
2619 return tid;
2620
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002621 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002622 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2623 pe.index = tid;
2624
2625 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2626
 2627	/* Generate flow in the next iteration */
2628 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2629 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2630 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2631 MVPP2_PRS_RI_L3_PROTO_MASK);
2632 /* Set L3 offset */
2633 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2634 MVPP2_ETH_TYPE_LEN,
2635 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2636
2637 /* Update shadow table and hw entry */
2638 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2639 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2640 priv->prs_shadow[pe.index].finish = true;
2641 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2642 MVPP2_PRS_RI_L3_PROTO_MASK);
2643 mvpp2_prs_hw_write(priv, &pe);
2644
2645 /* Ethertype: LBTD */
2646 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2647 MVPP2_PE_LAST_FREE_TID);
2648 if (tid < 0)
2649 return tid;
2650
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002651 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002652 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2653 pe.index = tid;
2654
2655 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2656
 2657	/* Generate flow in the next iteration */
2658 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2659 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2660 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2661 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2662 MVPP2_PRS_RI_CPU_CODE_MASK |
2663 MVPP2_PRS_RI_UDF3_MASK);
2664 /* Set L3 offset */
2665 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2666 MVPP2_ETH_TYPE_LEN,
2667 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2668
2669 /* Update shadow table and hw entry */
2670 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2671 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2672 priv->prs_shadow[pe.index].finish = true;
2673 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2674 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2675 MVPP2_PRS_RI_CPU_CODE_MASK |
2676 MVPP2_PRS_RI_UDF3_MASK);
2677 mvpp2_prs_hw_write(priv, &pe);
2678
2679 /* Ethertype: IPv4 without options */
2680 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2681 MVPP2_PE_LAST_FREE_TID);
2682 if (tid < 0)
2683 return tid;
2684
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002685 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002686 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2687 pe.index = tid;
2688
2689 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2690 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2691 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2692 MVPP2_PRS_IPV4_HEAD_MASK |
2693 MVPP2_PRS_IPV4_IHL_MASK);
2694
2695 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2696 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2697 MVPP2_PRS_RI_L3_PROTO_MASK);
2698 /* Skip eth_type + 4 bytes of IP header */
2699 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2700 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2701 /* Set L3 offset */
2702 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2703 MVPP2_ETH_TYPE_LEN,
2704 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2705
2706 /* Update shadow table and hw entry */
2707 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2708 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2709 priv->prs_shadow[pe.index].finish = false;
2710 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2711 MVPP2_PRS_RI_L3_PROTO_MASK);
2712 mvpp2_prs_hw_write(priv, &pe);
2713
2714 /* Ethertype: IPv4 with options */
2715 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2716 MVPP2_PE_LAST_FREE_TID);
2717 if (tid < 0)
2718 return tid;
2719
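    /* Reuse the previous IPv4 sw entry: keep the ethertype match, relax
     * the version/IHL byte so any header length matches, and switch the
     * result info to "IPv4 with options".
     */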
2720 pe.index = tid;
2721
2722 /* Clear tcam data before updating */
2723 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2724 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2725
2726 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2727 MVPP2_PRS_IPV4_HEAD,
2728 MVPP2_PRS_IPV4_HEAD_MASK);
2729
2730 /* Clear ri before updating */
2731 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2732 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2733 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2734 MVPP2_PRS_RI_L3_PROTO_MASK);
2735
2736 /* Update shadow table and hw entry */
2737 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2738 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2739 priv->prs_shadow[pe.index].finish = false;
2740 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2741 MVPP2_PRS_RI_L3_PROTO_MASK);
2742 mvpp2_prs_hw_write(priv, &pe);
2743
2744 /* Ethertype: IPv6 without options */
2745 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2746 MVPP2_PE_LAST_FREE_TID);
2747 if (tid < 0)
2748 return tid;
2749
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002750 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002751 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2752 pe.index = tid;
2753
2754 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2755
2756 /* Skip DIP of IPV6 header */
2757 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2758 MVPP2_MAX_L3_ADDR_SIZE,
2759 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2760 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2761 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2762 MVPP2_PRS_RI_L3_PROTO_MASK);
2763 /* Set L3 offset */
2764 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2765 MVPP2_ETH_TYPE_LEN,
2766 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2767
2768 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2769 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2770 priv->prs_shadow[pe.index].finish = false;
2771 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2772 MVPP2_PRS_RI_L3_PROTO_MASK);
2773 mvpp2_prs_hw_write(priv, &pe);
2774
2775 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2776 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2777 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2778 pe.index = MVPP2_PE_ETH_TYPE_UN;
2779
2780 /* Unmask all ports */
2781 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2782
 2783	/* Generate flow in the next iteration */
2784 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2785 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2786 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2787 MVPP2_PRS_RI_L3_PROTO_MASK);
 2788	/* Set L3 offset even if it's unknown L3 */
2789 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2790 MVPP2_ETH_TYPE_LEN,
2791 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2792
2793 /* Update shadow table and hw entry */
2794 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2795 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2796 priv->prs_shadow[pe.index].finish = true;
2797 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2798 MVPP2_PRS_RI_L3_PROTO_MASK);
2799 mvpp2_prs_hw_write(priv, &pe);
2800
2801 return 0;
2802}
2803
2804/* Configure vlan entries and detect up to 2 successive VLAN tags.
2805 * Possible options:
2806 * 0x8100, 0x88A8
2807 * 0x8100, 0x8100
2808 * 0x8100
2809 * 0x88A8
2810 */
2811static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2812{
2813 struct mvpp2_prs_entry pe;
2814 int err;
2815
2816 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
2817 MVPP2_PRS_DBL_VLANS_MAX,
2818 GFP_KERNEL);
2819 if (!priv->prs_double_vlans)
2820 return -ENOMEM;
2821
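    /* Install the double VLAN combinations first so they get lower TIDs
     * than the single VLAN entries added below; mvpp2_prs_vlan_add()
     * enforces this ordering.
     */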
2822 /* Double VLAN: 0x8100, 0x88A8 */
2823 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2824 MVPP2_PRS_PORT_MASK);
2825 if (err)
2826 return err;
2827
2828 /* Double VLAN: 0x8100, 0x8100 */
2829 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2830 MVPP2_PRS_PORT_MASK);
2831 if (err)
2832 return err;
2833
2834 /* Single VLAN: 0x88a8 */
2835 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2836 MVPP2_PRS_PORT_MASK);
2837 if (err)
2838 return err;
2839
2840 /* Single VLAN: 0x8100 */
2841 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2842 MVPP2_PRS_PORT_MASK);
2843 if (err)
2844 return err;
2845
2846 /* Set default double vlan entry */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002847 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002848 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2849 pe.index = MVPP2_PE_VLAN_DBL;
2850
2851 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2852 /* Clear ai for next iterations */
2853 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2854 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2855 MVPP2_PRS_RI_VLAN_MASK);
2856
2857 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2858 MVPP2_PRS_DBL_VLAN_AI_BIT);
2859 /* Unmask all ports */
2860 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2861
2862 /* Update shadow table and hw entry */
2863 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2864 mvpp2_prs_hw_write(priv, &pe);
2865
2866 /* Set default vlan none entry */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002867 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002868 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2869 pe.index = MVPP2_PE_VLAN_NONE;
2870
2871 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2872 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2873 MVPP2_PRS_RI_VLAN_MASK);
2874
2875 /* Unmask all ports */
2876 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2877
2878 /* Update shadow table and hw entry */
2879 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2880 mvpp2_prs_hw_write(priv, &pe);
2881
2882 return 0;
2883}
2884
2885/* Set entries for PPPoE ethertype */
2886static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2887{
2888 struct mvpp2_prs_entry pe;
2889 int tid;
2890
2891 /* IPv4 over PPPoE with options */
2892 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2893 MVPP2_PE_LAST_FREE_TID);
2894 if (tid < 0)
2895 return tid;
2896
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002897 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002898 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2899 pe.index = tid;
2900
2901 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2902
2903 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2904 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2905 MVPP2_PRS_RI_L3_PROTO_MASK);
2906 /* Skip eth_type + 4 bytes of IP header */
2907 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2908 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2909 /* Set L3 offset */
2910 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2911 MVPP2_ETH_TYPE_LEN,
2912 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2913
2914 /* Update shadow table and hw entry */
2915 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2916 mvpp2_prs_hw_write(priv, &pe);
2917
2918 /* IPv4 over PPPoE without options */
2919 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2920 MVPP2_PE_LAST_FREE_TID);
2921 if (tid < 0)
2922 return tid;
2923
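    /* Reuse the previous sw entry: only the index, the version/IHL byte
     * match and the result info differ from the "with options" case.
     */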
2924 pe.index = tid;
2925
2926 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2927 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2928 MVPP2_PRS_IPV4_HEAD_MASK |
2929 MVPP2_PRS_IPV4_IHL_MASK);
2930
2931 /* Clear ri before updating */
2932 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2933 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2934 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2935 MVPP2_PRS_RI_L3_PROTO_MASK);
2936
2937 /* Update shadow table and hw entry */
2938 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2939 mvpp2_prs_hw_write(priv, &pe);
2940
2941 /* IPv6 over PPPoE */
2942 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2943 MVPP2_PE_LAST_FREE_TID);
2944 if (tid < 0)
2945 return tid;
2946
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002947 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002948 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2949 pe.index = tid;
2950
2951 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2952
2953 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2954 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2955 MVPP2_PRS_RI_L3_PROTO_MASK);
2956 /* Skip eth_type + 4 bytes of IPv6 header */
2957 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2958 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2959 /* Set L3 offset */
2960 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2961 MVPP2_ETH_TYPE_LEN,
2962 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2963
2964 /* Update shadow table and hw entry */
2965 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2966 mvpp2_prs_hw_write(priv, &pe);
2967
2968 /* Non-IP over PPPoE */
2969 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2970 MVPP2_PE_LAST_FREE_TID);
2971 if (tid < 0)
2972 return tid;
2973
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002974 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002975 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2976 pe.index = tid;
2977
2978 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2979 MVPP2_PRS_RI_L3_PROTO_MASK);
2980
2981 /* Finished: go to flowid generation */
2982 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2983 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2984 /* Set L3 offset even if it's unknown L3 */
2985 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2986 MVPP2_ETH_TYPE_LEN,
2987 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2988
2989 /* Update shadow table and hw entry */
2990 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2991 mvpp2_prs_hw_write(priv, &pe);
2992
2993 return 0;
2994}
2995
2996/* Initialize entries for IPv4 */
2997static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
2998{
2999 struct mvpp2_prs_entry pe;
3000 int err;
3001
3002 /* Set entries for TCP, UDP and IGMP over IPv4 */
3003 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
3004 MVPP2_PRS_RI_L4_PROTO_MASK);
3005 if (err)
3006 return err;
3007
3008 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
3009 MVPP2_PRS_RI_L4_PROTO_MASK);
3010 if (err)
3011 return err;
3012
3013 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
3014 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3015 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3016 MVPP2_PRS_RI_CPU_CODE_MASK |
3017 MVPP2_PRS_RI_UDF3_MASK);
3018 if (err)
3019 return err;
3020
3021 /* IPv4 Broadcast */
3022 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
3023 if (err)
3024 return err;
3025
3026 /* IPv4 Multicast */
3027 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3028 if (err)
3029 return err;
3030
3031 /* Default IPv4 entry for unknown protocols */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003032 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003033 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3034 pe.index = MVPP2_PE_IP4_PROTO_UN;
3035
3036 /* Set next lu to IPv4 */
3037 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
3038 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3039 /* Set L4 offset */
3040 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3041 sizeof(struct iphdr) - 4,
3042 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3043 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3044 MVPP2_PRS_IPV4_DIP_AI_BIT);
3045 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3046 MVPP2_PRS_RI_L4_PROTO_MASK);
3047
3048 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
3049 /* Unmask all ports */
3050 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3051
3052 /* Update shadow table and hw entry */
3053 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3054 mvpp2_prs_hw_write(priv, &pe);
3055
3056 /* Default IPv4 entry for unicast address */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003057 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003058 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3059 pe.index = MVPP2_PE_IP4_ADDR_UN;
3060
3061 /* Finished: go to flowid generation */
3062 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3063 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3064 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3065 MVPP2_PRS_RI_L3_ADDR_MASK);
3066
3067 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3068 MVPP2_PRS_IPV4_DIP_AI_BIT);
3069 /* Unmask all ports */
3070 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3071
3072 /* Update shadow table and hw entry */
3073 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3074 mvpp2_prs_hw_write(priv, &pe);
3075
3076 return 0;
3077}
3078
3079/* Initialize entries for IPv6 */
3080static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
3081{
3082 struct mvpp2_prs_entry pe;
3083 int tid, err;
3084
3085 /* Set entries for TCP, UDP and ICMP over IPv6 */
3086 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
3087 MVPP2_PRS_RI_L4_TCP,
3088 MVPP2_PRS_RI_L4_PROTO_MASK);
3089 if (err)
3090 return err;
3091
3092 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
3093 MVPP2_PRS_RI_L4_UDP,
3094 MVPP2_PRS_RI_L4_PROTO_MASK);
3095 if (err)
3096 return err;
3097
3098 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
3099 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3100 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3101 MVPP2_PRS_RI_CPU_CODE_MASK |
3102 MVPP2_PRS_RI_UDF3_MASK);
3103 if (err)
3104 return err;
3105
3106	/* IPv4 is the last header: a similar case to 6-TCP or 17-UDP */
3107 /* Result Info: UDF7=1, DS lite */
3108 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
3109 MVPP2_PRS_RI_UDF7_IP6_LITE,
3110 MVPP2_PRS_RI_UDF7_MASK);
3111 if (err)
3112 return err;
3113
3114 /* IPv6 multicast */
3115 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3116 if (err)
3117 return err;
3118
3119 /* Entry for checking hop limit */
3120 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3121 MVPP2_PE_LAST_FREE_TID);
3122 if (tid < 0)
3123 return tid;
3124
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003125 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003126 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3127 pe.index = tid;
3128
3129 /* Finished: go to flowid generation */
3130 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3131 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3132 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
3133 MVPP2_PRS_RI_DROP_MASK,
3134 MVPP2_PRS_RI_L3_PROTO_MASK |
3135 MVPP2_PRS_RI_DROP_MASK);
3136
3137 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
3138 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3139 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3140
3141 /* Update shadow table and hw entry */
3142 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3143 mvpp2_prs_hw_write(priv, &pe);
3144
3145 /* Default IPv6 entry for unknown protocols */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003146 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003147 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3148 pe.index = MVPP2_PE_IP6_PROTO_UN;
3149
3150 /* Finished: go to flowid generation */
3151 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3152 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3153 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3154 MVPP2_PRS_RI_L4_PROTO_MASK);
3155	/* Set L4 offset relative to our current position */
3156 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3157 sizeof(struct ipv6hdr) - 4,
3158 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3159
3160 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3161 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3162 /* Unmask all ports */
3163 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3164
3165 /* Update shadow table and hw entry */
3166 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3167 mvpp2_prs_hw_write(priv, &pe);
3168
3169 /* Default IPv6 entry for unknown ext protocols */
3170 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3171 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3172 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
3173
3174 /* Finished: go to flowid generation */
3175 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3176 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3177 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3178 MVPP2_PRS_RI_L4_PROTO_MASK);
3179
3180 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
3181 MVPP2_PRS_IPV6_EXT_AI_BIT);
3182 /* Unmask all ports */
3183 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3184
3185 /* Update shadow table and hw entry */
3186 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3187 mvpp2_prs_hw_write(priv, &pe);
3188
3189 /* Default IPv6 entry for unicast address */
3190 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3191 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3192 pe.index = MVPP2_PE_IP6_ADDR_UN;
3193
3194 /* Finished: go to IPv6 again */
3195 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3196 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3197 MVPP2_PRS_RI_L3_ADDR_MASK);
3198 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3199 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3200 /* Shift back to IPV6 NH */
3201 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3202
3203 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3204 /* Unmask all ports */
3205 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3206
3207 /* Update shadow table and hw entry */
3208 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3209 mvpp2_prs_hw_write(priv, &pe);
3210
3211 return 0;
3212}
3213
3214/* Parser default initialization */
3215static int mvpp2_prs_default_init(struct platform_device *pdev,
3216 struct mvpp2 *priv)
3217{
3218 int err, index, i;
3219
3220 /* Enable tcam table */
3221 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
3222
3223 /* Clear all tcam and sram entries */
3224 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
3225 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
3226 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3227 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
3228
3229 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
3230 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3231 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3232 }
3233
3234 /* Invalidate all tcam entries */
3235 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3236 mvpp2_prs_hw_inv(priv, index);
3237
3238 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
Markus Elfring37df25e2017-04-17 09:12:34 +02003239 sizeof(*priv->prs_shadow),
Marcin Wojtas3f518502014-07-10 16:52:13 -03003240 GFP_KERNEL);
3241 if (!priv->prs_shadow)
3242 return -ENOMEM;
3243
3244 /* Always start from lookup = 0 */
3245 for (index = 0; index < MVPP2_MAX_PORTS; index++)
3246 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3247 MVPP2_PRS_PORT_LU_MAX, 0);
3248
3249 mvpp2_prs_def_flow_init(priv);
3250
3251 mvpp2_prs_mh_init(priv);
3252
3253 mvpp2_prs_mac_init(priv);
3254
3255 mvpp2_prs_dsa_init(priv);
3256
3257 err = mvpp2_prs_etype_init(priv);
3258 if (err)
3259 return err;
3260
3261 err = mvpp2_prs_vlan_init(pdev, priv);
3262 if (err)
3263 return err;
3264
3265 err = mvpp2_prs_pppoe_init(priv);
3266 if (err)
3267 return err;
3268
3269 err = mvpp2_prs_ip6_init(priv);
3270 if (err)
3271 return err;
3272
3273 err = mvpp2_prs_ip4_init(priv);
3274 if (err)
3275 return err;
3276
3277 return 0;
3278}
3279
3280/* Compare MAC DA with tcam entry data */
3281static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3282 const u8 *da, unsigned char *mask)
3283{
3284 unsigned char tcam_byte, tcam_mask;
3285 int index;
3286
3287 for (index = 0; index < ETH_ALEN; index++) {
3288 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3289 if (tcam_mask != mask[index])
3290 return false;
3291
3292 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3293 return false;
3294 }
3295
3296 return true;
3297}
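/* Editorial worked example (not part of the driver): the masked compare
 * above means an entry only has to match the DA bits covered by its mask.
 * With hypothetical values
 *
 *	da[]   = { 0x00, 0x50, 0x43, 0x12, 0x34, 0x56 }
 *	mask[] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 }
 *
 * any TCAM entry whose first three data bytes are 00:50:43 under the same
 * per-byte mask compares equal, whatever its last three bytes hold.
 */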
3298
3299/* Find tcam entry with matched pair <MAC DA, port> */
3300static struct mvpp2_prs_entry *
3301mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3302 unsigned char *mask, int udf_type)
3303{
3304 struct mvpp2_prs_entry *pe;
3305 int tid;
3306
3307 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3308 if (!pe)
3309 return NULL;
3310 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3311
3312	/* Go through all the entries with MVPP2_PRS_LU_MAC */
3313 for (tid = MVPP2_PE_FIRST_FREE_TID;
3314 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3315 unsigned int entry_pmap;
3316
3317 if (!priv->prs_shadow[tid].valid ||
3318 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3319 (priv->prs_shadow[tid].udf != udf_type))
3320 continue;
3321
3322 pe->index = tid;
3323 mvpp2_prs_hw_read(priv, pe);
3324 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3325
3326 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3327 entry_pmap == pmap)
3328 return pe;
3329 }
3330 kfree(pe);
3331
3332 return NULL;
3333}
3334
3335/* Update parser's mac da entry */
3336static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3337 const u8 *da, bool add)
3338{
3339 struct mvpp2_prs_entry *pe;
3340 unsigned int pmap, len, ri;
3341 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3342 int tid;
3343
3344	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
3345 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3346 MVPP2_PRS_UDF_MAC_DEF);
3347
3348 /* No such entry */
3349 if (!pe) {
3350 if (!add)
3351 return 0;
3352
3353 /* Create new TCAM entry */
3354		/* Find the first range MAC entry */
3355 for (tid = MVPP2_PE_FIRST_FREE_TID;
3356 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3357 if (priv->prs_shadow[tid].valid &&
3358 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3359 (priv->prs_shadow[tid].udf ==
3360 MVPP2_PRS_UDF_MAC_RANGE))
3361 break;
3362
3363		/* Go through all the entries from first to last */
3364 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3365 tid - 1);
3366 if (tid < 0)
3367 return tid;
3368
3369 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3370 if (!pe)
Amitoj Kaur Chawlac2bb7bc2016-02-04 19:25:26 +05303371 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003372 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3373 pe->index = tid;
3374
3375 /* Mask all ports */
3376 mvpp2_prs_tcam_port_map_set(pe, 0);
3377 }
3378
3379 /* Update port mask */
3380 mvpp2_prs_tcam_port_set(pe, port, add);
3381
3382 /* Invalidate the entry if no ports are left enabled */
3383 pmap = mvpp2_prs_tcam_port_map_get(pe);
3384 if (pmap == 0) {
3385 if (add) {
3386 kfree(pe);
Amitoj Kaur Chawlac2bb7bc2016-02-04 19:25:26 +05303387 return -EINVAL;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003388 }
3389 mvpp2_prs_hw_inv(priv, pe->index);
3390 priv->prs_shadow[pe->index].valid = false;
3391 kfree(pe);
3392 return 0;
3393 }
3394
3395 /* Continue - set next lookup */
3396 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3397
3398 /* Set match on DA */
3399 len = ETH_ALEN;
3400 while (len--)
3401 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3402
3403 /* Set result info bits */
3404 if (is_broadcast_ether_addr(da))
3405 ri = MVPP2_PRS_RI_L2_BCAST;
3406 else if (is_multicast_ether_addr(da))
3407 ri = MVPP2_PRS_RI_L2_MCAST;
3408 else
3409 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3410
3411 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3412 MVPP2_PRS_RI_MAC_ME_MASK);
3413 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3414 MVPP2_PRS_RI_MAC_ME_MASK);
3415
3416 /* Shift to ethertype */
3417 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3418 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3419
3420 /* Update shadow table and hw entry */
3421 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3422 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3423 mvpp2_prs_hw_write(priv, pe);
3424
3425 kfree(pe);
3426
3427 return 0;
3428}
3429
3430static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3431{
3432 struct mvpp2_port *port = netdev_priv(dev);
3433 int err;
3434
3435 /* Remove old parser entry */
3436 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3437 false);
3438 if (err)
3439 return err;
3440
3441 /* Add new parser entry */
3442 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3443 if (err)
3444 return err;
3445
3446 /* Set addr in the device */
3447 ether_addr_copy(dev->dev_addr, da);
3448
3449 return 0;
3450}
3451
3452/* Delete all port's multicast simple (not range) entries */
3453static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3454{
3455 struct mvpp2_prs_entry pe;
3456 int index, tid;
3457
3458 for (tid = MVPP2_PE_FIRST_FREE_TID;
3459 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3460 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3461
3462 if (!priv->prs_shadow[tid].valid ||
3463 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3464 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3465 continue;
3466
3467 /* Only simple mac entries */
3468 pe.index = tid;
3469 mvpp2_prs_hw_read(priv, &pe);
3470
3471 /* Read mac addr from entry */
3472 for (index = 0; index < ETH_ALEN; index++)
3473 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3474 &da_mask[index]);
3475
3476 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3477 /* Delete this entry */
3478 mvpp2_prs_mac_da_accept(priv, port, da, false);
3479 }
3480}
3481
3482static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3483{
3484 switch (type) {
3485 case MVPP2_TAG_TYPE_EDSA:
3486 /* Add port to EDSA entries */
3487 mvpp2_prs_dsa_tag_set(priv, port, true,
3488 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3489 mvpp2_prs_dsa_tag_set(priv, port, true,
3490 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3491 /* Remove port from DSA entries */
3492 mvpp2_prs_dsa_tag_set(priv, port, false,
3493 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3494 mvpp2_prs_dsa_tag_set(priv, port, false,
3495 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3496 break;
3497
3498 case MVPP2_TAG_TYPE_DSA:
3499 /* Add port to DSA entries */
3500 mvpp2_prs_dsa_tag_set(priv, port, true,
3501 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3502 mvpp2_prs_dsa_tag_set(priv, port, true,
3503 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3504 /* Remove port from EDSA entries */
3505 mvpp2_prs_dsa_tag_set(priv, port, false,
3506 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3507 mvpp2_prs_dsa_tag_set(priv, port, false,
3508 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3509 break;
3510
3511 case MVPP2_TAG_TYPE_MH:
3512 case MVPP2_TAG_TYPE_NONE:
3513		/* Remove port from EDSA and DSA entries */
3514 mvpp2_prs_dsa_tag_set(priv, port, false,
3515 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3516 mvpp2_prs_dsa_tag_set(priv, port, false,
3517 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3518 mvpp2_prs_dsa_tag_set(priv, port, false,
3519 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3520 mvpp2_prs_dsa_tag_set(priv, port, false,
3521 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3522 break;
3523
3524 default:
3525 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3526 return -EINVAL;
3527 }
3528
3529 return 0;
3530}
3531
3532/* Set prs flow for the port */
3533static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3534{
3535 struct mvpp2_prs_entry *pe;
3536 int tid;
3537
3538 pe = mvpp2_prs_flow_find(port->priv, port->id);
3539
3540	/* No such entry exists */
3541 if (!pe) {
3542		/* Go through all the entries from last to first */
3543 tid = mvpp2_prs_tcam_first_free(port->priv,
3544 MVPP2_PE_LAST_FREE_TID,
3545 MVPP2_PE_FIRST_FREE_TID);
3546 if (tid < 0)
3547 return tid;
3548
3549 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3550 if (!pe)
3551 return -ENOMEM;
3552
3553 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3554 pe->index = tid;
3555
3556		/* Set flow ID */
3557 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3558 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3559
3560 /* Update shadow table */
3561 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3562 }
3563
3564 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3565 mvpp2_prs_hw_write(port->priv, pe);
3566 kfree(pe);
3567
3568 return 0;
3569}
3570
3571/* Classifier configuration routines */
3572
3573/* Update classification flow table registers */
3574static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3575 struct mvpp2_cls_flow_entry *fe)
3576{
3577 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3578 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3579 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3580 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3581}
3582
3583/* Update classification lookup table register */
3584static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3585 struct mvpp2_cls_lookup_entry *le)
3586{
3587 u32 val;
3588
3589 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3590 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3591 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3592}
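/* Editorial worked example: the lookup table is addressed by the
 * { way, lkpid } pair packed into MVPP2_CLS_LKP_INDEX_REG. For instance,
 * le->lkpid = 2 and le->way = 1 produce
 * (1 << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | 2 as the index, after which
 * le->data is written through MVPP2_CLS_LKP_TBL_REG, exactly as
 * mvpp2_cls_init() below does for both ways of every lookup ID.
 */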
3593
3594/* Classifier default initialization */
3595static void mvpp2_cls_init(struct mvpp2 *priv)
3596{
3597 struct mvpp2_cls_lookup_entry le;
3598 struct mvpp2_cls_flow_entry fe;
3599 int index;
3600
3601 /* Enable classifier */
3602 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3603
3604 /* Clear classifier flow table */
Arnd Bergmanne8f967c2016-11-24 17:28:12 +01003605 memset(&fe.data, 0, sizeof(fe.data));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003606 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3607 fe.index = index;
3608 mvpp2_cls_flow_write(priv, &fe);
3609 }
3610
3611 /* Clear classifier lookup table */
3612 le.data = 0;
3613 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3614 le.lkpid = index;
3615 le.way = 0;
3616 mvpp2_cls_lookup_write(priv, &le);
3617
3618 le.way = 1;
3619 mvpp2_cls_lookup_write(priv, &le);
3620 }
3621}
3622
3623static void mvpp2_cls_port_config(struct mvpp2_port *port)
3624{
3625 struct mvpp2_cls_lookup_entry le;
3626 u32 val;
3627
3628 /* Set way for the port */
3629 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3630 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3631 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3632
3633 /* Pick the entry to be accessed in lookup ID decoding table
3634 * according to the way and lkpid.
3635 */
3636 le.lkpid = port->id;
3637 le.way = 0;
3638 le.data = 0;
3639
3640 /* Set initial CPU queue for receiving packets */
3641 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3642 le.data |= port->first_rxq;
3643
3644 /* Disable classification engines */
3645 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3646
3647 /* Update lookup ID table entry */
3648 mvpp2_cls_lookup_write(port->priv, &le);
3649}
3650
3651/* Set CPU queue number for oversize packets */
3652static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3653{
3654 u32 val;
3655
3656 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3657 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3658
3659 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3660 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3661
3662 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3663 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3664 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3665}
3666
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003667static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
3668{
3669 if (likely(pool->frag_size <= PAGE_SIZE))
3670 return netdev_alloc_frag(pool->frag_size);
3671 else
3672 return kmalloc(pool->frag_size, GFP_ATOMIC);
3673}
3674
3675static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
3676{
3677 if (likely(pool->frag_size <= PAGE_SIZE))
3678 skb_free_frag(data);
3679 else
3680 kfree(data);
3681}
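/* Editorial note on the design choice: buffers up to one page come from
 * the page-fragment allocator, which is cheap and avoids high-order
 * allocations; only frag sizes above PAGE_SIZE (e.g. jumbo-frame pools on
 * 4 KiB-page systems) fall back to kmalloc(). Alloc and free must stay
 * symmetric, hence both helpers test the same pool->frag_size threshold.
 * A minimal sketch of a matched pair, assuming an already configured pool:
 *
 *	void *buf = mvpp2_frag_alloc(pool);
 *
 *	if (buf)
 *		mvpp2_frag_free(pool, buf);
 */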
3682
Marcin Wojtas3f518502014-07-10 16:52:13 -03003683/* Buffer Manager configuration routines */
3684
3685/* Create pool */
3686static int mvpp2_bm_pool_create(struct platform_device *pdev,
3687 struct mvpp2 *priv,
3688 struct mvpp2_bm_pool *bm_pool, int size)
3689{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003690 u32 val;
3691
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003692 /* Number of buffer pointers must be a multiple of 16, as per
3693 * hardware constraints
3694 */
3695 if (!IS_ALIGNED(size, 16))
3696 return -EINVAL;
3697
3698 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
3699 * bytes per buffer pointer
3700 */
3701 if (priv->hw_version == MVPP21)
3702 bm_pool->size_bytes = 2 * sizeof(u32) * size;
3703 else
3704 bm_pool->size_bytes = 2 * sizeof(u64) * size;
3705
3706 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003707 &bm_pool->dma_addr,
Marcin Wojtas3f518502014-07-10 16:52:13 -03003708 GFP_KERNEL);
3709 if (!bm_pool->virt_addr)
3710 return -ENOMEM;
3711
Thomas Petazzonid3158802017-02-21 11:28:13 +01003712 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
3713 MVPP2_BM_POOL_PTR_ALIGN)) {
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003714 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3715 bm_pool->virt_addr, bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003716 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3717 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3718 return -ENOMEM;
3719 }
3720
3721 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003722 lower_32_bits(bm_pool->dma_addr));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003723 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3724
3725 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3726 val |= MVPP2_BM_START_MASK;
3727 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3728
3729 bm_pool->type = MVPP2_BM_FREE;
3730 bm_pool->size = size;
3731 bm_pool->pkt_size = 0;
3732 bm_pool->buf_num = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003733
3734 return 0;
3735}
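/* Editorial worked example: with size = 1024 buffer pointers, the DMA
 * area allocated above is 2 * sizeof(u32) * 1024 = 8 KiB on PPv2.1
 * (8 bytes per pointer) and 2 * sizeof(u64) * 1024 = 16 KiB on PPv2.2
 * (16 bytes per pointer). 1024 also satisfies the multiple-of-16
 * constraint checked at the top of the function.
 */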
3736
3737/* Set pool buffer size */
3738static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3739 struct mvpp2_bm_pool *bm_pool,
3740 int buf_size)
3741{
3742 u32 val;
3743
3744 bm_pool->buf_size = buf_size;
3745
3746 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3747 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3748}
3749
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003750static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3751 struct mvpp2_bm_pool *bm_pool,
3752 dma_addr_t *dma_addr,
3753 phys_addr_t *phys_addr)
3754{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02003755 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01003756
3757 *dma_addr = mvpp2_percpu_read(priv, cpu,
3758 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3759 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003760
3761 if (priv->hw_version == MVPP22) {
3762 u32 val;
3763 u32 dma_addr_highbits, phys_addr_highbits;
3764
Thomas Petazzonia7868412017-03-07 16:53:13 +01003765 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003766 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
3767 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
3768 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
3769
3770 if (sizeof(dma_addr_t) == 8)
3771 *dma_addr |= (u64)dma_addr_highbits << 32;
3772
3773 if (sizeof(phys_addr_t) == 8)
3774 *phys_addr |= (u64)phys_addr_highbits << 32;
3775 }
Thomas Petazzonia704bb52017-06-10 23:18:22 +02003776
3777 put_cpu();
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003778}
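/* Editorial worked example (64-bit PPv2.2 case): if the low 32 bits read
 * from MVPP2_BM_PHY_ALLOC_REG are 0x12345678 and the high-bits register
 * yields dma_addr_highbits = 0x1, the DMA address becomes
 * ((u64)0x1 << 32) | 0x12345678 = 0x112345678. The physical-address
 * cookie is widened the same way from its own high-bits field.
 */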
3779
Ezequiel Garcia7861f122014-07-21 13:48:14 -03003780/* Free all buffers from the pool */
Marcin Wojtas4229d502015-12-03 15:20:50 +01003781static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3782 struct mvpp2_bm_pool *bm_pool)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003783{
3784 int i;
3785
Ezequiel Garcia7861f122014-07-21 13:48:14 -03003786 for (i = 0; i < bm_pool->buf_num; i++) {
Thomas Petazzoni20396132017-03-07 16:53:00 +01003787 dma_addr_t buf_dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003788 phys_addr_t buf_phys_addr;
3789 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003790
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003791 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
3792 &buf_dma_addr, &buf_phys_addr);
Marcin Wojtas4229d502015-12-03 15:20:50 +01003793
Thomas Petazzoni20396132017-03-07 16:53:00 +01003794 dma_unmap_single(dev, buf_dma_addr,
Marcin Wojtas4229d502015-12-03 15:20:50 +01003795 bm_pool->buf_size, DMA_FROM_DEVICE);
3796
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003797 data = (void *)phys_to_virt(buf_phys_addr);
3798 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003799 break;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003800
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003801 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003802 }
3803
3804 /* Update BM driver with number of buffers removed from pool */
3805 bm_pool->buf_num -= i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003806}
3807
3808/* Cleanup pool */
3809static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3810 struct mvpp2 *priv,
3811 struct mvpp2_bm_pool *bm_pool)
3812{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003813 u32 val;
3814
Marcin Wojtas4229d502015-12-03 15:20:50 +01003815 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03003816 if (bm_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03003817 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3818 return 0;
3819 }
3820
3821 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3822 val |= MVPP2_BM_STOP_MASK;
3823 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3824
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003825 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
Marcin Wojtas3f518502014-07-10 16:52:13 -03003826 bm_pool->virt_addr,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003827 bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003828 return 0;
3829}
3830
3831static int mvpp2_bm_pools_init(struct platform_device *pdev,
3832 struct mvpp2 *priv)
3833{
3834 int i, err, size;
3835 struct mvpp2_bm_pool *bm_pool;
3836
3837 /* Create all pools with maximum size */
3838 size = MVPP2_BM_POOL_SIZE_MAX;
3839 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3840 bm_pool = &priv->bm_pools[i];
3841 bm_pool->id = i;
3842 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3843 if (err)
3844 goto err_unroll_pools;
3845 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3846 }
3847 return 0;
3848
3849err_unroll_pools:
3850 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3851 for (i = i - 1; i >= 0; i--)
3852 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3853 return err;
3854}
3855
3856static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3857{
3858 int i, err;
3859
3860 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3861 /* Mask BM all interrupts */
3862 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3863 /* Clear BM cause register */
3864 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3865 }
3866
3867 /* Allocate and initialize BM pools */
3868 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
Markus Elfring81f915e2017-04-17 09:06:33 +02003869 sizeof(*priv->bm_pools), GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003870 if (!priv->bm_pools)
3871 return -ENOMEM;
3872
3873 err = mvpp2_bm_pools_init(pdev, priv);
3874 if (err < 0)
3875 return err;
3876 return 0;
3877}
3878
3879/* Attach long pool to rxq */
3880static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3881 int lrxq, int long_pool)
3882{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003883 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003884 int prxq;
3885
3886 /* Get queue physical ID */
3887 prxq = port->rxqs[lrxq]->id;
3888
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003889 if (port->priv->hw_version == MVPP21)
3890 mask = MVPP21_RXQ_POOL_LONG_MASK;
3891 else
3892 mask = MVPP22_RXQ_POOL_LONG_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003893
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003894 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3895 val &= ~mask;
3896 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003897 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3898}
3899
3900/* Attach short pool to rxq */
3901static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3902 int lrxq, int short_pool)
3903{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003904 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003905 int prxq;
3906
3907 /* Get queue physical ID */
3908 prxq = port->rxqs[lrxq]->id;
3909
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003910 if (port->priv->hw_version == MVPP21)
3911 mask = MVPP21_RXQ_POOL_SHORT_MASK;
3912 else
3913 mask = MVPP22_RXQ_POOL_SHORT_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003914
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003915 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3916 val &= ~mask;
3917 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003918 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3919}
3920
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003921static void *mvpp2_buf_alloc(struct mvpp2_port *port,
3922 struct mvpp2_bm_pool *bm_pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003923 dma_addr_t *buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003924 phys_addr_t *buf_phys_addr,
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003925 gfp_t gfp_mask)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003926{
Thomas Petazzoni20396132017-03-07 16:53:00 +01003927 dma_addr_t dma_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003928 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003929
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003930 data = mvpp2_frag_alloc(bm_pool);
3931 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003932 return NULL;
3933
Thomas Petazzoni20396132017-03-07 16:53:00 +01003934 dma_addr = dma_map_single(port->dev->dev.parent, data,
3935 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3936 DMA_FROM_DEVICE);
3937 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003938 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003939 return NULL;
3940 }
Thomas Petazzoni20396132017-03-07 16:53:00 +01003941 *buf_dma_addr = dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003942 *buf_phys_addr = virt_to_phys(data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003943
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003944 return data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003945}
3946
Marcin Wojtas3f518502014-07-10 16:52:13 -03003947/* Release buffer to BM */
3948static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003949 dma_addr_t buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003950 phys_addr_t buf_phys_addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003951{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02003952 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01003953
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003954 if (port->priv->hw_version == MVPP22) {
3955 u32 val = 0;
3956
3957 if (sizeof(dma_addr_t) == 8)
3958 val |= upper_32_bits(buf_dma_addr) &
3959 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
3960
3961 if (sizeof(phys_addr_t) == 8)
3962 val |= (upper_32_bits(buf_phys_addr)
3963 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
3964 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
3965
Thomas Petazzonia7868412017-03-07 16:53:13 +01003966 mvpp2_percpu_write(port->priv, cpu,
3967 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003968 }
3969
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003970 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
3971 * returned in the "cookie" field of the RX
3972 * descriptor. Instead of storing the virtual address, we
3973 * store the physical address
3974 */
Thomas Petazzonia7868412017-03-07 16:53:13 +01003975 mvpp2_percpu_write(port->priv, cpu,
3976 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
3977 mvpp2_percpu_write(port->priv, cpu,
3978 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02003979
3980 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03003981}
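/* Editorial usage sketch: callers hand back the pair produced by
 * mvpp2_buf_alloc(); on receive, the cookie read from the RX descriptor
 * can be turned into a CPU pointer with phys_to_virt(), as
 * mvpp2_bm_bufs_free() above does. A minimal refill sketch, assuming a
 * pool already attached to the port:
 *
 *	dma_addr_t dma;
 *	phys_addr_t phys;
 *	void *buf = mvpp2_buf_alloc(port, bm_pool, &dma, &phys, GFP_ATOMIC);
 *
 *	if (buf)
 *		mvpp2_bm_pool_put(port, bm_pool->id, dma, phys);
 */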
3982
Marcin Wojtas3f518502014-07-10 16:52:13 -03003983/* Allocate buffers for the pool */
3984static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3985 struct mvpp2_bm_pool *bm_pool, int buf_num)
3986{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003987 int i, buf_size, total_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01003988 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003989 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003990 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003991
3992 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3993 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
3994
3995 if (buf_num < 0 ||
3996 (buf_num + bm_pool->buf_num > bm_pool->size)) {
3997 netdev_err(port->dev,
3998 "cannot allocate %d buffers for pool %d\n",
3999 buf_num, bm_pool->id);
4000 return 0;
4001 }
4002
Marcin Wojtas3f518502014-07-10 16:52:13 -03004003 for (i = 0; i < buf_num; i++) {
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004004 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4005 &phys_addr, GFP_KERNEL);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004006 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004007 break;
4008
Thomas Petazzoni20396132017-03-07 16:53:00 +01004009 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004010 phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004011 }
4012
4013 /* Update BM driver with number of buffers added to pool */
4014 bm_pool->buf_num += i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004015
4016 netdev_dbg(port->dev,
4017 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
4018 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4019 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4020
4021 netdev_dbg(port->dev,
4022 "%s pool %d: %d of %d buffers added\n",
4023 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4024 bm_pool->id, i, buf_num);
4025 return i;
4026}
4027
4028/* Notify the driver that BM pool is being used as a specific type and return the
4029 * pool pointer on success
4030 */
4031static struct mvpp2_bm_pool *
4032mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
4033 int pkt_size)
4034{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004035 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4036 int num;
4037
4038 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
4039 netdev_err(port->dev, "mixing pool types is forbidden\n");
4040 return NULL;
4041 }
4042
Marcin Wojtas3f518502014-07-10 16:52:13 -03004043 if (new_pool->type == MVPP2_BM_FREE)
4044 new_pool->type = type;
4045
4046 /* Allocate buffers in case BM pool is used as long pool, but packet
4047	 * size doesn't match MTU or BM pool hasn't been used yet
4048 */
4049 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
4050 (new_pool->pkt_size == 0)) {
4051 int pkts_num;
4052
4053 /* Set default buffer number or free all the buffers in case
4054 * the pool is not empty
4055 */
4056 pkts_num = new_pool->buf_num;
4057 if (pkts_num == 0)
4058 pkts_num = type == MVPP2_BM_SWF_LONG ?
4059 MVPP2_BM_LONG_BUF_NUM :
4060 MVPP2_BM_SHORT_BUF_NUM;
4061 else
Marcin Wojtas4229d502015-12-03 15:20:50 +01004062 mvpp2_bm_bufs_free(port->dev->dev.parent,
4063 port->priv, new_pool);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004064
4065 new_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004066 new_pool->frag_size =
4067 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4068 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004069
4070 /* Allocate buffers for this pool */
4071 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4072 if (num != pkts_num) {
4073 WARN(1, "pool %d: %d of %d allocated\n",
4074 new_pool->id, num, pkts_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004075 return NULL;
4076 }
4077 }
4078
4079 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4080 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4081
Marcin Wojtas3f518502014-07-10 16:52:13 -03004082 return new_pool;
4083}
4084
4085/* Initialize pools for swf */
4086static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4087{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004088 int rxq;
4089
4090 if (!port->pool_long) {
4091 port->pool_long =
4092 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
4093 MVPP2_BM_SWF_LONG,
4094 port->pkt_size);
4095 if (!port->pool_long)
4096 return -ENOMEM;
4097
Marcin Wojtas3f518502014-07-10 16:52:13 -03004098 port->pool_long->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004099
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004100 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004101 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4102 }
4103
4104 if (!port->pool_short) {
4105 port->pool_short =
4106 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
4107 MVPP2_BM_SWF_SHORT,
4108 MVPP2_BM_SHORT_PKT_SIZE);
4109 if (!port->pool_short)
4110 return -ENOMEM;
4111
Marcin Wojtas3f518502014-07-10 16:52:13 -03004112 port->pool_short->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004113
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004114 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004115 mvpp2_rxq_short_pool_set(port, rxq,
4116 port->pool_short->id);
4117 }
4118
4119 return 0;
4120}
4121
4122static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4123{
4124 struct mvpp2_port *port = netdev_priv(dev);
4125 struct mvpp2_bm_pool *port_pool = port->pool_long;
4126 int num, pkts_num = port_pool->buf_num;
4127 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4128
4129 /* Update BM pool with new buffer size */
Marcin Wojtas4229d502015-12-03 15:20:50 +01004130 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03004131 if (port_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004132 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
4133 return -EIO;
4134 }
4135
4136 port_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004137 port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4138 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004139 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
4140 if (num != pkts_num) {
4141 WARN(1, "pool %d: %d of %d allocated\n",
4142 port_pool->id, num, pkts_num);
4143 return -EIO;
4144 }
4145
4146 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
4147 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
4148 dev->mtu = mtu;
4149 netdev_update_features(dev);
4150 return 0;
4151}
4152
4153static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4154{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004155 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004156
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004157 for (i = 0; i < port->nqvecs; i++)
4158 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4159
Marcin Wojtas3f518502014-07-10 16:52:13 -03004160 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004161 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004162}
4163
4164static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4165{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004166 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004167
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004168 for (i = 0; i < port->nqvecs; i++)
4169 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4170
Marcin Wojtas3f518502014-07-10 16:52:13 -03004171 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004172 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
4173}
4174
4175static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
4176{
4177 struct mvpp2_port *port = qvec->port;
4178
4179 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4180 MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
4181}
4182
4183static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
4184{
4185 struct mvpp2_port *port = qvec->port;
4186
4187 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4188 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004189}
4190
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004191/* Mask the current CPU's Rx/Tx interrupts
4192 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4193 * using smp_processor_id() is OK.
4194 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004195static void mvpp2_interrupts_mask(void *arg)
4196{
4197 struct mvpp2_port *port = arg;
4198
Thomas Petazzonia7868412017-03-07 16:53:13 +01004199 mvpp2_percpu_write(port->priv, smp_processor_id(),
4200 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004201}
4202
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004203/* Unmask the current CPU's Rx/Tx interrupts.
4204 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4205 * using smp_processor_id() is OK.
4206 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004207static void mvpp2_interrupts_unmask(void *arg)
4208{
4209 struct mvpp2_port *port = arg;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004210 u32 val;
4211
4212 val = MVPP2_CAUSE_MISC_SUM_MASK |
4213 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4214 if (port->has_tx_irqs)
4215 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004216
Thomas Petazzonia7868412017-03-07 16:53:13 +01004217 mvpp2_percpu_write(port->priv, smp_processor_id(),
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004218 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4219}
4220
4221static void
4222mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
4223{
4224 u32 val;
4225 int i;
4226
4227 if (port->priv->hw_version != MVPP22)
4228 return;
4229
4230 if (mask)
4231 val = 0;
4232 else
4233 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4234
4235 for (i = 0; i < port->nqvecs; i++) {
4236 struct mvpp2_queue_vector *v = port->qvecs + i;
4237
4238 if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
4239 continue;
4240
4241 mvpp2_percpu_write(port->priv, v->sw_thread_id,
4242 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4243 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004244}
4245
4246/* Port configuration routines */
4247
Thomas Petazzoni26975822017-03-07 16:53:14 +01004248static void mvpp22_port_mii_set(struct mvpp2_port *port)
4249{
4250 u32 val;
4251
Thomas Petazzoni26975822017-03-07 16:53:14 +01004252 /* Only GOP port 0 has an XLG MAC */
4253 if (port->gop_id == 0) {
4254 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
4255 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
Antoine Ténart725757a2017-06-12 16:01:39 +02004256
4257 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4258 port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4259 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4260 else
4261 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4262
Thomas Petazzoni26975822017-03-07 16:53:14 +01004263 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
4264 }
4265
4266 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4267 if (port->phy_interface == PHY_INTERFACE_MODE_RGMII)
4268 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4269 else
4270 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4271 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4272 val |= MVPP22_CTRL4_SYNC_BYPASS;
4273 val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4274 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4275}
4276
Marcin Wojtas3f518502014-07-10 16:52:13 -03004277static void mvpp2_port_mii_set(struct mvpp2_port *port)
4278{
Marcin Wojtas08a23752014-07-21 13:48:12 -03004279 u32 val;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004280
Thomas Petazzoni26975822017-03-07 16:53:14 +01004281 if (port->priv->hw_version == MVPP22)
4282 mvpp22_port_mii_set(port);
4283
Marcin Wojtas08a23752014-07-21 13:48:12 -03004284 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004285
Marcin Wojtas08a23752014-07-21 13:48:12 -03004286 switch (port->phy_interface) {
4287 case PHY_INTERFACE_MODE_SGMII:
4288 val |= MVPP2_GMAC_INBAND_AN_MASK;
4289 break;
4290 case PHY_INTERFACE_MODE_RGMII:
4291 val |= MVPP2_GMAC_PORT_RGMII_MASK;
4292 default:
4293 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
4294 }
4295
4296 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4297}
4298
4299static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
4300{
4301 u32 val;
4302
4303 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4304 val |= MVPP2_GMAC_FC_ADV_EN;
4305 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004306}
4307
4308static void mvpp2_port_enable(struct mvpp2_port *port)
4309{
4310 u32 val;
4311
Antoine Ténart725757a2017-06-12 16:01:39 +02004312 /* Only GOP port 0 has an XLG MAC */
4313 if (port->gop_id == 0 &&
4314 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4315 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4316 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4317 val |= MVPP22_XLG_CTRL0_PORT_EN |
4318 MVPP22_XLG_CTRL0_MAC_RESET_DIS;
4319 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
4320 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4321 } else {
4322 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4323 val |= MVPP2_GMAC_PORT_EN_MASK;
4324 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
4325 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4326 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004327}
4328
4329static void mvpp2_port_disable(struct mvpp2_port *port)
4330{
4331 u32 val;
4332
Antoine Ténart725757a2017-06-12 16:01:39 +02004333 /* Only GOP port 0 has an XLG MAC */
4334 if (port->gop_id == 0 &&
4335 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4336 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4337 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4338 val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
4339 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
4340 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4341 } else {
4342 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4343 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
4344 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4345 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004346}
4347
4348/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
4349static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
4350{
4351 u32 val;
4352
4353 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
4354 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
4355 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4356}
4357
4358/* Configure loopback port */
4359static void mvpp2_port_loopback_set(struct mvpp2_port *port)
4360{
4361 u32 val;
4362
4363 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4364
4365 if (port->speed == 1000)
4366 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
4367 else
4368 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
4369
4370 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4371 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
4372 else
4373 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
4374
4375 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4376}
4377
4378static void mvpp2_port_reset(struct mvpp2_port *port)
4379{
4380 u32 val;
4381
4382 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4383 ~MVPP2_GMAC_PORT_RESET_MASK;
4384 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4385
4386 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4387 MVPP2_GMAC_PORT_RESET_MASK)
4388 continue;
4389}
4390
4391/* Change maximum receive size of the port */
4392static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
4393{
4394 u32 val;
4395
4396 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4397 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
4398 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
4399 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
4400 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4401}
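/* Editorial worked example: the MAX_RX_SIZE field is programmed in units
 * of two bytes. Assuming MVPP2_MH_SIZE is the 2-byte Marvell header, a
 * pkt_size of 1520 yields (1520 - 2) / 2 = 759 in the field.
 */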
4402
4403/* Set defaults to the MVPP2 port */
4404static void mvpp2_defaults_set(struct mvpp2_port *port)
4405{
4406 int tx_port_num, val, queue, ptxq, lrxq;
4407
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01004408 if (port->priv->hw_version == MVPP21) {
4409 /* Configure port to loopback if needed */
4410 if (port->flags & MVPP2_F_LOOPBACK)
4411 mvpp2_port_loopback_set(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004412
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01004413 /* Update TX FIFO MIN Threshold */
4414 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4415 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
4416 /* Min. TX threshold must be less than minimal packet length */
4417 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
4418 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4419 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004420
4421 /* Disable Legacy WRR, Disable EJP, Release from reset */
4422 tx_port_num = mvpp2_egress_port(port);
4423 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
4424 tx_port_num);
4425 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
4426
4427 /* Close bandwidth for all queues */
4428 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
4429 ptxq = mvpp2_txq_phys(port->id, queue);
4430 mvpp2_write(port->priv,
4431 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
4432 }
4433
4434 /* Set refill period to 1 usec, refill tokens
4435 * and bucket size to maximum
4436 */
4437 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
4438 port->priv->tclk / USEC_PER_SEC);
4439 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
4440 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
4441 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
4442 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
4443 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
4444 val = MVPP2_TXP_TOKEN_SIZE_MAX;
4445 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4446
4447 /* Set MaximumLowLatencyPacketSize value to 256 */
4448 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
4449 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
4450 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
4451
4452 /* Enable Rx cache snoop */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004453 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004454 queue = port->rxqs[lrxq]->id;
4455 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4456 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
4457 MVPP2_SNOOP_BUF_HDR_MASK;
4458 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4459 }
4460
4461 /* At default, mask all interrupts to all present cpus */
4462 mvpp2_interrupts_disable(port);
4463}
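/* Editorial worked example: MVPP2_TXP_SCHED_PERIOD_REG is loaded with
 * tclk / USEC_PER_SEC, i.e. the number of core clock cycles in one
 * microsecond; with a hypothetical 250 MHz tclk that is 250, matching the
 * 1 usec refill period mentioned above.
 */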
4464
4465/* Enable/disable receiving packets */
4466static void mvpp2_ingress_enable(struct mvpp2_port *port)
4467{
4468 u32 val;
4469 int lrxq, queue;
4470
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004471 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004472 queue = port->rxqs[lrxq]->id;
4473 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4474 val &= ~MVPP2_RXQ_DISABLE_MASK;
4475 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4476 }
4477}
4478
4479static void mvpp2_ingress_disable(struct mvpp2_port *port)
4480{
4481 u32 val;
4482 int lrxq, queue;
4483
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004484 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004485 queue = port->rxqs[lrxq]->id;
4486 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4487 val |= MVPP2_RXQ_DISABLE_MASK;
4488 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4489 }
4490}
4491
4492/* Enable transmit via physical egress queue
4493 * - HW starts taking descriptors from DRAM
4494 */
4495static void mvpp2_egress_enable(struct mvpp2_port *port)
4496{
4497 u32 qmap;
4498 int queue;
4499 int tx_port_num = mvpp2_egress_port(port);
4500
4501 /* Enable all initialized TXs. */
4502 qmap = 0;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004503 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004504 struct mvpp2_tx_queue *txq = port->txqs[queue];
4505
Markus Elfringdbbb2f02017-04-17 14:07:52 +02004506 if (txq->descs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004507 qmap |= (1 << queue);
4508 }
4509
4510 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4511 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4512}
4513
4514/* Disable transmit via physical egress queue
4515 * - HW doesn't take descriptors from DRAM
4516 */
4517static void mvpp2_egress_disable(struct mvpp2_port *port)
4518{
4519 u32 reg_data;
4520 int delay;
4521 int tx_port_num = mvpp2_egress_port(port);
4522
4523 /* Issue stop command for active channels only */
4524 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4525 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4526 MVPP2_TXP_SCHED_ENQ_MASK;
4527 if (reg_data != 0)
4528 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4529 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4530
4531 /* Wait for all Tx activity to terminate. */
4532 delay = 0;
4533 do {
4534 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4535 netdev_warn(port->dev,
4536 "Tx stop timed out, status=0x%08x\n",
4537 reg_data);
4538 break;
4539 }
4540 mdelay(1);
4541 delay++;
4542
4543 /* Check port TX Command register that all
4544 * Tx queues are stopped
4545 */
4546 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4547 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4548}
4549
4550/* Rx descriptors helper methods */
4551
4552/* Get number of Rx descriptors occupied by received packets */
4553static inline int
4554mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4555{
4556 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4557
4558 return val & MVPP2_RXQ_OCCUPIED_MASK;
4559}
4560
4561/* Update Rx queue status with the number of occupied and available
4562 * Rx descriptor slots.
4563 */
4564static inline void
4565mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4566 int used_count, int free_count)
4567{
4568	/* Decrement the number of used descriptors and
4569	 * increment the number of free descriptors.
4570 */
4571 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4572
4573 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4574}
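/* Editorial usage sketch: after an RX poll loop has processed rx_done
 * descriptors from a queue and refilled the same number of buffers, both
 * counts are typically equal:
 *
 *	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
 *
 * which tells the hardware those slots may be reused for new packets.
 */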
4575
4576/* Get pointer to next RX descriptor to be processed by SW */
4577static inline struct mvpp2_rx_desc *
4578mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4579{
4580 int rx_desc = rxq->next_desc_to_proc;
4581
4582 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4583 prefetch(rxq->descs + rxq->next_desc_to_proc);
4584 return rxq->descs + rx_desc;
4585}
4586
4587/* Set rx queue offset */
4588static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4589 int prxq, int offset)
4590{
4591 u32 val;
4592
4593 /* Convert offset from bytes to units of 32 bytes */
4594 offset = offset >> 5;
4595
4596 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4597 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4598
4599	/* Offset is in units of 32 bytes */
4600 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4601 MVPP2_RXQ_PACKET_OFFSET_MASK);
4602
4603 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4604}
4605
Marcin Wojtas3f518502014-07-10 16:52:13 -03004606/* Tx descriptors helper methods */
4607
Marcin Wojtas3f518502014-07-10 16:52:13 -03004608/* Get pointer to next Tx descriptor to be processed (send) by HW */
4609static struct mvpp2_tx_desc *
4610mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4611{
4612 int tx_desc = txq->next_desc_to_proc;
4613
4614 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4615 return txq->descs + tx_desc;
4616}
4617
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004618/* Update HW with number of aggregated Tx descriptors to be sent
4619 *
4620 * Called only from mvpp2_tx(), so migration is disabled, using
4621 * smp_processor_id() is OK.
4622 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004623static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4624{
4625 /* aggregated access - relevant TXQ number is written in TX desc */
Thomas Petazzonia7868412017-03-07 16:53:13 +01004626 mvpp2_percpu_write(port->priv, smp_processor_id(),
4627 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004628}
4629
4630
4631/* Check if there are enough free descriptors in aggregated txq.
4632 * If not, update the number of occupied descriptors and repeat the check.
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004633 *
4634 * Called only from mvpp2_tx(), so migration is disabled, using
4635 * smp_processor_id() is OK.
Marcin Wojtas3f518502014-07-10 16:52:13 -03004636 */
4637static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4638 struct mvpp2_tx_queue *aggr_txq, int num)
4639{
4640 if ((aggr_txq->count + num) > aggr_txq->size) {
4641 /* Update number of occupied aggregated Tx descriptors */
4642 int cpu = smp_processor_id();
4643 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4644
4645 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4646 }
4647
4648 if ((aggr_txq->count + num) > aggr_txq->size)
4649 return -ENOMEM;
4650
4651 return 0;
4652}
4653
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004654/* Reserved Tx descriptors allocation request
4655 *
4656 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
4657 * only by mvpp2_tx(), so migration is disabled, using
4658 * smp_processor_id() is OK.
4659 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004660static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4661 struct mvpp2_tx_queue *txq, int num)
4662{
4663 u32 val;
Thomas Petazzonia7868412017-03-07 16:53:13 +01004664 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03004665
4666 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
Thomas Petazzonia7868412017-03-07 16:53:13 +01004667 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004668
Thomas Petazzonia7868412017-03-07 16:53:13 +01004669 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004670
4671 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4672}
4673
4674/* Check if there are enough reserved descriptors for transmission.
4675 * If not, request chunk of reserved descriptors and check again.
4676 */
4677static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4678 struct mvpp2_tx_queue *txq,
4679 struct mvpp2_txq_pcpu *txq_pcpu,
4680 int num)
4681{
4682 int req, cpu, desc_count;
4683
4684 if (txq_pcpu->reserved_num >= num)
4685 return 0;
4686
4687 /* Not enough descriptors reserved! Update the reserved descriptor
4688 * count and check again.
4689 */
4690
4691 desc_count = 0;
4692 /* Compute total of used descriptors */
4693 for_each_present_cpu(cpu) {
4694 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4695
4696 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4697 desc_count += txq_pcpu_aux->count;
4698 desc_count += txq_pcpu_aux->reserved_num;
4699 }
4700
4701 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4702 desc_count += req;
4703
4704 if (desc_count >
4705 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4706 return -ENOMEM;
4707
4708 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4709
4710 /* OK, the descriptor count has been updated: check again. */
4711 if (txq_pcpu->reserved_num < num)
4712 return -ENOMEM;
4713 return 0;
4714}
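
/* Worked example (illustrative values): if this CPU needs num = 3
 * descriptors but has only 1 reserved, it requests
 * max(MVPP2_CPU_DESC_CHUNK, 2) new ones, provided the sum of all per-CPU
 * counts plus that chunk stays below txq->size minus one chunk per
 * present CPU; otherwise -ENOMEM is returned.
 */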
4715
4716/* Release the last allocated Tx descriptor. Useful to handle DMA
4717 * mapping failures in the Tx path.
4718 */
4719static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4720{
4721 if (txq->next_desc_to_proc == 0)
4722 txq->next_desc_to_proc = txq->last_desc - 1;
4723 else
4724 txq->next_desc_to_proc--;
4725}
4726
4727/* Set Tx descriptors fields relevant for CSUM calculation */
4728static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4729 int ip_hdr_len, int l4_proto)
4730{
4731 u32 command;
4732
4733 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4734  * G_L4_chk, L4_type are required only for checksum calculation
4735 */
4736 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4737 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4738 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4739
4740 if (l3_proto == swab16(ETH_P_IP)) {
4741 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
4742 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
4743 } else {
4744 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
4745 }
4746
4747 if (l4_proto == IPPROTO_TCP) {
4748 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
4749 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4750 } else if (l4_proto == IPPROTO_UDP) {
4751 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
4752 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4753 } else {
4754 command |= MVPP2_TXD_L4_CSUM_NOT;
4755 }
4756
4757 return command;
4758}
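
/* Worked example (illustrative values): for an untagged TCP/IPv4 frame the
 * caller typically passes l3_offs = 14 (Ethernet header length),
 * ip_hdr_len = 5 (IHL in 32-bit words) and l4_proto = IPPROTO_TCP, which
 * leaves both IPv4 and TCP checksum generation enabled in the returned
 * command word.
 */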
4759
4760/* Get number of sent descriptors and decrement counter.
4761 * The number of sent descriptors is returned.
4762 * Per-CPU access
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004763 *
4764 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
4765 * (migration disabled) and from the TX completion tasklet (migration
4766 * disabled) so using smp_processor_id() is OK.
Marcin Wojtas3f518502014-07-10 16:52:13 -03004767 */
4768static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4769 struct mvpp2_tx_queue *txq)
4770{
4771 u32 val;
4772
4773 /* Reading status reg resets transmitted descriptor counter */
Thomas Petazzonia7868412017-03-07 16:53:13 +01004774 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
4775 MVPP2_TXQ_SENT_REG(txq->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004776
4777 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4778 MVPP2_TRANSMITTED_COUNT_OFFSET;
4779}
4780
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004781/* Called through on_each_cpu(), so runs on all CPUs, with migration
4782 * disabled, therefore using smp_processor_id() is OK.
4783 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004784static void mvpp2_txq_sent_counter_clear(void *arg)
4785{
4786 struct mvpp2_port *port = arg;
4787 int queue;
4788
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004789 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004790 int id = port->txqs[queue]->id;
4791
Thomas Petazzonia7868412017-03-07 16:53:13 +01004792 mvpp2_percpu_read(port->priv, smp_processor_id(),
4793 MVPP2_TXQ_SENT_REG(id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004794 }
4795}
4796
4797/* Set max sizes for Tx queues */
4798static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4799{
4800 u32 val, size, mtu;
4801 int txq, tx_port_num;
4802
4803 mtu = port->pkt_size * 8;
4804 if (mtu > MVPP2_TXP_MTU_MAX)
4805 mtu = MVPP2_TXP_MTU_MAX;
4806
4807 /* Workaround for wrong Token bucket update: set MTU value = 3 * real MTU value */
4808 mtu = 3 * mtu;
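
	/* Worked example (illustrative values): pkt_size = 1518 gives
	 * mtu = 1518 * 8 = 12144, which, assuming it is below
	 * MVPP2_TXP_MTU_MAX, is then tripled to 36432 by the workaround.
	 */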
4809
4810 /* Indirect access to registers */
4811 tx_port_num = mvpp2_egress_port(port);
4812 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4813
4814 /* Set MTU */
4815 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4816 val &= ~MVPP2_TXP_MTU_MAX;
4817 val |= mtu;
4818 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4819
4820 /* TXP token size and all TXQs token size must be larger than the MTU */
4821 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4822 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4823 if (size < mtu) {
4824 size = mtu;
4825 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4826 val |= size;
4827 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4828 }
4829
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004830 for (txq = 0; txq < port->ntxqs; txq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004831 val = mvpp2_read(port->priv,
4832 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4833 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4834
4835 if (size < mtu) {
4836 size = mtu;
4837 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4838 val |= size;
4839 mvpp2_write(port->priv,
4840 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4841 val);
4842 }
4843 }
4844}
4845
4846 /* Set the number of packets that will be received before an Rx interrupt
4847  * is generated by the HW.
4848 */
4849static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01004850 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004851{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004852 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01004853
Thomas Petazzonif8b0d5f2017-02-21 11:28:03 +01004854 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4855 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004856
Thomas Petazzonia7868412017-03-07 16:53:13 +01004857 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4858 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
4859 rxq->pkts_coal);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004860
4861 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03004862}
4863
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004864/* For some reason in the LSP this is done on each CPU. Why? */
4865static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
4866 struct mvpp2_tx_queue *txq)
4867{
4868 int cpu = get_cpu();
4869 u32 val;
4870
4871 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
4872 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
4873
4874 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
4875 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
4876 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
4877
4878 put_cpu();
4879}
4880
Thomas Petazzoniab426762017-02-21 11:28:04 +01004881static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
4882{
4883 u64 tmp = (u64)clk_hz * usec;
4884
4885 do_div(tmp, USEC_PER_SEC);
4886
4887 return tmp > U32_MAX ? U32_MAX : tmp;
4888}
4889
4890static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
4891{
4892 u64 tmp = (u64)cycles * USEC_PER_SEC;
4893
4894 do_div(tmp, clk_hz);
4895
4896 return tmp > U32_MAX ? U32_MAX : tmp;
4897}
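
/* Worked example (illustrative clock rate): with tclk = 250 MHz,
 * mvpp2_usec_to_cycles(100, 250000000) = 250000000 * 100 / 1000000
 * = 25000 cycles, and mvpp2_cycles_to_usec(25000, 250000000) maps back
 * to 100 usec.
 */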
4898
Marcin Wojtas3f518502014-07-10 16:52:13 -03004899/* Set the time delay in usec before Rx interrupt */
4900static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01004901 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004902{
Thomas Petazzoniab426762017-02-21 11:28:04 +01004903 unsigned long freq = port->priv->tclk;
4904 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004905
Thomas Petazzoniab426762017-02-21 11:28:04 +01004906 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
4907 rxq->time_coal =
4908 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
4909
4910 /* re-evaluate to get actual register value */
4911 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4912 }
4913
Marcin Wojtas3f518502014-07-10 16:52:13 -03004914 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004915}
4916
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004917static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
4918{
4919 unsigned long freq = port->priv->tclk;
4920 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
4921
4922 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
4923 port->tx_time_coal =
4924 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
4925
4926 /* re-evaluate to get actual register value */
4927 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
4928 }
4929
4930 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
4931}
4932
Marcin Wojtas3f518502014-07-10 16:52:13 -03004933/* Free Tx queue skbuffs */
4934static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4935 struct mvpp2_tx_queue *txq,
4936 struct mvpp2_txq_pcpu *txq_pcpu, int num)
4937{
4938 int i;
4939
4940 for (i = 0; i < num; i++) {
Thomas Petazzoni83544912016-12-21 11:28:49 +01004941 struct mvpp2_txq_pcpu_buf *tx_buf =
4942 txq_pcpu->buffs + txq_pcpu->txq_get_index;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004943
Thomas Petazzoni20396132017-03-07 16:53:00 +01004944 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
Thomas Petazzoni83544912016-12-21 11:28:49 +01004945 tx_buf->size, DMA_TO_DEVICE);
Thomas Petazzoni36fb7432017-02-21 11:28:05 +01004946 if (tx_buf->skb)
4947 dev_kfree_skb_any(tx_buf->skb);
4948
4949 mvpp2_txq_inc_get(txq_pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004950 }
4951}
4952
4953static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4954 u32 cause)
4955{
4956 int queue = fls(cause) - 1;
4957
4958 return port->rxqs[queue];
4959}
4960
4961static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4962 u32 cause)
4963{
Marcin Wojtasedc660f2015-08-06 19:00:30 +02004964 int queue = fls(cause) - 1;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004965
4966 return port->txqs[queue];
4967}
4968
4969/* Handle end of transmission */
4970static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4971 struct mvpp2_txq_pcpu *txq_pcpu)
4972{
4973 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
4974 int tx_done;
4975
4976 if (txq_pcpu->cpu != smp_processor_id())
4977 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
4978
4979 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
4980 if (!tx_done)
4981 return;
4982 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
4983
4984 txq_pcpu->count -= tx_done;
4985
4986 if (netif_tx_queue_stopped(nq))
4987 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
4988 netif_tx_wake_queue(nq);
4989}
4990
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004991static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
4992 int cpu)
Marcin Wojtasedc660f2015-08-06 19:00:30 +02004993{
4994 struct mvpp2_tx_queue *txq;
4995 struct mvpp2_txq_pcpu *txq_pcpu;
4996 unsigned int tx_todo = 0;
4997
4998 while (cause) {
4999 txq = mvpp2_get_tx_queue(port, cause);
5000 if (!txq)
5001 break;
5002
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005003 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005004
5005 if (txq_pcpu->count) {
5006 mvpp2_txq_done(port, txq, txq_pcpu);
5007 tx_todo += txq_pcpu->count;
5008 }
5009
5010 cause &= ~(1 << txq->log_id);
5011 }
5012 return tx_todo;
5013}
5014
Marcin Wojtas3f518502014-07-10 16:52:13 -03005015/* Rx/Tx queue initialization/cleanup methods */
5016
5017/* Allocate and initialize descriptors for aggr TXQ */
5018static int mvpp2_aggr_txq_init(struct platform_device *pdev,
5019 struct mvpp2_tx_queue *aggr_txq,
5020 int desc_num, int cpu,
5021 struct mvpp2 *priv)
5022{
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005023 u32 txq_dma;
5024
Marcin Wojtas3f518502014-07-10 16:52:13 -03005025 /* Allocate memory for TX descriptors */
5026 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
5027 desc_num * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005028 &aggr_txq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005029 if (!aggr_txq->descs)
5030 return -ENOMEM;
5031
Marcin Wojtas3f518502014-07-10 16:52:13 -03005032 aggr_txq->last_desc = aggr_txq->size - 1;
5033
5034 /* Workaround: the aggregated TXQ is not reset, so resync the SW index with HW */
5035 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
5036 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
5037
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005038 /* Set Tx descriptors queue starting address indirect
5039 * access
5040 */
5041 if (priv->hw_version == MVPP21)
5042 txq_dma = aggr_txq->descs_dma;
5043 else
5044 txq_dma = aggr_txq->descs_dma >>
5045 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
5046
5047 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005048 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
5049
5050 return 0;
5051}
5052
5053/* Create a specified Rx queue */
5054static int mvpp2_rxq_init(struct mvpp2_port *port,
5055 struct mvpp2_rx_queue *rxq)
5056
5057{
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005058 u32 rxq_dma;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005059 int cpu;
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005060
Marcin Wojtas3f518502014-07-10 16:52:13 -03005061 rxq->size = port->rx_ring_size;
5062
5063 /* Allocate memory for RX descriptors */
5064 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
5065 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005066 &rxq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005067 if (!rxq->descs)
5068 return -ENOMEM;
5069
Marcin Wojtas3f518502014-07-10 16:52:13 -03005070 rxq->last_desc = rxq->size - 1;
5071
5072 /* Zero occupied and non-occupied counters - direct access */
5073 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5074
5075 /* Set Rx descriptors queue starting address - indirect access */
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005076 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005077 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005078 if (port->priv->hw_version == MVPP21)
5079 rxq_dma = rxq->descs_dma;
5080 else
5081 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005082 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
5083 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
5084 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005085 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005086
5087 /* Set Offset */
5088 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
5089
5090 /* Set coalescing pkts and time */
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01005091 mvpp2_rx_pkts_coal_set(port, rxq);
5092 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005093
5094 /* Add number of descriptors ready for receiving packets */
5095 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
5096
5097 return 0;
5098}
5099
5100/* Push packets received by the RXQ to BM pool */
5101static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
5102 struct mvpp2_rx_queue *rxq)
5103{
5104 int rx_received, i;
5105
5106 rx_received = mvpp2_rxq_received(port, rxq->id);
5107 if (!rx_received)
5108 return;
5109
5110 for (i = 0; i < rx_received; i++) {
5111 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005112 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5113 int pool;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005114
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005115 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5116 MVPP2_RXD_BM_POOL_ID_OFFS;
5117
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02005118 mvpp2_bm_pool_put(port, pool,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005119 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
5120 mvpp2_rxdesc_cookie_get(port, rx_desc));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005121 }
5122 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
5123}
5124
5125/* Cleanup Rx queue */
5126static void mvpp2_rxq_deinit(struct mvpp2_port *port,
5127 struct mvpp2_rx_queue *rxq)
5128{
Thomas Petazzonia7868412017-03-07 16:53:13 +01005129 int cpu;
5130
Marcin Wojtas3f518502014-07-10 16:52:13 -03005131 mvpp2_rxq_drop_pkts(port, rxq);
5132
5133 if (rxq->descs)
5134 dma_free_coherent(port->dev->dev.parent,
5135 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
5136 rxq->descs,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005137 rxq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005138
5139 rxq->descs = NULL;
5140 rxq->last_desc = 0;
5141 rxq->next_desc_to_proc = 0;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005142 rxq->descs_dma = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005143
5144 /* Clear Rx descriptors queue starting address and size;
5145 * free descriptor number
5146 */
5147 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005148 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005149 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5150 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
5151 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005152 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005153}
5154
5155/* Create and initialize a Tx queue */
5156static int mvpp2_txq_init(struct mvpp2_port *port,
5157 struct mvpp2_tx_queue *txq)
5158{
5159 u32 val;
5160 int cpu, desc, desc_per_txq, tx_port_num;
5161 struct mvpp2_txq_pcpu *txq_pcpu;
5162
5163 txq->size = port->tx_ring_size;
5164
5165 /* Allocate memory for Tx descriptors */
5166 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
5167 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005168 &txq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005169 if (!txq->descs)
5170 return -ENOMEM;
5171
Marcin Wojtas3f518502014-07-10 16:52:13 -03005172 txq->last_desc = txq->size - 1;
5173
5174 /* Set Tx descriptors queue starting address - indirect access */
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005175 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005176 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5177 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
5178 txq->descs_dma);
5179 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
5180 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
5181 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
5182 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
5183 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
5184 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005185 val &= ~MVPP2_TXQ_PENDING_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005186 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005187
5188 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
5189 * for each existing TXQ.
5190 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
5191 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
5192 */
5193 desc_per_txq = 16;
5194 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
5195 (txq->log_id * desc_per_txq);
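	/* Worked example (illustrative values): for port 0, logical queue 3,
	 * desc = 0 + 3 * 16 = 48, i.e. this TXQ's 16-entry window starts at
	 * descriptor 48 of the prefetch buffer.
	 */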
5196
Thomas Petazzonia7868412017-03-07 16:53:13 +01005197 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
5198 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
5199 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005200 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005201
5202 /* WRR / EJP configuration - indirect access */
5203 tx_port_num = mvpp2_egress_port(port);
5204 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5205
5206 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
5207 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
5208 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
5209 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
5210 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
5211
5212 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
5213 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
5214 val);
5215
5216 for_each_present_cpu(cpu) {
5217 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5218 txq_pcpu->size = txq->size;
Markus Elfring02c91ec2017-04-17 08:09:07 +02005219 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
5220 sizeof(*txq_pcpu->buffs),
5221 GFP_KERNEL);
Thomas Petazzoni83544912016-12-21 11:28:49 +01005222 if (!txq_pcpu->buffs)
Markus Elfring20b1e162017-04-17 12:58:33 +02005223 goto cleanup;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005224
5225 txq_pcpu->count = 0;
5226 txq_pcpu->reserved_num = 0;
5227 txq_pcpu->txq_put_index = 0;
5228 txq_pcpu->txq_get_index = 0;
5229 }
5230
5231 return 0;
Markus Elfring20b1e162017-04-17 12:58:33 +02005232cleanup:
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005233 for_each_present_cpu(cpu) {
5234 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Thomas Petazzoni83544912016-12-21 11:28:49 +01005235 kfree(txq_pcpu->buffs);
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005236 }
5237
5238 dma_free_coherent(port->dev->dev.parent,
5239 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005240 txq->descs, txq->descs_dma);
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005241
5242 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005243}
5244
5245/* Free allocated TXQ resources */
5246static void mvpp2_txq_deinit(struct mvpp2_port *port,
5247 struct mvpp2_tx_queue *txq)
5248{
5249 struct mvpp2_txq_pcpu *txq_pcpu;
5250 int cpu;
5251
5252 for_each_present_cpu(cpu) {
5253 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Thomas Petazzoni83544912016-12-21 11:28:49 +01005254 kfree(txq_pcpu->buffs);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005255 }
5256
5257 if (txq->descs)
5258 dma_free_coherent(port->dev->dev.parent,
5259 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005260 txq->descs, txq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005261
5262 txq->descs = NULL;
5263 txq->last_desc = 0;
5264 txq->next_desc_to_proc = 0;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005265 txq->descs_dma = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005266
5267 /* Set minimum bandwidth for disabled TXQs */
5268 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
5269
5270 /* Set Tx descriptors queue starting address and size */
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005271 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005272 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5273 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
5274 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005275 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005276}
5277
5278 /* Clean up a Tx queue */
5279static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5280{
5281 struct mvpp2_txq_pcpu *txq_pcpu;
5282 int delay, pending, cpu;
5283 u32 val;
5284
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005285 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005286 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5287 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005288 val |= MVPP2_TXQ_DRAIN_EN_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005289 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005290
5291 /* The napi queue has been stopped so wait for all packets
5292 * to be transmitted.
5293 */
5294 delay = 0;
5295 do {
5296 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
5297 netdev_warn(port->dev,
5298 "port %d: cleaning queue %d timed out\n",
5299 port->id, txq->log_id);
5300 break;
5301 }
5302 mdelay(1);
5303 delay++;
5304
Thomas Petazzonia7868412017-03-07 16:53:13 +01005305 pending = mvpp2_percpu_read(port->priv, cpu,
5306 MVPP2_TXQ_PENDING_REG);
5307 pending &= MVPP2_TXQ_PENDING_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005308 } while (pending);
5309
5310 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005311 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005312 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005313
5314 for_each_present_cpu(cpu) {
5315 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5316
5317 /* Release all packets */
5318 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
5319
5320 /* Reset queue */
5321 txq_pcpu->count = 0;
5322 txq_pcpu->txq_put_index = 0;
5323 txq_pcpu->txq_get_index = 0;
5324 }
5325}
5326
5327/* Cleanup all Tx queues */
5328static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
5329{
5330 struct mvpp2_tx_queue *txq;
5331 int queue;
5332 u32 val;
5333
5334 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
5335
5336 /* Reset Tx ports and delete Tx queues */
5337 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
5338 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5339
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005340 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005341 txq = port->txqs[queue];
5342 mvpp2_txq_clean(port, txq);
5343 mvpp2_txq_deinit(port, txq);
5344 }
5345
5346 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5347
5348 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
5349 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5350}
5351
5352/* Cleanup all Rx queues */
5353static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
5354{
5355 int queue;
5356
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005357 for (queue = 0; queue < port->nrxqs; queue++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005358 mvpp2_rxq_deinit(port, port->rxqs[queue]);
5359}
5360
5361/* Init all Rx queues for port */
5362static int mvpp2_setup_rxqs(struct mvpp2_port *port)
5363{
5364 int queue, err;
5365
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005366 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005367 err = mvpp2_rxq_init(port, port->rxqs[queue]);
5368 if (err)
5369 goto err_cleanup;
5370 }
5371 return 0;
5372
5373err_cleanup:
5374 mvpp2_cleanup_rxqs(port);
5375 return err;
5376}
5377
5378/* Init all tx queues for port */
5379static int mvpp2_setup_txqs(struct mvpp2_port *port)
5380{
5381 struct mvpp2_tx_queue *txq;
5382 int queue, err;
5383
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005384 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005385 txq = port->txqs[queue];
5386 err = mvpp2_txq_init(port, txq);
5387 if (err)
5388 goto err_cleanup;
5389 }
5390
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005391 if (port->has_tx_irqs) {
5392 mvpp2_tx_time_coal_set(port);
5393 for (queue = 0; queue < port->ntxqs; queue++) {
5394 txq = port->txqs[queue];
5395 mvpp2_tx_pkts_coal_set(port, txq);
5396 }
5397 }
5398
Marcin Wojtas3f518502014-07-10 16:52:13 -03005399 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5400 return 0;
5401
5402err_cleanup:
5403 mvpp2_cleanup_txqs(port);
5404 return err;
5405}
5406
5407/* The callback for per-port interrupt */
5408static irqreturn_t mvpp2_isr(int irq, void *dev_id)
5409{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005410 struct mvpp2_queue_vector *qv = dev_id;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005411
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005412 mvpp2_qvec_interrupt_disable(qv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005413
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005414 napi_schedule(&qv->napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005415
5416 return IRQ_HANDLED;
5417}
5418
5419/* Adjust link */
5420static void mvpp2_link_event(struct net_device *dev)
5421{
5422 struct mvpp2_port *port = netdev_priv(dev);
Philippe Reynes8e072692016-06-28 00:08:11 +02005423 struct phy_device *phydev = dev->phydev;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005424 int status_change = 0;
5425 u32 val;
5426
5427 if (phydev->link) {
5428 if ((port->speed != phydev->speed) ||
5429 (port->duplex != phydev->duplex)) {
5430 u32 val;
5431
5432 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5433 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
5434 MVPP2_GMAC_CONFIG_GMII_SPEED |
5435 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
5436 MVPP2_GMAC_AN_SPEED_EN |
5437 MVPP2_GMAC_AN_DUPLEX_EN);
5438
5439 if (phydev->duplex)
5440 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5441
5442 if (phydev->speed == SPEED_1000)
5443 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
Thomas Petazzoni2add5112014-07-27 23:21:35 +02005444 else if (phydev->speed == SPEED_100)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005445 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
5446
5447 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5448
5449 port->duplex = phydev->duplex;
5450 port->speed = phydev->speed;
5451 }
5452 }
5453
5454 if (phydev->link != port->link) {
5455 if (!phydev->link) {
5456 port->duplex = -1;
5457 port->speed = 0;
5458 }
5459
5460 port->link = phydev->link;
5461 status_change = 1;
5462 }
5463
5464 if (status_change) {
5465 if (phydev->link) {
5466 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5467 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
5468 MVPP2_GMAC_FORCE_LINK_DOWN);
5469 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5470 mvpp2_egress_enable(port);
5471 mvpp2_ingress_enable(port);
5472 } else {
5473 mvpp2_ingress_disable(port);
5474 mvpp2_egress_disable(port);
5475 }
5476 phy_print_status(phydev);
5477 }
5478}
5479
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005480static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
5481{
5482 ktime_t interval;
5483
5484 if (!port_pcpu->timer_scheduled) {
5485 port_pcpu->timer_scheduled = true;
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01005486 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005487 hrtimer_start(&port_pcpu->tx_done_timer, interval,
5488 HRTIMER_MODE_REL_PINNED);
5489 }
5490}
5491
5492static void mvpp2_tx_proc_cb(unsigned long data)
5493{
5494 struct net_device *dev = (struct net_device *)data;
5495 struct mvpp2_port *port = netdev_priv(dev);
5496 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5497 unsigned int tx_todo, cause;
5498
5499 if (!netif_running(dev))
5500 return;
5501 port_pcpu->timer_scheduled = false;
5502
5503 /* Process all the Tx queues */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005504 cause = (1 << port->ntxqs) - 1;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005505 tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005506
5507 /* Set the timer in case not all the packets were processed */
5508 if (tx_todo)
5509 mvpp2_timer_set(port_pcpu);
5510}
5511
5512static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
5513{
5514 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
5515 struct mvpp2_port_pcpu,
5516 tx_done_timer);
5517
5518 tasklet_schedule(&port_pcpu->tx_done_tasklet);
5519
5520 return HRTIMER_NORESTART;
5521}
5522
Marcin Wojtas3f518502014-07-10 16:52:13 -03005523/* Main RX/TX processing routines */
5524
5525/* Display more error info */
5526static void mvpp2_rx_error(struct mvpp2_port *port,
5527 struct mvpp2_rx_desc *rx_desc)
5528{
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005529 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5530 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005531
5532 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
5533 case MVPP2_RXD_ERR_CRC:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005534 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
5535 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005536 break;
5537 case MVPP2_RXD_ERR_OVERRUN:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005538 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
5539 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005540 break;
5541 case MVPP2_RXD_ERR_RESOURCE:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005542 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
5543 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005544 break;
5545 }
5546}
5547
5548/* Handle RX checksum offload */
5549static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
5550 struct sk_buff *skb)
5551{
5552 if (((status & MVPP2_RXD_L3_IP4) &&
5553 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
5554 (status & MVPP2_RXD_L3_IP6))
5555 if (((status & MVPP2_RXD_L4_UDP) ||
5556 (status & MVPP2_RXD_L4_TCP)) &&
5557 (status & MVPP2_RXD_L4_CSUM_OK)) {
5558 skb->csum = 0;
5559 skb->ip_summed = CHECKSUM_UNNECESSARY;
5560 return;
5561 }
5562
5563 skb->ip_summed = CHECKSUM_NONE;
5564}
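
/* Illustrative example: a frame flagged MVPP2_RXD_L3_IP4 (without
 * MVPP2_RXD_IP4_HEADER_ERR) and MVPP2_RXD_L4_TCP | MVPP2_RXD_L4_CSUM_OK is
 * accepted with CHECKSUM_UNNECESSARY; any other combination falls through
 * to CHECKSUM_NONE and the stack verifies the checksum itself.
 */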
5565
5566 /* Allocate a new buffer and add it to the BM pool */
5567static int mvpp2_rx_refill(struct mvpp2_port *port,
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005568 struct mvpp2_bm_pool *bm_pool, int pool)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005569{
Thomas Petazzoni20396132017-03-07 16:53:00 +01005570 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01005571 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005572 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005573
Marcin Wojtas3f518502014-07-10 16:52:13 -03005574 /* No recycle or too many buffers are in use, so allocate a new skb */
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01005575 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
5576 GFP_ATOMIC);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005577 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005578 return -ENOMEM;
5579
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02005580 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Thomas Petazzoni7ef7e1d2017-02-21 11:28:07 +01005581
Marcin Wojtas3f518502014-07-10 16:52:13 -03005582 return 0;
5583}
5584
5585/* Handle tx checksum */
5586static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5587{
5588 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5589 int ip_hdr_len = 0;
5590 u8 l4_proto;
5591
5592 if (skb->protocol == htons(ETH_P_IP)) {
5593 struct iphdr *ip4h = ip_hdr(skb);
5594
5595 /* Calculate IPv4 checksum and L4 checksum */
5596 ip_hdr_len = ip4h->ihl;
5597 l4_proto = ip4h->protocol;
5598 } else if (skb->protocol == htons(ETH_P_IPV6)) {
5599 struct ipv6hdr *ip6h = ipv6_hdr(skb);
5600
5601 /* Read l4_protocol from one of IPv6 extra headers */
5602 if (skb_network_header_len(skb) > 0)
5603 ip_hdr_len = (skb_network_header_len(skb) >> 2);
5604 l4_proto = ip6h->nexthdr;
5605 } else {
5606 return MVPP2_TXD_L4_CSUM_NOT;
5607 }
5608
5609 return mvpp2_txq_desc_csum(skb_network_offset(skb),
5610 skb->protocol, ip_hdr_len, l4_proto);
5611 }
5612
5613 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5614}
5615
Marcin Wojtas3f518502014-07-10 16:52:13 -03005616/* Main rx processing */
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005617static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
5618 int rx_todo, struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005619{
5620 struct net_device *dev = port->dev;
Marcin Wojtasb5015852015-12-03 15:20:51 +01005621 int rx_received;
5622 int rx_done = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005623 u32 rcvd_pkts = 0;
5624 u32 rcvd_bytes = 0;
5625
5626 /* Get number of received packets and clamp the to-do */
5627 rx_received = mvpp2_rxq_received(port, rxq->id);
5628 if (rx_todo > rx_received)
5629 rx_todo = rx_received;
5630
Marcin Wojtasb5015852015-12-03 15:20:51 +01005631 while (rx_done < rx_todo) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005632 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5633 struct mvpp2_bm_pool *bm_pool;
5634 struct sk_buff *skb;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005635 unsigned int frag_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005636 dma_addr_t dma_addr;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005637 phys_addr_t phys_addr;
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005638 u32 rx_status;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005639 int pool, rx_bytes, err;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005640 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005641
Marcin Wojtasb5015852015-12-03 15:20:51 +01005642 rx_done++;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005643 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
5644 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
5645 rx_bytes -= MVPP2_MH_SIZE;
5646 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
5647 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
5648 data = (void *)phys_to_virt(phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005649
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005650 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5651 MVPP2_RXD_BM_POOL_ID_OFFS;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005652 bm_pool = &port->priv->bm_pools[pool];
Marcin Wojtas3f518502014-07-10 16:52:13 -03005653
5654 /* In case of an error, release the requested buffer pointer
5655 * to the Buffer Manager. This request process is controlled
5656 * by the hardware, and the information about the buffer is
5657 * carried in the RX descriptor.
5658 */
5659 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
Markus Elfring8a524882017-04-17 10:52:02 +02005660err_drop_frame:
Marcin Wojtas3f518502014-07-10 16:52:13 -03005661 dev->stats.rx_errors++;
5662 mvpp2_rx_error(port, rx_desc);
Marcin Wojtasb5015852015-12-03 15:20:51 +01005663 /* Return the buffer to the pool */
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02005664 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005665 continue;
5666 }
5667
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005668 if (bm_pool->frag_size > PAGE_SIZE)
5669 frag_size = 0;
5670 else
5671 frag_size = bm_pool->frag_size;
5672
5673 skb = build_skb(data, frag_size);
5674 if (!skb) {
5675 netdev_warn(port->dev, "skb build failed\n");
5676 goto err_drop_frame;
5677 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005678
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005679 err = mvpp2_rx_refill(port, bm_pool, pool);
Marcin Wojtasb5015852015-12-03 15:20:51 +01005680 if (err) {
5681 netdev_err(port->dev, "failed to refill BM pools\n");
5682 goto err_drop_frame;
5683 }
5684
Thomas Petazzoni20396132017-03-07 16:53:00 +01005685 dma_unmap_single(dev->dev.parent, dma_addr,
Marcin Wojtas4229d502015-12-03 15:20:50 +01005686 bm_pool->buf_size, DMA_FROM_DEVICE);
5687
Marcin Wojtas3f518502014-07-10 16:52:13 -03005688 rcvd_pkts++;
5689 rcvd_bytes += rx_bytes;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005690
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005691 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005692 skb_put(skb, rx_bytes);
5693 skb->protocol = eth_type_trans(skb, dev);
5694 mvpp2_rx_csum(port, rx_status, skb);
5695
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005696 napi_gro_receive(napi, skb);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005697 }
5698
5699 if (rcvd_pkts) {
5700 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5701
5702 u64_stats_update_begin(&stats->syncp);
5703 stats->rx_packets += rcvd_pkts;
5704 stats->rx_bytes += rcvd_bytes;
5705 u64_stats_update_end(&stats->syncp);
5706 }
5707
5708 /* Update Rx queue management counters */
5709 wmb();
Marcin Wojtasb5015852015-12-03 15:20:51 +01005710 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005711
5712 return rx_todo;
5713}
5714
5715static inline void
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005716tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
Marcin Wojtas3f518502014-07-10 16:52:13 -03005717 struct mvpp2_tx_desc *desc)
5718{
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005719 dma_addr_t buf_dma_addr =
5720 mvpp2_txdesc_dma_addr_get(port, desc);
5721 size_t buf_sz =
5722 mvpp2_txdesc_size_get(port, desc);
5723 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
5724 buf_sz, DMA_TO_DEVICE);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005725 mvpp2_txq_desc_put(txq);
5726}
5727
5728/* Handle tx fragmentation processing */
5729static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5730 struct mvpp2_tx_queue *aggr_txq,
5731 struct mvpp2_tx_queue *txq)
5732{
5733 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5734 struct mvpp2_tx_desc *tx_desc;
5735 int i;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005736 dma_addr_t buf_dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005737
5738 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5739 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5740 void *addr = page_address(frag->page.p) + frag->page_offset;
5741
5742 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005743 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5744 mvpp2_txdesc_size_set(port, tx_desc, frag->size);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005745
Thomas Petazzoni20396132017-03-07 16:53:00 +01005746 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005747 frag->size,
5748 DMA_TO_DEVICE);
Thomas Petazzoni20396132017-03-07 16:53:00 +01005749 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005750 mvpp2_txq_desc_put(txq);
Markus Elfring32bae632017-04-17 11:36:34 +02005751 goto cleanup;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005752 }
5753
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005754 mvpp2_txdesc_offset_set(port, tx_desc,
5755 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5756 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5757 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005758
5759 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5760 /* Last descriptor */
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005761 mvpp2_txdesc_cmd_set(port, tx_desc,
5762 MVPP2_TXD_L_DESC);
5763 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005764 } else {
5765 /* Descriptor in the middle: Not First, Not Last */
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005766 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
5767 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005768 }
5769 }
5770
5771 return 0;
Markus Elfring32bae632017-04-17 11:36:34 +02005772cleanup:
Marcin Wojtas3f518502014-07-10 16:52:13 -03005773 /* Release all descriptors that were used to map fragments of
5774 * this packet, as well as the corresponding DMA mappings
5775 */
5776 for (i = i - 1; i >= 0; i--) {
5777 tx_desc = txq->descs + i;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005778 tx_desc_unmap_put(port, txq, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005779 }
5780
5781 return -ENOMEM;
5782}
5783
5784/* Main tx processing */
5785static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5786{
5787 struct mvpp2_port *port = netdev_priv(dev);
5788 struct mvpp2_tx_queue *txq, *aggr_txq;
5789 struct mvpp2_txq_pcpu *txq_pcpu;
5790 struct mvpp2_tx_desc *tx_desc;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005791 dma_addr_t buf_dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005792 int frags = 0;
5793 u16 txq_id;
5794 u32 tx_cmd;
5795
5796 txq_id = skb_get_queue_mapping(skb);
5797 txq = port->txqs[txq_id];
5798 txq_pcpu = this_cpu_ptr(txq->pcpu);
5799 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5800
5801 frags = skb_shinfo(skb)->nr_frags + 1;
5802
5803 /* Check number of available descriptors */
5804 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5805 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5806 txq_pcpu, frags)) {
5807 frags = 0;
5808 goto out;
5809 }
5810
5811 /* Get a descriptor for the first part of the packet */
5812 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005813 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5814 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005815
Thomas Petazzoni20396132017-03-07 16:53:00 +01005816 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005817 skb_headlen(skb), DMA_TO_DEVICE);
Thomas Petazzoni20396132017-03-07 16:53:00 +01005818 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005819 mvpp2_txq_desc_put(txq);
5820 frags = 0;
5821 goto out;
5822 }
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005823
5824 mvpp2_txdesc_offset_set(port, tx_desc,
5825 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5826 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5827 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005828
5829 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5830
5831 if (frags == 1) {
5832 /* First and Last descriptor */
5833 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005834 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5835 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005836 } else {
5837 /* First but not Last */
5838 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005839 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5840 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005841
5842 /* Continue with other skb fragments */
5843 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005844 tx_desc_unmap_put(port, txq, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005845 frags = 0;
5846 goto out;
5847 }
5848 }
5849
5850 txq_pcpu->reserved_num -= frags;
5851 txq_pcpu->count += frags;
5852 aggr_txq->count += frags;
5853
5854 /* Enable transmit */
5855 wmb();
5856 mvpp2_aggr_txq_pend_desc_add(port, frags);
5857
5858 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5859 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5860
5861 netif_tx_stop_queue(nq);
5862 }
5863out:
5864 if (frags > 0) {
5865 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5866
5867 u64_stats_update_begin(&stats->syncp);
5868 stats->tx_packets++;
5869 stats->tx_bytes += skb->len;
5870 u64_stats_update_end(&stats->syncp);
5871 } else {
5872 dev->stats.tx_dropped++;
5873 dev_kfree_skb_any(skb);
5874 }
5875
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005876 /* Finalize TX processing */
5877 if (txq_pcpu->count >= txq->done_pkts_coal)
5878 mvpp2_txq_done(port, txq, txq_pcpu);
5879
5880 /* Set the timer in case not all frags were processed */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005881 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
5882 txq_pcpu->count > 0) {
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005883 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5884
5885 mvpp2_timer_set(port_pcpu);
5886 }
5887
Marcin Wojtas3f518502014-07-10 16:52:13 -03005888 return NETDEV_TX_OK;
5889}
5890
5891static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5892{
5893 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5894 netdev_err(dev, "FCS error\n");
5895 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5896 netdev_err(dev, "rx fifo overrun error\n");
5897 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5898 netdev_err(dev, "tx fifo underrun error\n");
5899}
5900
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005901static int mvpp2_poll(struct napi_struct *napi, int budget)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005902{
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005903 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005904 int rx_done = 0;
5905 struct mvpp2_port *port = netdev_priv(napi->dev);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005906 struct mvpp2_queue_vector *qv;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005907 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005908
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005909 qv = container_of(napi, struct mvpp2_queue_vector, napi);
5910
Marcin Wojtas3f518502014-07-10 16:52:13 -03005911 /* Rx/Tx cause register
5912 *
5913 * Bits 0-15: each bit indicates received packets on the Rx queue
5914 * (bit 0 is for Rx queue 0).
5915 *
5916 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5917 * (bit 16 is for Tx queue 0).
5918 *
5919 * Each CPU has its own Rx/Tx cause register
5920 */
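
	/* Illustrative example: a cause value of 0x00010003 would indicate
	 * completed transmission on Tx queue 0 (bit 16) and pending packets
	 * on Rx queues 0 and 1 (bits 0 and 1).
	 */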
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005921 cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
Thomas Petazzonia7868412017-03-07 16:53:13 +01005922 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005923
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005924 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005925 if (cause_misc) {
5926 mvpp2_cause_error(port->dev, cause_misc);
5927
5928 /* Clear the cause register */
5929 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01005930 mvpp2_percpu_write(port->priv, cpu,
5931 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
5932 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005933 }
5934
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005935 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5936 if (cause_tx) {
5937 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
5938 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
5939 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005940
5941 /* Process RX packets */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005942 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5943 cause_rx <<= qv->first_rxq;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005944 cause_rx |= qv->pending_cause_rx;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005945 while (cause_rx && budget > 0) {
5946 int count;
5947 struct mvpp2_rx_queue *rxq;
5948
5949 rxq = mvpp2_get_rx_queue(port, cause_rx);
5950 if (!rxq)
5951 break;
5952
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005953 count = mvpp2_rx(port, napi, budget, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005954 rx_done += count;
5955 budget -= count;
5956 if (budget > 0) {
5957 /* Clear the bit associated to this Rx queue
5958 * so that next iteration will continue from
5959 * the next Rx queue.
5960 */
5961 cause_rx &= ~(1 << rxq->logic_rxq);
5962 }
5963 }
5964
5965 if (budget > 0) {
5966 cause_rx = 0;
Eric Dumazet6ad20162017-01-30 08:22:01 -08005967 napi_complete_done(napi, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005968
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005969 mvpp2_qvec_interrupt_enable(qv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005970 }
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005971 qv->pending_cause_rx = cause_rx;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005972 return rx_done;
5973}
5974
5975/* Set hw internals when starting port */
5976static void mvpp2_start_dev(struct mvpp2_port *port)
5977{
Philippe Reynes8e072692016-06-28 00:08:11 +02005978 struct net_device *ndev = port->dev;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005979 int i;
Philippe Reynes8e072692016-06-28 00:08:11 +02005980
Marcin Wojtas3f518502014-07-10 16:52:13 -03005981 mvpp2_gmac_max_rx_size_set(port);
5982 mvpp2_txp_max_tx_size_set(port);
5983
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005984 for (i = 0; i < port->nqvecs; i++)
5985 napi_enable(&port->qvecs[i].napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005986
5987 /* Enable interrupts on all CPUs */
5988 mvpp2_interrupts_enable(port);
5989
5990 mvpp2_port_enable(port);
Philippe Reynes8e072692016-06-28 00:08:11 +02005991 phy_start(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005992 netif_tx_start_all_queues(port->dev);
5993}
5994
5995/* Set hw internals when stopping port */
5996static void mvpp2_stop_dev(struct mvpp2_port *port)
5997{
Philippe Reynes8e072692016-06-28 00:08:11 +02005998 struct net_device *ndev = port->dev;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005999 int i;
Philippe Reynes8e072692016-06-28 00:08:11 +02006000
Marcin Wojtas3f518502014-07-10 16:52:13 -03006001 /* Stop new packets from arriving to RXQs */
6002 mvpp2_ingress_disable(port);
6003
6004 mdelay(10);
6005
6006 /* Disable interrupts on all CPUs */
6007 mvpp2_interrupts_disable(port);
6008
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006009 for (i = 0; i < port->nqvecs; i++)
6010 napi_disable(&port->qvecs[i].napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006011
6012 netif_carrier_off(port->dev);
6013 netif_tx_stop_all_queues(port->dev);
6014
6015 mvpp2_egress_disable(port);
6016 mvpp2_port_disable(port);
Philippe Reynes8e072692016-06-28 00:08:11 +02006017 phy_stop(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006018}
6019
Marcin Wojtas3f518502014-07-10 16:52:13 -03006020static int mvpp2_check_ringparam_valid(struct net_device *dev,
6021 struct ethtool_ringparam *ring)
6022{
6023 u16 new_rx_pending = ring->rx_pending;
6024 u16 new_tx_pending = ring->tx_pending;
6025
6026 if (ring->rx_pending == 0 || ring->tx_pending == 0)
6027 return -EINVAL;
6028
6029 if (ring->rx_pending > MVPP2_MAX_RXD)
6030 new_rx_pending = MVPP2_MAX_RXD;
6031 else if (!IS_ALIGNED(ring->rx_pending, 16))
6032 new_rx_pending = ALIGN(ring->rx_pending, 16);
6033
6034 if (ring->tx_pending > MVPP2_MAX_TXD)
6035 new_tx_pending = MVPP2_MAX_TXD;
6036 else if (!IS_ALIGNED(ring->tx_pending, 32))
6037 new_tx_pending = ALIGN(ring->tx_pending, 32);
6038
6039 if (ring->rx_pending != new_rx_pending) {
6040 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
6041 ring->rx_pending, new_rx_pending);
6042 ring->rx_pending = new_rx_pending;
6043 }
6044
6045 if (ring->tx_pending != new_tx_pending) {
6046 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
6047 ring->tx_pending, new_tx_pending);
6048 ring->tx_pending = new_tx_pending;
6049 }
6050
6051 return 0;
6052}
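
/* Worked example (illustrative values): a requested rx_pending of 100 is
 * not 16-aligned, so it is rounded up to ALIGN(100, 16) = 112 and reported
 * via netdev_info() before being written back to the ring parameters.
 */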
6053
Thomas Petazzoni26975822017-03-07 16:53:14 +01006054static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006055{
6056 u32 mac_addr_l, mac_addr_m, mac_addr_h;
6057
6058 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
6059 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
6060 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
6061 addr[0] = (mac_addr_h >> 24) & 0xFF;
6062 addr[1] = (mac_addr_h >> 16) & 0xFF;
6063 addr[2] = (mac_addr_h >> 8) & 0xFF;
6064 addr[3] = mac_addr_h & 0xFF;
6065 addr[4] = mac_addr_m & 0xFF;
6066 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
6067}
6068
6069static int mvpp2_phy_connect(struct mvpp2_port *port)
6070{
6071 struct phy_device *phy_dev;
6072
6073 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
6074 port->phy_interface);
6075 if (!phy_dev) {
6076 netdev_err(port->dev, "cannot connect to phy\n");
6077 return -ENODEV;
6078 }
6079 phy_dev->supported &= PHY_GBIT_FEATURES;
6080 phy_dev->advertising = phy_dev->supported;
6081
Marcin Wojtas3f518502014-07-10 16:52:13 -03006082 port->link = 0;
6083 port->duplex = 0;
6084 port->speed = 0;
6085
6086 return 0;
6087}
6088
6089static void mvpp2_phy_disconnect(struct mvpp2_port *port)
6090{
Philippe Reynes8e072692016-06-28 00:08:11 +02006091 struct net_device *ndev = port->dev;
6092
6093 phy_disconnect(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006094}
6095
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006096static int mvpp2_irqs_init(struct mvpp2_port *port)
6097{
6098 int err, i;
6099
6100 for (i = 0; i < port->nqvecs; i++) {
6101 struct mvpp2_queue_vector *qv = port->qvecs + i;
6102
6103 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
6104 if (err)
6105 goto err;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006106
6107 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
6108 irq_set_affinity_hint(qv->irq,
6109 cpumask_of(qv->sw_thread_id));
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006110 }
6111
6112 return 0;
6113err:
6114 for (i = 0; i < port->nqvecs; i++) {
6115 struct mvpp2_queue_vector *qv = port->qvecs + i;
6116
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006117 irq_set_affinity_hint(qv->irq, NULL);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006118 free_irq(qv->irq, qv);
6119 }
6120
6121 return err;
6122}
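/* Private (per-CPU) queue vectors get an affinity hint pointing at their
 * sw_thread_id CPU, so that, assuming the hint is honoured (e.g. by
 * irqbalance), RX/TX completion work stays local to the CPU owning the
 * queues. Shared vectors are left unpinned.
 */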
6123
6124static void mvpp2_irqs_deinit(struct mvpp2_port *port)
6125{
6126 int i;
6127
6128 for (i = 0; i < port->nqvecs; i++) {
6129 struct mvpp2_queue_vector *qv = port->qvecs + i;
6130
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006131 irq_set_affinity_hint(qv->irq, NULL);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006132 free_irq(qv->irq, qv);
6133 }
6134}
6135
Marcin Wojtas3f518502014-07-10 16:52:13 -03006136static int mvpp2_open(struct net_device *dev)
6137{
6138 struct mvpp2_port *port = netdev_priv(dev);
6139 unsigned char mac_bcast[ETH_ALEN] = {
6140 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6141 int err;
6142
6143 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
6144 if (err) {
6145 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
6146 return err;
6147 }
6148 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
6149 dev->dev_addr, true);
6150 if (err) {
6151		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
6152 return err;
6153 }
6154 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
6155 if (err) {
6156 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
6157 return err;
6158 }
6159 err = mvpp2_prs_def_flow(port);
6160 if (err) {
6161 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
6162 return err;
6163 }
6164
6165 /* Allocate the Rx/Tx queues */
6166 err = mvpp2_setup_rxqs(port);
6167 if (err) {
6168 netdev_err(port->dev, "cannot allocate Rx queues\n");
6169 return err;
6170 }
6171
6172 err = mvpp2_setup_txqs(port);
6173 if (err) {
6174 netdev_err(port->dev, "cannot allocate Tx queues\n");
6175 goto err_cleanup_rxqs;
6176 }
6177
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006178 err = mvpp2_irqs_init(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006179 if (err) {
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006180 netdev_err(port->dev, "cannot init IRQs\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03006181 goto err_cleanup_txqs;
6182 }
6183
6184	/* By default, the link is down */
6185 netif_carrier_off(port->dev);
6186
6187 err = mvpp2_phy_connect(port);
6188 if (err < 0)
6189 goto err_free_irq;
6190
6191 /* Unmask interrupts on all CPUs */
6192 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006193 mvpp2_shared_interrupt_mask_unmask(port, false);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006194
6195 mvpp2_start_dev(port);
6196
6197 return 0;
6198
6199err_free_irq:
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006200 mvpp2_irqs_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006201err_cleanup_txqs:
6202 mvpp2_cleanup_txqs(port);
6203err_cleanup_rxqs:
6204 mvpp2_cleanup_rxqs(port);
6205 return err;
6206}
6207
6208static int mvpp2_stop(struct net_device *dev)
6209{
6210 struct mvpp2_port *port = netdev_priv(dev);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006211 struct mvpp2_port_pcpu *port_pcpu;
6212 int cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006213
6214 mvpp2_stop_dev(port);
6215 mvpp2_phy_disconnect(port);
6216
6217 /* Mask interrupts on all CPUs */
6218 on_each_cpu(mvpp2_interrupts_mask, port, 1);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006219 mvpp2_shared_interrupt_mask_unmask(port, true);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006220
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006221 mvpp2_irqs_deinit(port);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006222 if (!port->has_tx_irqs) {
6223 for_each_present_cpu(cpu) {
6224 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006225
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006226 hrtimer_cancel(&port_pcpu->tx_done_timer);
6227 port_pcpu->timer_scheduled = false;
6228 tasklet_kill(&port_pcpu->tx_done_tasklet);
6229 }
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006230 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006231 mvpp2_cleanup_rxqs(port);
6232 mvpp2_cleanup_txqs(port);
6233
6234 return 0;
6235}
6236
6237static void mvpp2_set_rx_mode(struct net_device *dev)
6238{
6239 struct mvpp2_port *port = netdev_priv(dev);
6240 struct mvpp2 *priv = port->priv;
6241 struct netdev_hw_addr *ha;
6242 int id = port->id;
6243 bool allmulti = dev->flags & IFF_ALLMULTI;
6244
6245 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
6246 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
6247 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
6248
6249	/* Remove all port->id's mcast entries */
6250 mvpp2_prs_mcast_del_all(priv, id);
6251
6252 if (allmulti && !netdev_mc_empty(dev)) {
6253 netdev_for_each_mc_addr(ha, dev)
6254 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
6255 }
6256}
6257
6258static int mvpp2_set_mac_address(struct net_device *dev, void *p)
6259{
6260 struct mvpp2_port *port = netdev_priv(dev);
6261 const struct sockaddr *addr = p;
6262 int err;
6263
6264 if (!is_valid_ether_addr(addr->sa_data)) {
6265 err = -EADDRNOTAVAIL;
Markus Elfringc1175542017-04-17 11:10:47 +02006266 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006267 }
6268
6269 if (!netif_running(dev)) {
6270 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6271 if (!err)
6272 return 0;
6273 /* Reconfigure parser to accept the original MAC address */
6274 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6275 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02006276 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006277 }
6278
6279 mvpp2_stop_dev(port);
6280
6281 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6282 if (!err)
6283 goto out_start;
6284
6285	/* Reconfigure parser to accept the original MAC address */
6286 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6287 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02006288 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006289out_start:
6290 mvpp2_start_dev(port);
6291 mvpp2_egress_enable(port);
6292 mvpp2_ingress_enable(port);
6293 return 0;
Markus Elfringc1175542017-04-17 11:10:47 +02006294log_error:
Markus Elfringdfd42402017-04-17 11:20:41 +02006295 netdev_err(dev, "failed to change MAC address\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03006296 return err;
6297}
6298
6299static int mvpp2_change_mtu(struct net_device *dev, int mtu)
6300{
6301 struct mvpp2_port *port = netdev_priv(dev);
6302 int err;
6303
Jarod Wilson57779872016-10-17 15:54:06 -04006304 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
6305 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
6306 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
6307 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006308 }
6309
6310 if (!netif_running(dev)) {
6311 err = mvpp2_bm_update_mtu(dev, mtu);
6312 if (!err) {
6313 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6314 return 0;
6315 }
6316
6317 /* Reconfigure BM to the original MTU */
6318 err = mvpp2_bm_update_mtu(dev, dev->mtu);
6319 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02006320 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006321 }
6322
6323 mvpp2_stop_dev(port);
6324
6325 err = mvpp2_bm_update_mtu(dev, mtu);
6326 if (!err) {
6327 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6328 goto out_start;
6329 }
6330
6331 /* Reconfigure BM to the original MTU */
6332 err = mvpp2_bm_update_mtu(dev, dev->mtu);
6333 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02006334 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006335
6336out_start:
6337 mvpp2_start_dev(port);
6338 mvpp2_egress_enable(port);
6339 mvpp2_ingress_enable(port);
6340
6341 return 0;
Markus Elfringc1175542017-04-17 11:10:47 +02006342log_error:
Markus Elfringdfd42402017-04-17 11:20:41 +02006343 netdev_err(dev, "failed to change MTU\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03006344 return err;
6345}
6346
stephen hemmingerbc1f4472017-01-06 19:12:52 -08006347static void
Marcin Wojtas3f518502014-07-10 16:52:13 -03006348mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6349{
6350 struct mvpp2_port *port = netdev_priv(dev);
6351 unsigned int start;
6352 int cpu;
6353
6354 for_each_possible_cpu(cpu) {
6355 struct mvpp2_pcpu_stats *cpu_stats;
6356 u64 rx_packets;
6357 u64 rx_bytes;
6358 u64 tx_packets;
6359 u64 tx_bytes;
6360
6361 cpu_stats = per_cpu_ptr(port->stats, cpu);
6362 do {
6363 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
6364 rx_packets = cpu_stats->rx_packets;
6365 rx_bytes = cpu_stats->rx_bytes;
6366 tx_packets = cpu_stats->tx_packets;
6367 tx_bytes = cpu_stats->tx_bytes;
6368 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
6369
6370 stats->rx_packets += rx_packets;
6371 stats->rx_bytes += rx_bytes;
6372 stats->tx_packets += tx_packets;
6373 stats->tx_bytes += tx_bytes;
6374 }
6375
6376 stats->rx_errors = dev->stats.rx_errors;
6377 stats->rx_dropped = dev->stats.rx_dropped;
6378 stats->tx_dropped = dev->stats.tx_dropped;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006379}
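/* The u64_stats_fetch_begin_irq()/retry_irq() loop above re-reads the
 * per-CPU counters if a writer updated them in the meantime, so the sums
 * below are a consistent 64-bit snapshot even on 32-bit systems where such
 * reads are not atomic.
 */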
6380
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006381static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6382{
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006383 int ret;
6384
Philippe Reynes8e072692016-06-28 00:08:11 +02006385 if (!dev->phydev)
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006386 return -ENOTSUPP;
6387
Philippe Reynes8e072692016-06-28 00:08:11 +02006388 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006389 if (!ret)
6390 mvpp2_link_event(dev);
6391
6392 return ret;
6393}
6394
Marcin Wojtas3f518502014-07-10 16:52:13 -03006395/* Ethtool methods */
6396
Marcin Wojtas3f518502014-07-10 16:52:13 -03006397/* Set interrupt coalescing for ethtools */
6398static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
6399 struct ethtool_coalesce *c)
6400{
6401 struct mvpp2_port *port = netdev_priv(dev);
6402 int queue;
6403
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006404 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006405 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6406
6407 rxq->time_coal = c->rx_coalesce_usecs;
6408 rxq->pkts_coal = c->rx_max_coalesced_frames;
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01006409 mvpp2_rx_pkts_coal_set(port, rxq);
6410 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006411 }
6412
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006413 if (port->has_tx_irqs) {
6414 port->tx_time_coal = c->tx_coalesce_usecs;
6415 mvpp2_tx_time_coal_set(port);
6416 }
6417
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006418 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006419 struct mvpp2_tx_queue *txq = port->txqs[queue];
6420
6421 txq->done_pkts_coal = c->tx_max_coalesced_frames;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006422
6423 if (port->has_tx_irqs)
6424 mvpp2_tx_pkts_coal_set(port, txq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006425 }
6426
Marcin Wojtas3f518502014-07-10 16:52:13 -03006427 return 0;
6428}
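/* For example (illustrative), "ethtool -C ethX rx-usecs 100 rx-frames 32
 * tx-frames 64" arrives here as c->rx_coalesce_usecs = 100,
 * c->rx_max_coalesced_frames = 32 and c->tx_max_coalesced_frames = 64, and
 * is applied to every RX/TX queue of the port.
 */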
6429
6430/* get coalescing for ethtools */
6431static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
6432 struct ethtool_coalesce *c)
6433{
6434 struct mvpp2_port *port = netdev_priv(dev);
6435
6436 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
6437 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
6438 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
6439 return 0;
6440}
6441
6442static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
6443 struct ethtool_drvinfo *drvinfo)
6444{
6445 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
6446 sizeof(drvinfo->driver));
6447 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
6448 sizeof(drvinfo->version));
6449 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
6450 sizeof(drvinfo->bus_info));
6451}
6452
6453static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
6454 struct ethtool_ringparam *ring)
6455{
6456 struct mvpp2_port *port = netdev_priv(dev);
6457
6458 ring->rx_max_pending = MVPP2_MAX_RXD;
6459 ring->tx_max_pending = MVPP2_MAX_TXD;
6460 ring->rx_pending = port->rx_ring_size;
6461 ring->tx_pending = port->tx_ring_size;
6462}
6463
6464static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
6465 struct ethtool_ringparam *ring)
6466{
6467 struct mvpp2_port *port = netdev_priv(dev);
6468 u16 prev_rx_ring_size = port->rx_ring_size;
6469 u16 prev_tx_ring_size = port->tx_ring_size;
6470 int err;
6471
6472 err = mvpp2_check_ringparam_valid(dev, ring);
6473 if (err)
6474 return err;
6475
6476 if (!netif_running(dev)) {
6477 port->rx_ring_size = ring->rx_pending;
6478 port->tx_ring_size = ring->tx_pending;
6479 return 0;
6480 }
6481
6482 /* The interface is running, so we have to force a
6483 * reallocation of the queues
6484 */
6485 mvpp2_stop_dev(port);
6486 mvpp2_cleanup_rxqs(port);
6487 mvpp2_cleanup_txqs(port);
6488
6489 port->rx_ring_size = ring->rx_pending;
6490 port->tx_ring_size = ring->tx_pending;
6491
6492 err = mvpp2_setup_rxqs(port);
6493 if (err) {
6494 /* Reallocate Rx queues with the original ring size */
6495 port->rx_ring_size = prev_rx_ring_size;
6496 ring->rx_pending = prev_rx_ring_size;
6497 err = mvpp2_setup_rxqs(port);
6498 if (err)
6499 goto err_out;
6500 }
6501 err = mvpp2_setup_txqs(port);
6502 if (err) {
6503 /* Reallocate Tx queues with the original ring size */
6504 port->tx_ring_size = prev_tx_ring_size;
6505 ring->tx_pending = prev_tx_ring_size;
6506 err = mvpp2_setup_txqs(port);
6507 if (err)
6508 goto err_clean_rxqs;
6509 }
6510
6511 mvpp2_start_dev(port);
6512 mvpp2_egress_enable(port);
6513 mvpp2_ingress_enable(port);
6514
6515 return 0;
6516
6517err_clean_rxqs:
6518 mvpp2_cleanup_rxqs(port);
6519err_out:
Markus Elfringdfd42402017-04-17 11:20:41 +02006520	netdev_err(dev, "failed to change ring parameters\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03006521 return err;
6522}
6523
6524/* Device ops */
6525
6526static const struct net_device_ops mvpp2_netdev_ops = {
6527 .ndo_open = mvpp2_open,
6528 .ndo_stop = mvpp2_stop,
6529 .ndo_start_xmit = mvpp2_tx,
6530 .ndo_set_rx_mode = mvpp2_set_rx_mode,
6531 .ndo_set_mac_address = mvpp2_set_mac_address,
6532 .ndo_change_mtu = mvpp2_change_mtu,
6533 .ndo_get_stats64 = mvpp2_get_stats64,
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006534 .ndo_do_ioctl = mvpp2_ioctl,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006535};
6536
6537static const struct ethtool_ops mvpp2_eth_tool_ops = {
Florian Fainelli00606c42016-11-15 11:19:48 -08006538 .nway_reset = phy_ethtool_nway_reset,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006539 .get_link = ethtool_op_get_link,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006540 .set_coalesce = mvpp2_ethtool_set_coalesce,
6541 .get_coalesce = mvpp2_ethtool_get_coalesce,
6542 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
6543 .get_ringparam = mvpp2_ethtool_get_ringparam,
6544 .set_ringparam = mvpp2_ethtool_set_ringparam,
Philippe Reynesfb773e92016-06-28 00:08:12 +02006545 .get_link_ksettings = phy_ethtool_get_link_ksettings,
6546 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006547};
6548
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006549/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
6550 * had a single IRQ defined per-port.
6551 */
6552static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
6553 struct device_node *port_node)
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006554{
6555 struct mvpp2_queue_vector *v = &port->qvecs[0];
6556
6557 v->first_rxq = 0;
6558 v->nrxqs = port->nrxqs;
6559 v->type = MVPP2_QUEUE_VECTOR_SHARED;
6560 v->sw_thread_id = 0;
6561 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
6562 v->port = port;
6563 v->irq = irq_of_parse_and_map(port_node, 0);
6564 if (v->irq <= 0)
6565 return -EINVAL;
6566 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
6567 NAPI_POLL_WEIGHT);
6568
6569 port->nqvecs = 1;
6570
6571 return 0;
6572}
6573
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006574static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
6575 struct device_node *port_node)
6576{
6577 struct mvpp2_queue_vector *v;
6578 int i, ret;
6579
6580 port->nqvecs = num_possible_cpus();
6581 if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
6582 port->nqvecs += 1;
6583
6584 for (i = 0; i < port->nqvecs; i++) {
6585 char irqname[16];
6586
6587 v = port->qvecs + i;
6588
6589 v->port = port;
6590 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
6591 v->sw_thread_id = i;
6592 v->sw_thread_mask = BIT(i);
6593
6594 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
6595
6596 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
6597 v->first_rxq = i * MVPP2_DEFAULT_RXQ;
6598 v->nrxqs = MVPP2_DEFAULT_RXQ;
6599 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
6600 i == (port->nqvecs - 1)) {
6601 v->first_rxq = 0;
6602 v->nrxqs = port->nrxqs;
6603 v->type = MVPP2_QUEUE_VECTOR_SHARED;
6604 strncpy(irqname, "rx-shared", sizeof(irqname));
6605 }
6606
6607 v->irq = of_irq_get_byname(port_node, irqname);
6608 if (v->irq <= 0) {
6609 ret = -EINVAL;
6610 goto err;
6611 }
6612
6613 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
6614 NAPI_POLL_WEIGHT);
6615 }
6616
6617 return 0;
6618
6619err:
6620 for (i = 0; i < port->nqvecs; i++)
6621 irq_dispose_mapping(port->qvecs[i].irq);
6622 return ret;
6623}
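/* As a sketch of the resulting layout on a 4-CPU system: in
 * MVPP2_QDIST_MULTI_MODE there are 4 private vectors ("tx-cpu0".."tx-cpu3"),
 * each owning MVPP2_DEFAULT_RXQ RX queues; in MVPP2_QDIST_SINGLE_MODE there
 * are the same 4 private vectors (TX completion only) plus a 5th shared
 * "rx-shared" vector that owns all of the port's RX queues.
 */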
6624
6625static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
6626 struct device_node *port_node)
6627{
6628 if (port->has_tx_irqs)
6629 return mvpp2_multi_queue_vectors_init(port, port_node);
6630 else
6631 return mvpp2_simple_queue_vectors_init(port, port_node);
6632}
6633
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006634static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
6635{
6636 int i;
6637
6638 for (i = 0; i < port->nqvecs; i++)
6639 irq_dispose_mapping(port->qvecs[i].irq);
6640}
6641
6642/* Configure Rx queue group interrupt for this port */
6643static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
6644{
6645 struct mvpp2 *priv = port->priv;
6646 u32 val;
6647 int i;
6648
6649 if (priv->hw_version == MVPP21) {
6650 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
6651 port->nrxqs);
6652 return;
6653 }
6654
6655 /* Handle the more complicated PPv2.2 case */
6656 for (i = 0; i < port->nqvecs; i++) {
6657 struct mvpp2_queue_vector *qv = port->qvecs + i;
6658
6659 if (!qv->nrxqs)
6660 continue;
6661
6662 val = qv->sw_thread_id;
6663 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
6664 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
6665
6666 val = qv->first_rxq;
6667 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
6668 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
6669 }
6670}
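/* On PPv2.2 the RXQ group registers are programmed indirectly: the write to
 * MVPP22_ISR_RXQ_GROUP_INDEX_REG selects the (port, sw_thread) group, and
 * the following write to MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG sets that
 * group's first RX queue and queue count.
 */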
6671
Marcin Wojtas3f518502014-07-10 16:52:13 -03006672/* Initialize port HW */
6673static int mvpp2_port_init(struct mvpp2_port *port)
6674{
6675 struct device *dev = port->dev->dev.parent;
6676 struct mvpp2 *priv = port->priv;
6677 struct mvpp2_txq_pcpu *txq_pcpu;
6678 int queue, cpu, err;
6679
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006680 /* Checks for hardware constraints */
6681 if (port->first_rxq + port->nrxqs >
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01006682 MVPP2_MAX_PORTS * priv->max_port_rxqs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006683 return -EINVAL;
6684
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006685 if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
6686 (port->ntxqs > MVPP2_MAX_TXQ))
6687 return -EINVAL;
6688
Marcin Wojtas3f518502014-07-10 16:52:13 -03006689 /* Disable port */
6690 mvpp2_egress_disable(port);
6691 mvpp2_port_disable(port);
6692
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006693 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
6694
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006695 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03006696 GFP_KERNEL);
6697 if (!port->txqs)
6698 return -ENOMEM;
6699
6700 /* Associate physical Tx queues to this port and initialize.
6701 * The mapping is predefined.
6702 */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006703 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006704 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
6705 struct mvpp2_tx_queue *txq;
6706
6707 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
Christophe Jaillet177c8d12017-02-19 10:19:57 +01006708 if (!txq) {
6709 err = -ENOMEM;
6710 goto err_free_percpu;
6711 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006712
6713 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
6714 if (!txq->pcpu) {
6715 err = -ENOMEM;
6716 goto err_free_percpu;
6717 }
6718
6719 txq->id = queue_phy_id;
6720 txq->log_id = queue;
6721 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
6722 for_each_present_cpu(cpu) {
6723 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6724 txq_pcpu->cpu = cpu;
6725 }
6726
6727 port->txqs[queue] = txq;
6728 }
6729
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006730 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03006731 GFP_KERNEL);
6732 if (!port->rxqs) {
6733 err = -ENOMEM;
6734 goto err_free_percpu;
6735 }
6736
6737 /* Allocate and initialize Rx queue for this port */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006738 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006739 struct mvpp2_rx_queue *rxq;
6740
6741 /* Map physical Rx queue to port's logical Rx queue */
6742 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08006743 if (!rxq) {
6744 err = -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006745 goto err_free_percpu;
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08006746 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006747 /* Map this Rx queue to a physical queue */
6748 rxq->id = port->first_rxq + queue;
6749 rxq->port = port->id;
6750 rxq->logic_rxq = queue;
6751
6752 port->rxqs[queue] = rxq;
6753 }
6754
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006755 mvpp2_rx_irqs_setup(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006756
6757 /* Create Rx descriptor rings */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006758 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006759 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6760
6761 rxq->size = port->rx_ring_size;
6762 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6763 rxq->time_coal = MVPP2_RX_COAL_USEC;
6764 }
6765
6766 mvpp2_ingress_disable(port);
6767
6768 /* Port default configuration */
6769 mvpp2_defaults_set(port);
6770
6771 /* Port's classifier configuration */
6772 mvpp2_cls_oversize_rxq_set(port);
6773 mvpp2_cls_port_config(port);
6774
6775 /* Provide an initial Rx packet size */
6776 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6777
6778 /* Initialize pools for swf */
6779 err = mvpp2_swf_bm_pool_init(port);
6780 if (err)
6781 goto err_free_percpu;
6782
6783 return 0;
6784
6785err_free_percpu:
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006786 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006787 if (!port->txqs[queue])
6788 continue;
6789 free_percpu(port->txqs[queue]->pcpu);
6790 }
6791 return err;
6792}
6793
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006794/* Checks if the port DT description has the TX interrupts
6795 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
6796 * they are available, but we need to keep support for old DTs.
6797 */
6798static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
6799 struct device_node *port_node)
6800{
6801 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
6802 "tx-cpu2", "tx-cpu3" };
6803 int ret, i;
6804
6805 if (priv->hw_version == MVPP21)
6806 return false;
6807
6808 for (i = 0; i < 5; i++) {
6809 ret = of_property_match_string(port_node, "interrupt-names",
6810 irqs[i]);
6811 if (ret < 0)
6812 return false;
6813 }
6814
6815 return true;
6816}
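/* A PPv2.2 port node is expected to list "rx-shared" and "tx-cpu0".."tx-cpu3"
 * in its interrupt-names property (see irqs[] above); if any of these names
 * is missing, the driver falls back to the legacy single-IRQ description and
 * forces MVPP2_QDIST_SINGLE_MODE.
 */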
6817
Marcin Wojtas3f518502014-07-10 16:52:13 -03006818/* Ports initialization */
6819static int mvpp2_port_probe(struct platform_device *pdev,
6820 struct device_node *port_node,
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01006821 struct mvpp2 *priv)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006822{
6823 struct device_node *phy_node;
6824 struct mvpp2_port *port;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006825 struct mvpp2_port_pcpu *port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006826 struct net_device *dev;
6827 struct resource *res;
6828 const char *dt_mac_addr;
6829 const char *mac_from;
6830 char hw_mac_addr[ETH_ALEN];
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006831 unsigned int ntxqs, nrxqs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006832 bool has_tx_irqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006833 u32 id;
6834 int features;
6835 int phy_mode;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006836 int err, i, cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006837
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006838 has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
6839
6840 if (!has_tx_irqs)
6841 queue_mode = MVPP2_QDIST_SINGLE_MODE;
6842
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006843 ntxqs = MVPP2_MAX_TXQ;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006844 if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
6845 nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
6846 else
6847 nrxqs = MVPP2_DEFAULT_RXQ;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006848
6849 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006850 if (!dev)
6851 return -ENOMEM;
6852
6853 phy_node = of_parse_phandle(port_node, "phy", 0);
6854 if (!phy_node) {
6855 dev_err(&pdev->dev, "missing phy\n");
6856 err = -ENODEV;
6857 goto err_free_netdev;
6858 }
6859
6860 phy_mode = of_get_phy_mode(port_node);
6861 if (phy_mode < 0) {
6862 dev_err(&pdev->dev, "incorrect phy mode\n");
6863 err = phy_mode;
6864 goto err_free_netdev;
6865 }
6866
6867 if (of_property_read_u32(port_node, "port-id", &id)) {
6868 err = -EINVAL;
6869 dev_err(&pdev->dev, "missing port-id value\n");
6870 goto err_free_netdev;
6871 }
6872
6873 dev->tx_queue_len = MVPP2_MAX_TXD;
6874 dev->watchdog_timeo = 5 * HZ;
6875 dev->netdev_ops = &mvpp2_netdev_ops;
6876 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6877
6878 port = netdev_priv(dev);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006879 port->dev = dev;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006880 port->ntxqs = ntxqs;
6881 port->nrxqs = nrxqs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006882 port->priv = priv;
6883 port->has_tx_irqs = has_tx_irqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006884
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006885 err = mvpp2_queue_vectors_init(port, port_node);
6886 if (err)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006887 goto err_free_netdev;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006888
6889 if (of_property_read_bool(port_node, "marvell,loopback"))
6890 port->flags |= MVPP2_F_LOOPBACK;
6891
Marcin Wojtas3f518502014-07-10 16:52:13 -03006892 port->id = id;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01006893 if (priv->hw_version == MVPP21)
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006894 port->first_rxq = port->id * port->nrxqs;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01006895 else
6896 port->first_rxq = port->id * priv->max_port_rxqs;
6897
Marcin Wojtas3f518502014-07-10 16:52:13 -03006898 port->phy_node = phy_node;
6899 port->phy_interface = phy_mode;
6900
Thomas Petazzonia7868412017-03-07 16:53:13 +01006901 if (priv->hw_version == MVPP21) {
6902 res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
6903 port->base = devm_ioremap_resource(&pdev->dev, res);
6904 if (IS_ERR(port->base)) {
6905 err = PTR_ERR(port->base);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006906 goto err_deinit_qvecs;
Thomas Petazzonia7868412017-03-07 16:53:13 +01006907 }
6908 } else {
6909 if (of_property_read_u32(port_node, "gop-port-id",
6910 &port->gop_id)) {
6911 err = -EINVAL;
6912 dev_err(&pdev->dev, "missing gop-port-id value\n");
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006913 goto err_deinit_qvecs;
Thomas Petazzonia7868412017-03-07 16:53:13 +01006914 }
6915
6916 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006917 }
6918
6919 /* Alloc per-cpu stats */
6920 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6921 if (!port->stats) {
6922 err = -ENOMEM;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006923 goto err_deinit_qvecs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006924 }
6925
6926 dt_mac_addr = of_get_mac_address(port_node);
6927 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
6928 mac_from = "device tree";
6929 ether_addr_copy(dev->dev_addr, dt_mac_addr);
6930 } else {
Thomas Petazzoni26975822017-03-07 16:53:14 +01006931 if (priv->hw_version == MVPP21)
6932 mvpp21_get_mac_address(port, hw_mac_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006933 if (is_valid_ether_addr(hw_mac_addr)) {
6934 mac_from = "hardware";
6935 ether_addr_copy(dev->dev_addr, hw_mac_addr);
6936 } else {
6937 mac_from = "random";
6938 eth_hw_addr_random(dev);
6939 }
6940 }
6941
6942 port->tx_ring_size = MVPP2_MAX_TXD;
6943 port->rx_ring_size = MVPP2_MAX_RXD;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006944 SET_NETDEV_DEV(dev, &pdev->dev);
6945
6946 err = mvpp2_port_init(port);
6947 if (err < 0) {
6948 dev_err(&pdev->dev, "failed to init port %d\n", id);
6949 goto err_free_stats;
6950 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01006951
6952 mvpp2_port_mii_set(port);
6953 mvpp2_port_periodic_xon_disable(port);
6954
6955 if (priv->hw_version == MVPP21)
6956 mvpp2_port_fc_adv_enable(port);
6957
6958 mvpp2_port_reset(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006959
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006960 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6961 if (!port->pcpu) {
6962 err = -ENOMEM;
6963 goto err_free_txq_pcpu;
6964 }
6965
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006966 if (!port->has_tx_irqs) {
6967 for_each_present_cpu(cpu) {
6968 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006969
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006970 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6971 HRTIMER_MODE_REL_PINNED);
6972 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6973 port_pcpu->timer_scheduled = false;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006974
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006975 tasklet_init(&port_pcpu->tx_done_tasklet,
6976 mvpp2_tx_proc_cb,
6977 (unsigned long)dev);
6978 }
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006979 }
6980
Marcin Wojtas3f518502014-07-10 16:52:13 -03006981 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6982 dev->features = features | NETIF_F_RXCSUM;
6983 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
6984 dev->vlan_features |= features;
6985
Jarod Wilson57779872016-10-17 15:54:06 -04006986 /* MTU range: 68 - 9676 */
6987 dev->min_mtu = ETH_MIN_MTU;
6988 /* 9676 == 9700 - 20 and rounding to 8 */
6989 dev->max_mtu = 9676;
6990
Marcin Wojtas3f518502014-07-10 16:52:13 -03006991 err = register_netdev(dev);
6992 if (err < 0) {
6993 dev_err(&pdev->dev, "failed to register netdev\n");
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006994 goto err_free_port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006995 }
6996 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6997
Marcin Wojtas3f518502014-07-10 16:52:13 -03006998 priv->port_list[id] = port;
6999 return 0;
7000
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007001err_free_port_pcpu:
7002 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007003err_free_txq_pcpu:
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007004 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007005 free_percpu(port->txqs[i]->pcpu);
7006err_free_stats:
7007 free_percpu(port->stats);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007008err_deinit_qvecs:
7009 mvpp2_queue_vectors_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007010err_free_netdev:
Peter Chenccb80392016-08-01 15:02:37 +08007011 of_node_put(phy_node);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007012 free_netdev(dev);
7013 return err;
7014}
7015
7016/* Ports removal routine */
7017static void mvpp2_port_remove(struct mvpp2_port *port)
7018{
7019 int i;
7020
7021 unregister_netdev(port->dev);
Peter Chenccb80392016-08-01 15:02:37 +08007022 of_node_put(port->phy_node);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007023 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007024 free_percpu(port->stats);
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007025 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007026 free_percpu(port->txqs[i]->pcpu);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007027 mvpp2_queue_vectors_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007028 free_netdev(port->dev);
7029}
7030
7031/* Initialize decoding windows */
7032static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
7033 struct mvpp2 *priv)
7034{
7035 u32 win_enable;
7036 int i;
7037
7038 for (i = 0; i < 6; i++) {
7039 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
7040 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
7041
7042 if (i < 4)
7043 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
7044 }
7045
7046 win_enable = 0;
7047
7048 for (i = 0; i < dram->num_cs; i++) {
7049 const struct mbus_dram_window *cs = dram->cs + i;
7050
7051 mvpp2_write(priv, MVPP2_WIN_BASE(i),
7052 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
7053 dram->mbus_dram_target_id);
7054
7055 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
7056 (cs->size - 1) & 0xffff0000);
7057
7058 win_enable |= (1 << i);
7059 }
7060
7061 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
7062}
7063
7064/* Initialize Rx FIFOs */
7065static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
7066{
7067 int port;
7068
7069 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
7070 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
7071 MVPP2_RX_FIFO_PORT_DATA_SIZE);
7072 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
7073 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
7074 }
7075
7076 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7077 MVPP2_RX_FIFO_PORT_MIN_PKT);
7078 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7079}
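/* Each port is given a fixed slice of RX data and attribute FIFO
 * (MVPP2_RX_FIFO_PORT_DATA_SIZE / MVPP2_RX_FIFO_PORT_ATTR_SIZE), the global
 * minimum packet size is set, and the final write to MVPP2_RX_FIFO_INIT_REG
 * presumably latches this FIFO configuration.
 */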
7080
Thomas Petazzoni6763ce32017-03-07 16:53:15 +01007081static void mvpp2_axi_init(struct mvpp2 *priv)
7082{
7083 u32 val, rdval, wrval;
7084
7085 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
7086
7087 /* AXI Bridge Configuration */
7088
7089 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
7090 << MVPP22_AXI_ATTR_CACHE_OFFS;
7091 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7092 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
7093
7094 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
7095 << MVPP22_AXI_ATTR_CACHE_OFFS;
7096 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7097 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
7098
7099 /* BM */
7100 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
7101 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
7102
7103 /* Descriptors */
7104 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
7105 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
7106 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
7107 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
7108
7109 /* Buffer Data */
7110 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
7111 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
7112
7113 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
7114 << MVPP22_AXI_CODE_CACHE_OFFS;
7115 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
7116 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7117 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
7118 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
7119
7120 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
7121 << MVPP22_AXI_CODE_CACHE_OFFS;
7122 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7123 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7124
7125 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
7126
7127 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
7128 << MVPP22_AXI_CODE_CACHE_OFFS;
7129 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7130 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7131
7132 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
7133}
7134
Marcin Wojtas3f518502014-07-10 16:52:13 -03007135/* Initialize network controller common part HW */
7136static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
7137{
7138 const struct mbus_dram_target_info *dram_target_info;
7139 int err, i;
Marcin Wojtas08a23752014-07-21 13:48:12 -03007140 u32 val;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007141
Marcin Wojtas3f518502014-07-10 16:52:13 -03007142 /* MBUS windows configuration */
7143 dram_target_info = mv_mbus_dram_info();
7144 if (dram_target_info)
7145 mvpp2_conf_mbus_windows(dram_target_info, priv);
7146
Thomas Petazzoni6763ce32017-03-07 16:53:15 +01007147 if (priv->hw_version == MVPP22)
7148 mvpp2_axi_init(priv);
7149
Marcin Wojtas08a23752014-07-21 13:48:12 -03007150 /* Disable HW PHY polling */
Thomas Petazzoni26975822017-03-07 16:53:14 +01007151 if (priv->hw_version == MVPP21) {
7152 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7153 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
7154 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7155 } else {
7156 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7157 val &= ~MVPP22_SMI_POLLING_EN;
7158 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7159 }
Marcin Wojtas08a23752014-07-21 13:48:12 -03007160
Marcin Wojtas3f518502014-07-10 16:52:13 -03007161 /* Allocate and initialize aggregated TXQs */
7162 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
Markus Elfringd7ce3ce2017-04-17 08:48:23 +02007163 sizeof(*priv->aggr_txqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03007164 GFP_KERNEL);
7165 if (!priv->aggr_txqs)
7166 return -ENOMEM;
7167
7168 for_each_present_cpu(i) {
7169 priv->aggr_txqs[i].id = i;
7170 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
7171 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
7172 MVPP2_AGGR_TXQ_SIZE, i, priv);
7173 if (err < 0)
7174 return err;
7175 }
7176
7177 /* Rx Fifo Init */
7178 mvpp2_rx_fifo_init(priv);
7179
Thomas Petazzoni26975822017-03-07 16:53:14 +01007180 if (priv->hw_version == MVPP21)
7181 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
7182 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007183
7184	/* Allow cache snoop when transmitting packets */
7185 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
7186
7187 /* Buffer Manager initialization */
7188 err = mvpp2_bm_init(pdev, priv);
7189 if (err < 0)
7190 return err;
7191
7192 /* Parser default initialization */
7193 err = mvpp2_prs_default_init(pdev, priv);
7194 if (err < 0)
7195 return err;
7196
7197 /* Classifier default initialization */
7198 mvpp2_cls_init(priv);
7199
7200 return 0;
7201}
7202
7203static int mvpp2_probe(struct platform_device *pdev)
7204{
7205 struct device_node *dn = pdev->dev.of_node;
7206 struct device_node *port_node;
7207 struct mvpp2 *priv;
7208 struct resource *res;
Thomas Petazzonia7868412017-03-07 16:53:13 +01007209 void __iomem *base;
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02007210 int port_count, i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007211 int err;
7212
Markus Elfring0b92e592017-04-17 08:38:32 +02007213 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007214 if (!priv)
7215 return -ENOMEM;
7216
Thomas Petazzonifaca9242017-03-07 16:53:06 +01007217 priv->hw_version =
7218 (unsigned long)of_device_get_match_data(&pdev->dev);
7219
Marcin Wojtas3f518502014-07-10 16:52:13 -03007220 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01007221 base = devm_ioremap_resource(&pdev->dev, res);
7222 if (IS_ERR(base))
7223 return PTR_ERR(base);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007224
Thomas Petazzonia7868412017-03-07 16:53:13 +01007225 if (priv->hw_version == MVPP21) {
7226 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7227 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
7228 if (IS_ERR(priv->lms_base))
7229 return PTR_ERR(priv->lms_base);
7230 } else {
7231 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7232 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
7233 if (IS_ERR(priv->iface_base))
7234 return PTR_ERR(priv->iface_base);
7235 }
7236
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02007237 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
Thomas Petazzonia7868412017-03-07 16:53:13 +01007238 u32 addr_space_sz;
7239
7240 addr_space_sz = (priv->hw_version == MVPP21 ?
7241 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02007242 priv->swth_base[i] = base + i * addr_space_sz;
Thomas Petazzonia7868412017-03-07 16:53:13 +01007243 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007244
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01007245 if (priv->hw_version == MVPP21)
7246 priv->max_port_rxqs = 8;
7247 else
7248 priv->max_port_rxqs = 32;
7249
Marcin Wojtas3f518502014-07-10 16:52:13 -03007250 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
7251 if (IS_ERR(priv->pp_clk))
7252 return PTR_ERR(priv->pp_clk);
7253 err = clk_prepare_enable(priv->pp_clk);
7254 if (err < 0)
7255 return err;
7256
7257 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
7258 if (IS_ERR(priv->gop_clk)) {
7259 err = PTR_ERR(priv->gop_clk);
7260 goto err_pp_clk;
7261 }
7262 err = clk_prepare_enable(priv->gop_clk);
7263 if (err < 0)
7264 goto err_pp_clk;
7265
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007266 if (priv->hw_version == MVPP22) {
7267 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
7268 if (IS_ERR(priv->mg_clk)) {
7269 err = PTR_ERR(priv->mg_clk);
7270 goto err_gop_clk;
7271 }
7272
7273 err = clk_prepare_enable(priv->mg_clk);
7274 if (err < 0)
7275 goto err_gop_clk;
7276 }
7277
Marcin Wojtas3f518502014-07-10 16:52:13 -03007278 /* Get system's tclk rate */
7279 priv->tclk = clk_get_rate(priv->pp_clk);
7280
Thomas Petazzoni2067e0a2017-03-07 16:53:19 +01007281 if (priv->hw_version == MVPP22) {
7282 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
7283 if (err)
7284 goto err_mg_clk;
7285 /* Sadly, the BM pools all share the same register to
7286 * store the high 32 bits of their address. So they
7287 * must all have the same high 32 bits, which forces
7288 * us to restrict coherent memory to DMA_BIT_MASK(32).
7289 */
7290 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7291 if (err)
7292 goto err_mg_clk;
7293 }
7294
Marcin Wojtas3f518502014-07-10 16:52:13 -03007295 /* Initialize network controller */
7296 err = mvpp2_init(pdev, priv);
7297 if (err < 0) {
7298 dev_err(&pdev->dev, "failed to initialize controller\n");
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007299 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007300 }
7301
7302 port_count = of_get_available_child_count(dn);
7303 if (port_count == 0) {
7304 dev_err(&pdev->dev, "no ports enabled\n");
Wei Yongjun575a1932014-07-20 22:02:43 +08007305 err = -ENODEV;
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007306 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007307 }
7308
7309 priv->port_list = devm_kcalloc(&pdev->dev, port_count,
Markus Elfring0b92e592017-04-17 08:38:32 +02007310 sizeof(*priv->port_list),
7311 GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007312 if (!priv->port_list) {
7313 err = -ENOMEM;
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007314 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007315 }
7316
7317 /* Initialize ports */
Marcin Wojtas3f518502014-07-10 16:52:13 -03007318 for_each_available_child_of_node(dn, port_node) {
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01007319 err = mvpp2_port_probe(pdev, port_node, priv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007320 if (err < 0)
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007321 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007322 }
7323
7324 platform_set_drvdata(pdev, priv);
7325 return 0;
7326
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007327err_mg_clk:
7328 if (priv->hw_version == MVPP22)
7329 clk_disable_unprepare(priv->mg_clk);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007330err_gop_clk:
7331 clk_disable_unprepare(priv->gop_clk);
7332err_pp_clk:
7333 clk_disable_unprepare(priv->pp_clk);
7334 return err;
7335}
7336
7337static int mvpp2_remove(struct platform_device *pdev)
7338{
7339 struct mvpp2 *priv = platform_get_drvdata(pdev);
7340 struct device_node *dn = pdev->dev.of_node;
7341 struct device_node *port_node;
7342 int i = 0;
7343
7344 for_each_available_child_of_node(dn, port_node) {
7345 if (priv->port_list[i])
7346 mvpp2_port_remove(priv->port_list[i]);
7347 i++;
7348 }
7349
7350 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
7351 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
7352
7353 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
7354 }
7355
7356 for_each_present_cpu(i) {
7357 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
7358
7359 dma_free_coherent(&pdev->dev,
7360 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
7361 aggr_txq->descs,
Thomas Petazzoni20396132017-03-07 16:53:00 +01007362 aggr_txq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007363 }
7364
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007365 clk_disable_unprepare(priv->mg_clk);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007366 clk_disable_unprepare(priv->pp_clk);
7367 clk_disable_unprepare(priv->gop_clk);
7368
7369 return 0;
7370}
7371
7372static const struct of_device_id mvpp2_match[] = {
Thomas Petazzonifaca9242017-03-07 16:53:06 +01007373 {
7374 .compatible = "marvell,armada-375-pp2",
7375 .data = (void *)MVPP21,
7376 },
Thomas Petazzonifc5e1552017-03-07 16:53:20 +01007377 {
7378 .compatible = "marvell,armada-7k-pp22",
7379 .data = (void *)MVPP22,
7380 },
Marcin Wojtas3f518502014-07-10 16:52:13 -03007381 { }
7382};
7383MODULE_DEVICE_TABLE(of, mvpp2_match);
7384
7385static struct platform_driver mvpp2_driver = {
7386 .probe = mvpp2_probe,
7387 .remove = mvpp2_remove,
7388 .driver = {
7389 .name = MVPP2_DRIVER_NAME,
7390 .of_match_table = mvpp2_match,
7391 },
7392};
7393
7394module_platform_driver(mvpp2_driver);
7395
7396MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
7397MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
Ezequiel Garciac6340992014-07-14 10:34:47 -03007398MODULE_LICENSE("GPL v2");