/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET 5
#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS 20
#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS 24
#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
#define MVPP2_PRS_PORT_LU_MAX 0xf
#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG 0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG 0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
#define MVPP2_PRS_TCAM_EN_MASK BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG 0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
#define MVPP2_CLS_PORT_WAY_REG 0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG 0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
#define MVPP2_CLS_LKP_TBL_REG 0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG 0x2040
#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
#define MVPP22_DESC_ADDR_OFFS 8
#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
#define MVPP2_RXQ_NUM_NEW_OFFSET 16
#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
#define MVPP2_RXQ_THRESH_REG 0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET 0
#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
#define MVPP2_RXQ_INDEX_REG 0x2050
#define MVPP2_TXQ_NUM_REG 0x2080
#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_TXQ_THRESH_REG 0x2094
#define MVPP2_TXQ_THRESH_OFFSET 16
#define MVPP2_TXQ_THRESH_MASK 0x3fff
#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
#define MVPP2_TXQ_INDEX_REG 0x2098
#define MVPP2_TXQ_PREF_BUF_REG 0x209c
#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
#define MVPP2_TXQ_PENDING_REG 0x20a0
#define MVPP2_TXQ_PENDING_MASK 0x3fff
#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE 0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS 0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12

#define MVPP22_AXI_CODE_CACHE_OFFS 0
#define MVPP22_AXI_CODE_DOMAIN_OFFS 4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0

#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7

#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8

#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK BIT(0)
#define MVPP2_BM_STOP_MASK BIT(1)
#define MVPP2_BM_STATE_MASK BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS 8
#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
                                        MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS 16
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
                                        MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
#define MVPP2_BM_VIRT_RLS_REG 0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
#define MVPP2_TXP_SCHED_MTU_REG 0x801c
#define MVPP2_TXP_MTU_MAX 0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG 0x8800
#define MVPP2_TX_PORT_FLUSH_REG 0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE 0x24
#define MVPP2_SRC_ADDR_HIGH 0x28
#define MVPP2_PHY_AN_CFG0_REG 0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG 0x0
#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
#define MVPP2_GMAC_CTRL_1_REG 0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT 6
#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS 7
#define MVPP2_GMAC_CTRL_2_REG 0x8
#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
#define MVPP2_GMAC_FC_ADV_EN BIT(9)
#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
                                        MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_CTRL_4_REG 0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)

/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL0_REG 0x100
#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
#define MVPP22_XLG_CTRL1_REG 0x104
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT BIT(0)
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff
#define MVPP22_XLG_CTRL3_REG 0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)

#define MVPP22_XLG_CTRL4_REG 0x184
#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)

/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG 0x1204
#define MVPP22_SMI_POLLING_EN BIT(10)

#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
        (((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* MPCS registers. PPv2.2 only */
#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
#define MVPP22_MPCS_CTRL 0x14
#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
#define MVPP22_MPCS_CLK_RESET 0x14c
#define MAC_CLK_RESET_SD_TX BIT(0)
#define MAC_CLK_RESET_SD_RX BIT(1)
#define MAC_CLK_RESET_MAC BIT(2)
#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)

/* XPCS registers. PPv2.2 only */
#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0 0x0
#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)

/* System controller registers. Accessed through a regmap. */
#define GENCONF_SOFT_RESET1 0x1108
#define GENCONF_SOFT_RESET1_GOP BIT(6)
#define GENCONF_PORT_CTRL0 0x1110
#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1)
#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29)
#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31)
#define GENCONF_PORT_CTRL1 0x1114
#define GENCONF_PORT_CTRL1_EN(p) BIT(p)
#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28)
#define GENCONF_CTRL0 0x1120
#define GENCONF_CTRL0_PORT0_RGMII BIT(0)
#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1)
#define GENCONF_CTRL0_PORT1_RGMII BIT(2)

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
#define MVPP2_TXDONE_COAL_USEC 1000
#define MVPP2_RX_COAL_PKTS 32
#define MVPP2_RX_COAL_USEC 100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips them on its own.
 */
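/* For example, a received frame is laid out as
 * [2B MH][14B Ethernet header][IP header ...], so the IP header starts
 * at offset 16, which is 4-byte aligned.
 */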
#define MVPP2_MH_SIZE 2
#define MVPP2_ETH_TYPE_LEN 2
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE 9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000

#define MVPP2_TX_MTU_MAX 0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT 16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS 4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ 8

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ 4

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD 1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK 64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE 256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE 32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
        SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
        ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
              ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
        ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
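
/* Worked example, assuming a 64-byte cache line and NET_SKB_PAD of 64:
 * for an MTU of 1500, MVPP2_RX_PKT_SIZE = ALIGN(1500 + 2 + 4 + 14 + 4, 64)
 * = 1536 bytes, MVPP2_RX_BUF_SIZE = 1536 + 64 = 1600 bytes, and
 * MVPP2_RX_TOTAL_SIZE adds the aligned skb_shared_info footprint on top.
 */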

#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE 16

/* Port flags */
#define MVPP2_F_LOOPBACK BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
        MVPP2_TAG_TYPE_NONE = 0,
        MVPP2_TAG_TYPE_MH = 1,
        MVPP2_TAG_TYPE_DSA = 2,
        MVPP2_TAG_TYPE_EDSA = 3,
        MVPP2_TAG_TYPE_VLAN = 4,
        MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE 256
#define MVPP2_PRS_TCAM_WORDS 6
#define MVPP2_PRS_SRAM_WORDS 4
#define MVPP2_PRS_FLOW_ID_SIZE 64
#define MVPP2_PRS_FLOW_ID_MASK 0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
#define MVPP2_PRS_IPV4_HEAD 0x40
#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
#define MVPP2_PRS_IPV4_MC 0xe0
#define MVPP2_PRS_IPV4_MC_MASK 0xf0
#define MVPP2_PRS_IPV4_BC_MASK 0xff
#define MVPP2_PRS_IPV4_IHL 0x5
#define MVPP2_PRS_IPV4_IHL_MASK 0xf
#define MVPP2_PRS_IPV6_MC 0xff
#define MVPP2_PRS_IPV6_MC_MASK 0xff
#define MVPP2_PRS_IPV6_HOP_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
#define MVPP2_PRS_DBL_VLANS_MAX 100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS 8
#define MVPP2_PRS_PORT_MASK 0xff
#define MVPP2_PRS_LU_MASK 0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
        (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
        (((offs) * 2) - ((offs) % 2) + 2)
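/* Resulting byte layout: each 32-bit TCAM data register holds two
 * header-data bytes in its low half and their two enable (mask) bytes
 * in its high half, e.g. MVPP2_PRS_TCAM_DATA_BYTE(0..3) = 0, 1, 4, 5
 * while MVPP2_PRS_TCAM_DATA_BYTE_EN(0..3) = 2, 3, 6, 7.
 */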
#define MVPP2_PRS_TCAM_AI_BYTE 16
#define MVPP2_PRS_TCAM_PORT_BYTE 17
#define MVPP2_PRS_TCAM_LU_BYTE 20
#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD 5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL 0
#define MVPP2_PE_FIRST_FREE_TID 1
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS 0
#define MVPP2_PRS_SRAM_RI_WORD 0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
#define MVPP2_PRS_SRAM_UDF_OFFS 73
#define MVPP2_PRS_SRAM_UDF_BITS 8
#define MVPP2_PRS_SRAM_UDF_MASK 0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
#define MVPP2_PRS_SRAM_AI_OFFS 90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
#define MVPP2_PRS_SRAM_AI_MASK 0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
#define MVPP2_PRS_SRAM_LU_GEN_BIT 111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
#define MVPP2_PRS_RI_DSA_MASK 0x2
#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE 0x0
#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST 0x0
#define MVPP2_PRS_RI_L2_MCAST BIT(9)
#define MVPP2_PRS_RI_L2_BCAST BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK 0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN 0x0
#define MVPP2_PRS_RI_L3_IP4 BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6 BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST 0x0
#define MVPP2_PRS_RI_L3_MCAST BIT(15)
#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
#define MVPP2_PRS_RI_UDF3_MASK 0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
#define MVPP2_PRS_RI_L4_TCP BIT(22)
#define MVPP2_PRS_RI_L4_UDP BIT(23)
#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
#define MVPP2_PRS_RI_DROP_MASK 0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI 0
#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED true
#define MVPP2_PRS_UNTAGGED false
#define MVPP2_PRS_EDSA true
#define MVPP2_PRS_DSA false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
        MVPP2_PRS_UDF_MAC_DEF,
        MVPP2_PRS_UDF_MAC_RANGE,
        MVPP2_PRS_UDF_L2_DEF,
        MVPP2_PRS_UDF_L2_DEF_COPY,
        MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
        MVPP2_PRS_LU_MH,
        MVPP2_PRS_LU_MAC,
        MVPP2_PRS_LU_DSA,
        MVPP2_PRS_LU_VLAN,
        MVPP2_PRS_LU_L2,
        MVPP2_PRS_LU_PPPOE,
        MVPP2_PRS_LU_IP4,
        MVPP2_PRS_LU_IP6,
        MVPP2_PRS_LU_FLOWS,
        MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
        MVPP2_PRS_L3_UNI_CAST,
        MVPP2_PRS_L3_MULTI_CAST,
        MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE 512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
#define MVPP2_CLS_LKP_TBL_SIZE 64

/* BM constants */
#define MVPP2_BM_POOLS_NUM 8
#define MVPP2_BM_LONG_BUF_NUM 1024
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL 3

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24

/* BM short pool packet size
 * This value ensures that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
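/* For instance, with NET_SKB_PAD of 64 and an aligned skb_shared_info
 * of 320 bytes (both values vary by kernel and architecture), this
 * leaves 512 - 64 - 320 = 128 bytes of actual packet data per buffer.
 */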

#define MVPP21_ADDR_SPACE_SZ 0
#define MVPP22_ADDR_SPACE_SZ SZ_64K

#define MVPP2_MAX_THREADS 8
#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS

enum mvpp2_bm_type {
        MVPP2_BM_FREE,
        MVPP2_BM_SWF_LONG,
        MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
        /* Shared registers' base addresses */
        void __iomem *lms_base;
        void __iomem *iface_base;

        /* On PPv2.2, each "software thread" can access the base
         * register through a separate address space, each 64 KB apart
         * from each other. Typically, such address spaces will be
         * used per CPU.
         */
        void __iomem *swth_base[MVPP2_MAX_THREADS];

        /* On PPv2.2, some port control registers are located in the system
         * controller space. These registers are accessible through a regmap.
         */
        struct regmap *sysctrl_base;

        /* Common clocks */
        struct clk *pp_clk;
        struct clk *gop_clk;
        struct clk *mg_clk;

        /* List of pointers to port structures */
        struct mvpp2_port **port_list;

        /* Aggregated TXQs */
        struct mvpp2_tx_queue *aggr_txqs;

        /* BM pools */
        struct mvpp2_bm_pool *bm_pools;

        /* PRS shadow table */
        struct mvpp2_prs_shadow *prs_shadow;
        /* PRS auxiliary table for double vlan entries control */
        bool *prs_double_vlans;

        /* Tclk value */
        u32 tclk;

        /* HW version */
        enum { MVPP21, MVPP22 } hw_version;

        /* Maximum number of RXQs per port */
        unsigned int max_port_rxqs;
};

struct mvpp2_pcpu_stats {
        struct u64_stats_sync syncp;
        u64 rx_packets;
        u64 rx_bytes;
        u64 tx_packets;
        u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
        struct hrtimer tx_done_timer;
        bool timer_scheduled;
        /* Tasklet for egress finalization */
        struct tasklet_struct tx_done_tasklet;
};

struct mvpp2_queue_vector {
        int irq;
        struct napi_struct napi;
        enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
        int sw_thread_id;
        u16 sw_thread_mask;
        int first_rxq;
        int nrxqs;
        u32 pending_cause_rx;
        struct mvpp2_port *port;
};

struct mvpp2_port {
        u8 id;

        /* Index of the port from the "group of ports" complex point
         * of view
         */
        int gop_id;

        struct mvpp2 *priv;

        /* Per-port registers' base address */
        void __iomem *base;

        struct mvpp2_rx_queue **rxqs;
        unsigned int nrxqs;
        struct mvpp2_tx_queue **txqs;
        unsigned int ntxqs;
        struct net_device *dev;

        int pkt_size;

        /* Per-CPU port control */
        struct mvpp2_port_pcpu __percpu *pcpu;

        /* Flags */
        unsigned long flags;

        u16 tx_ring_size;
        u16 rx_ring_size;
        struct mvpp2_pcpu_stats __percpu *stats;

        phy_interface_t phy_interface;
        struct device_node *phy_node;
        unsigned int link;
        unsigned int duplex;
        unsigned int speed;

        struct mvpp2_bm_pool *pool_long;
        struct mvpp2_bm_pool *pool_short;

        /* Index of first port's physical RXQ */
        u8 first_rxq;

        struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
        unsigned int nqvecs;
        bool has_tx_irqs;

        u32 tx_time_coal;
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT 0
#define MVPP2_TXD_IP_HLEN_SHIFT 8
#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
#define MVPP2_TXD_PADDING_DISABLE BIT(23)
#define MVPP2_TXD_L4_UDP BIT(24)
#define MVPP2_TXD_L3_IP6 BIT(26)
#define MVPP2_TXD_L_DESC BIT(28)
#define MVPP2_TXD_F_DESC BIT(29)

#define MVPP2_RXD_ERR_SUMMARY BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC 0x0
#define MVPP2_RXD_ERR_OVERRUN BIT(13)
#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS 16
#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC BIT(21)
#define MVPP2_RXD_L4_CSUM_OK BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
#define MVPP2_RXD_L4_TCP BIT(25)
#define MVPP2_RXD_L4_UDP BIT(26)
#define MVPP2_RXD_L3_IP4 BIT(28)
#define MVPP2_RXD_L3_IP6 BIT(30)
#define MVPP2_RXD_BUF_HDR BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
        u32 command; /* Options used by HW for packet transmitting.*/
        u8 packet_offset; /* the offset from the buffer beginning */
        u8 phys_txq; /* destination queue ID */
        u16 data_size; /* data size of transmitted packet in bytes */
        u32 buf_dma_addr; /* physical addr of transmitted buffer */
        u32 buf_cookie; /* cookie for access to TX buffer in tx path */
        u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
        u32 reserved2; /* reserved (for future use) */
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
        u32 status; /* info about received packet */
        u16 reserved1; /* parser_info (for future use, PnC) */
        u16 data_size; /* size of received packet in bytes */
        u32 buf_dma_addr; /* physical address of the buffer */
        u32 buf_cookie; /* cookie for access to RX buffer in rx path */
        u16 reserved2; /* gem_port_id (for future use, PON) */
        u16 reserved3; /* csum_l4 (for future use, PnC) */
        u8 reserved4; /* bm_qset (for future use, BM) */
        u8 reserved5;
        u16 reserved6; /* classify_info (for future use, PnC) */
        u32 reserved7; /* flow_id (for future use, PnC) */
        u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
        u32 command;
        u8 packet_offset;
        u8 phys_txq;
        u16 data_size;
        u64 reserved1;
        u64 buf_dma_addr_ptp;
        u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
        u32 status;
        u16 reserved1;
        u16 data_size;
        u32 reserved2;
        u32 reserved3;
        u64 buf_dma_addr_key_hash;
        u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
        union {
                struct mvpp21_tx_desc pp21;
                struct mvpp22_tx_desc pp22;
        };
};

struct mvpp2_rx_desc {
        union {
                struct mvpp21_rx_desc pp21;
                struct mvpp22_rx_desc pp22;
        };
};

struct mvpp2_txq_pcpu_buf {
        /* Transmitted SKB */
        struct sk_buff *skb;

        /* Physical address of transmitted buffer */
        dma_addr_t dma;

        /* Size transmitted */
        size_t size;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
        int cpu;

        /* Number of Tx DMA descriptors in the descriptor ring */
        int size;

        /* Number of currently used Tx DMA descriptor in the
         * descriptor ring
         */
        int count;

        /* Number of Tx DMA descriptors reserved for each CPU */
        int reserved_num;

        /* Infos about transmitted buffers */
        struct mvpp2_txq_pcpu_buf *buffs;

        /* Index of last TX DMA descriptor that was inserted */
        int txq_put_index;

        /* Index of the TX DMA descriptor to be cleaned up */
        int txq_get_index;
};

struct mvpp2_tx_queue {
        /* Physical number of this Tx queue */
        u8 id;

        /* Logical number of this Tx queue */
        u8 log_id;

        /* Number of Tx DMA descriptors in the descriptor ring */
        int size;

        /* Number of currently used Tx DMA descriptor in the descriptor ring */
        int count;

        /* Per-CPU control of physical Tx queues */
        struct mvpp2_txq_pcpu __percpu *pcpu;

        u32 done_pkts_coal;

        /* Virtual address of the Tx DMA descriptors array */
        struct mvpp2_tx_desc *descs;

        /* DMA address of the Tx DMA descriptors array */
        dma_addr_t descs_dma;

        /* Index of the last Tx DMA descriptor */
        int last_desc;

        /* Index of the next Tx DMA descriptor to process */
        int next_desc_to_proc;
};

struct mvpp2_rx_queue {
        /* RX queue number, in the range 0-31 for physical RXQs */
        u8 id;

        /* Num of rx descriptors in the rx descriptor ring */
        int size;

        u32 pkts_coal;
        u32 time_coal;

        /* Virtual address of the RX DMA descriptors array */
        struct mvpp2_rx_desc *descs;

        /* DMA address of the RX DMA descriptors array */
        dma_addr_t descs_dma;

        /* Index of the last RX DMA descriptor */
        int last_desc;

        /* Index of the next RX DMA descriptor to process */
        int next_desc_to_proc;

        /* ID of port to which physical RXQ is mapped */
        int port;

        /* Port's logic RXQ number to which physical RXQ is mapped */
        int logic_rxq;
};

union mvpp2_prs_tcam_entry {
        u32 word[MVPP2_PRS_TCAM_WORDS];
        u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
        u32 word[MVPP2_PRS_SRAM_WORDS];
        u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
        u32 index;
        union mvpp2_prs_tcam_entry tcam;
        union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
        bool valid;
        bool finish;

        /* Lookup ID */
        int lu;

        /* User defined offset */
        int udf;

        /* Result info */
        u32 ri;
        u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
        u32 index;
        u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
        u32 lkpid;
        u32 way;
        u32 data;
};

struct mvpp2_bm_pool {
        /* Pool number in the range 0-7 */
        int id;
        enum mvpp2_bm_type type;

        /* Buffer Pointers Pool External (BPPE) size */
        int size;
        /* BPPE size in bytes */
        int size_bytes;
        /* Number of buffers for this pool */
        int buf_num;
        /* Pool buffer size */
        int buf_size;
        /* Packet size */
        int pkt_size;
        int frag_size;

        /* BPPE virtual base address */
        u32 *virt_addr;
        /* BPPE DMA base address */
        dma_addr_t dma_addr;

        /* Ports using BM pool */
        u32 port_map;
};

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1

static int queue_mode = MVPP2_QDIST_SINGLE_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
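
/* Usage sketch: the queue distribution mode can only be chosen at load
 * time, e.g. "modprobe mvpp2 queue_mode=1" selects multi mode; the 0444
 * permissions make the parameter read-only at runtime.
 */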

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
        writel(data, priv->swth_base[0] + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
        return readl(priv->swth_base[0] + offset);
}

1168 *
1169 * - per-CPU registers, where each CPU has its own copy of the
1170 * register.
1171 *
1172 * MVPP2_BM_VIRT_ALLOC_REG
1173 * MVPP2_BM_ADDR_HIGH_ALLOC
1174 * MVPP22_BM_ADDR_HIGH_RLS_REG
1175 * MVPP2_BM_VIRT_RLS_REG
1176 * MVPP2_ISR_RX_TX_CAUSE_REG
1177 * MVPP2_ISR_RX_TX_MASK_REG
1178 * MVPP2_TXQ_NUM_REG
1179 * MVPP2_AGGR_TXQ_UPDATE_REG
1180 * MVPP2_TXQ_RSVD_REQ_REG
1181 * MVPP2_TXQ_RSVD_RSLT_REG
1182 * MVPP2_TXQ_SENT_REG
1183 * MVPP2_RXQ_NUM_REG
1184 *
1185 * - global registers that must be accessed through a specific CPU
1186 * window, because they are related to an access to a per-CPU
1187 * register
1188 *
1189 * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
1190 * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
1191 * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
1192 * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
1193 * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
1194 * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
1195 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1196 * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
1197 * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
1198 * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
1199 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1200 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1201 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1202 */
static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
                               u32 offset, u32 data)
{
        writel(data, priv->swth_base[cpu] + offset);
}

static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
                             u32 offset)
{
        return readl(priv->swth_base[cpu] + offset);
}
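
/* Sketch of the paired access pattern described above: reading a TX
 * queue's pending counter goes through the local CPU's window, so that
 * the MVPP2_TXQ_NUM_REG write and the MVPP2_TXQ_PENDING_REG read hit
 * the same per-CPU address space:
 *
 *      cpu = get_cpu();
 *      mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
 *      val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG) &
 *            MVPP2_TXQ_PENDING_MASK;
 *      put_cpu();
 */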

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
                                            struct mvpp2_tx_desc *tx_desc)
{
        if (port->priv->hw_version == MVPP21)
                return tx_desc->pp21.buf_dma_addr;
        else
                return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
                                      struct mvpp2_tx_desc *tx_desc,
                                      dma_addr_t dma_addr)
{
        if (port->priv->hw_version == MVPP21) {
                tx_desc->pp21.buf_dma_addr = dma_addr;
        } else {
                u64 val = (u64)dma_addr;

                tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
                tx_desc->pp22.buf_dma_addr_ptp |= val;
        }
}
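
/* Note: on PPv2.2 the buffer address shares the 64-bit buf_dma_addr_ptp
 * field with PTP metadata; GENMASK_ULL(40, 0) selects the 41 address
 * bits, and the set helper above clears only those bits, preserving
 * the upper PTP bits.
 */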

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
                                    struct mvpp2_tx_desc *tx_desc)
{
        if (port->priv->hw_version == MVPP21)
                return tx_desc->pp21.data_size;
        else
                return tx_desc->pp22.data_size;
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
                                  struct mvpp2_tx_desc *tx_desc,
                                  size_t size)
{
        if (port->priv->hw_version == MVPP21)
                tx_desc->pp21.data_size = size;
        else
                tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
                                 struct mvpp2_tx_desc *tx_desc,
                                 unsigned int txq)
{
        if (port->priv->hw_version == MVPP21)
                tx_desc->pp21.phys_txq = txq;
        else
                tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
                                 struct mvpp2_tx_desc *tx_desc,
                                 unsigned int command)
{
        if (port->priv->hw_version == MVPP21)
                tx_desc->pp21.command = command;
        else
                tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
                                    struct mvpp2_tx_desc *tx_desc,
                                    unsigned int offset)
{
        if (port->priv->hw_version == MVPP21)
                tx_desc->pp21.packet_offset = offset;
        else
                tx_desc->pp22.packet_offset = offset;
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
                                            struct mvpp2_tx_desc *tx_desc)
{
        if (port->priv->hw_version == MVPP21)
                return tx_desc->pp21.packet_offset;
        else
                return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
                                            struct mvpp2_rx_desc *rx_desc)
{
        if (port->priv->hw_version == MVPP21)
                return rx_desc->pp21.buf_dma_addr;
        else
                return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
                                             struct mvpp2_rx_desc *rx_desc)
{
        if (port->priv->hw_version == MVPP21)
                return rx_desc->pp21.buf_cookie;
        else
                return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
                                    struct mvpp2_rx_desc *rx_desc)
{
        if (port->priv->hw_version == MVPP21)
                return rx_desc->pp21.data_size;
        else
                return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
                                   struct mvpp2_rx_desc *rx_desc)
{
        if (port->priv->hw_version == MVPP21)
                return rx_desc->pp21.status;
        else
                return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
        txq_pcpu->txq_get_index++;
        if (txq_pcpu->txq_get_index == txq_pcpu->size)
                txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
                              struct mvpp2_txq_pcpu *txq_pcpu,
                              struct sk_buff *skb,
                              struct mvpp2_tx_desc *tx_desc)
{
        struct mvpp2_txq_pcpu_buf *tx_buf =
                txq_pcpu->buffs + txq_pcpu->txq_put_index;
        tx_buf->skb = skb;
        tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
        tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
                mvpp2_txdesc_offset_get(port, tx_desc);
        txq_pcpu->txq_put_index++;
        if (txq_pcpu->txq_put_index == txq_pcpu->size)
                txq_pcpu->txq_put_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
        return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
        return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
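
/* For example, with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8, TXQ 0 of
 * Ethernet port 0 maps to physical TXQ (16 + 0) * 8 + 0 = 128; the lower
 * physical queue numbers are reserved for the PON port's T-CONTs.
 */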
1366
1367/* Parser configuration routines */
1368
1369/* Update parser tcam and sram hw entries */
1370static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1371{
1372 int i;
1373
1374 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1375 return -EINVAL;
1376
1377 /* Clear entry invalidation bit */
1378 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1379
1380 /* Write tcam index - indirect access */
1381 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1382 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1383 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1384
1385 /* Write sram index - indirect access */
1386 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1387 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1388 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1389
1390 return 0;
1391}
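
/* Illustrative sketch, not part of the driver: TCAM and SRAM are both
 * reached through the same indirect scheme - write the entry index to
 * an index register, then move the data words through consecutive
 * 32-bit data registers. A generic form of the write side, assuming
 * data registers laid out 4 bytes apart as MVPP2_PRS_TCAM_DATA_REG()
 * is:
 */
static void __maybe_unused example_indirect_write(struct mvpp2 *priv,
						  u32 idx_reg, int index,
						  u32 data_reg_base,
						  const u32 *words, int nwords)
{
	int i;

	/* select the entry, then burst out its data words */
	mvpp2_write(priv, idx_reg, index);
	for (i = 0; i < nwords; i++)
		mvpp2_write(priv, data_reg_base + i * 4, words[i]);
}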
1392
1393/* Read tcam entry from hw */
1394static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1395{
1396 int i;
1397
1398 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1399 return -EINVAL;
1400
1401 /* Write tcam index - indirect access */
1402 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1403
1404 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1405 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1406 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1407 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1408
1409 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1410 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1411
1412 /* Write sram index - indirect access */
1413 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1414 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1415 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1416
1417 return 0;
1418}
1419
1420/* Invalidate tcam hw entry */
1421static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1422{
1423 /* Write index - indirect access */
1424 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1425 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1426 MVPP2_PRS_TCAM_INV_MASK);
1427}
1428
1429/* Enable shadow table entry and set its lookup ID */
1430static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1431{
1432 priv->prs_shadow[index].valid = true;
1433 priv->prs_shadow[index].lu = lu;
1434}
1435
1436/* Update ri fields in shadow table entry */
1437static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1438 unsigned int ri, unsigned int ri_mask)
1439{
1440 priv->prs_shadow[index].ri_mask = ri_mask;
1441 priv->prs_shadow[index].ri = ri;
1442}
1443
1444/* Update lookup field in tcam sw entry */
1445static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1446{
1447 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1448
1449 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1450 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1451}
1452
1453/* Update mask for single port in tcam sw entry */
1454static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1455 unsigned int port, bool add)
1456{
1457 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1458
1459 if (add)
1460 pe->tcam.byte[enable_off] &= ~(1 << port);
1461 else
1462 pe->tcam.byte[enable_off] |= 1 << port;
1463}
1464
1465/* Update port map in tcam sw entry */
1466static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1467 unsigned int ports)
1468{
1469 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1470 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1471
1472 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1473 pe->tcam.byte[enable_off] &= ~port_mask;
1474 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1475}
1476
1477/* Obtain port map from tcam sw entry */
1478static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1479{
1480 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1481
1482 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1483}
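
/* Illustrative sketch, not part of the driver: the enable byte stores
 * the port map inverted (a cleared bit enables a port), so the two
 * helpers above are exact inverses. A hypothetical round-trip check:
 */
static bool __maybe_unused example_port_map_roundtrip(unsigned int ports)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_port_map_set(&pe, ports);
	return mvpp2_prs_tcam_port_map_get(&pe) ==
	       (ports & MVPP2_PRS_PORT_MASK);
}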
1484
1485/* Set byte of data and its enable bits in tcam sw entry */
1486static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1487 unsigned int offs, unsigned char byte,
1488 unsigned char enable)
1489{
1490 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1491 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1492}
1493
1494/* Get byte of data and its enable bits from tcam sw entry */
1495static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1496 unsigned int offs, unsigned char *byte,
1497 unsigned char *enable)
1498{
1499 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1500 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1501}
1502
1503/* Compare tcam data bytes with a pattern */
1504static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1505 u16 data)
1506{
1507 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1508 u16 tcam_data;
1509
1510	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1511 if (tcam_data != data)
1512 return false;
1513 return true;
1514}
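
/* Illustrative sketch, not part of the driver: TCAM data is stored
 * byte-wise with the wire-order high byte at 'offs' (see
 * mvpp2_prs_match_etype() below), while the compare above makes
 * byte[off + 1] the high byte - which is why callers pass swab16() of
 * the host-order ethertype. The reassembly in isolation:
 */
static inline u16 __maybe_unused example_tcam_u16(u8 at_offs, u8 at_offs_plus_1)
{
	return (at_offs_plus_1 << 8) | at_offs;
}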
1515
1516/* Update ai bits in tcam sw entry */
1517static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1518 unsigned int bits, unsigned int enable)
1519{
1520 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1521
1522 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1523
1524 if (!(enable & BIT(i)))
1525 continue;
1526
1527 if (bits & BIT(i))
1528 pe->tcam.byte[ai_idx] |= 1 << i;
1529 else
1530 pe->tcam.byte[ai_idx] &= ~(1 << i);
1531 }
1532
1533 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1534}
1535
1536/* Get ai bits from tcam sw entry */
1537static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1538{
1539 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1540}
1541
1542/* Set ethertype in tcam sw entry */
1543static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1544 unsigned short ethertype)
1545{
1546 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1547 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1548}
1549
1550/* Set bits in sram sw entry */
1551static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1552 int val)
1553{
1554 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1555}
1556
1557/* Clear bits in sram sw entry */
1558static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1559 int val)
1560{
1561 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1562}
1563
1564/* Update ri bits in sram sw entry */
1565static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1566 unsigned int bits, unsigned int mask)
1567{
1568 unsigned int i;
1569
1570 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1571 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1572
1573 if (!(mask & BIT(i)))
1574 continue;
1575
1576 if (bits & BIT(i))
1577 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1578 else
1579 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1580
1581 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1582 }
1583}
1584
1585/* Obtain ri bits from sram sw entry */
1586static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1587{
1588 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1589}
1590
1591/* Update ai bits in sram sw entry */
1592static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1593 unsigned int bits, unsigned int mask)
1594{
1595 unsigned int i;
1596 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1597
1598 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1599
1600 if (!(mask & BIT(i)))
1601 continue;
1602
1603 if (bits & BIT(i))
1604 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1605 else
1606 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1607
1608 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1609 }
1610}
1611
1612/* Read ai bits from sram sw entry */
1613static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1614{
1615 u8 bits;
1616 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1617 int ai_en_off = ai_off + 1;
1618 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1619
1620 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1621 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1622
1623 return bits;
1624}
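
/* Illustrative sketch, not part of the driver: MVPP2_PRS_SRAM_AI_OFFS
 * need not be byte aligned, so the read above stitches the tail of one
 * byte to the head of the next. The same extraction in generic form:
 */
static inline u8 __maybe_unused example_unaligned_byte_get(const u8 *buf,
							   int bit_off)
{
	int byte = bit_off / 8;
	int shift = bit_off % 8;

	return (buf[byte] >> shift) | (buf[byte + 1] << (8 - shift));
}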
1625
1626/* In sram sw entry set the lookup ID field of the tcam key to be used in the
1627 * next lookup iteration
1628 */
1629static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1630 unsigned int lu)
1631{
1632 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1633
1634 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1635 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1636 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1637}
1638
1639/* In the sram sw entry set sign and value of the next lookup offset
1640 * and the offset value generated to the classifier
1641 */
1642static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1643 unsigned int op)
1644{
1645 /* Set sign */
1646 if (shift < 0) {
1647 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1648 shift = 0 - shift;
1649 } else {
1650 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1651 }
1652
1653 /* Set value */
1654 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1655 (unsigned char)shift;
1656
1657 /* Reset and set operation */
1658 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1659 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1660 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1661
1662 /* Set base offset as current */
1663 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1664}
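
/* Illustrative sketch, not part of the driver: the shift is stored as
 * sign and magnitude (one sign bit plus an unsigned byte) rather than
 * two's complement, which is why the code above negates the value and
 * sets a sign bit. A hypothetical encoder for the same scheme:
 */
static inline void __maybe_unused example_sign_mag_encode(int value,
							  bool *negative,
							  u8 *magnitude)
{
	*negative = value < 0;
	*magnitude = *negative ? -value : value;
}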
1665
1666/* In the sram sw entry set sign and value of the user defined offset
1667 * generated to the classifier
1668 */
1669static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1670 unsigned int type, int offset,
1671 unsigned int op)
1672{
1673 /* Set sign */
1674 if (offset < 0) {
1675 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1676 offset = 0 - offset;
1677 } else {
1678 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1679 }
1680
1681 /* Set value */
1682 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1683 MVPP2_PRS_SRAM_UDF_MASK);
1684 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1685 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1686 MVPP2_PRS_SRAM_UDF_BITS)] &=
1687 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1688 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1689 MVPP2_PRS_SRAM_UDF_BITS)] |=
1690 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1691
1692 /* Set offset type */
1693 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1694 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1695 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1696
1697 /* Set offset operation */
1698 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1699 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1700 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1701
1702 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1703 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1704 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1705 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1706
1707 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1708 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1709 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1710
1711 /* Set base offset as current */
1712 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1713}
1714
1715/* Find parser flow entry */
1716static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1717{
1718 struct mvpp2_prs_entry *pe;
1719 int tid;
1720
1721 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1722 if (!pe)
1723 return NULL;
1724 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1725
1726	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
1727 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1728 u8 bits;
1729
1730 if (!priv->prs_shadow[tid].valid ||
1731 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1732 continue;
1733
1734 pe->index = tid;
1735 mvpp2_prs_hw_read(priv, pe);
1736 bits = mvpp2_prs_sram_ai_get(pe);
1737
1738		/* SRAM stores the classification lookup ID in AI bits [5:0] */
1739 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1740 return pe;
1741 }
1742 kfree(pe);
1743
1744 return NULL;
1745}
1746
1747/* Return first free tcam index, seeking from start to end */
1748static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1749 unsigned char end)
1750{
1751 int tid;
1752
1753 if (start > end)
1754 swap(start, end);
1755
1756 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1757 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1758
1759 for (tid = start; tid <= end; tid++) {
1760 if (!priv->prs_shadow[tid].valid)
1761 return tid;
1762 }
1763
1764 return -EINVAL;
1765}
1766
1767/* Enable/disable dropping all mac da's */
1768static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1769{
1770 struct mvpp2_prs_entry pe;
1771
1772 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1773 /* Entry exist - update port only */
1774 pe.index = MVPP2_PE_DROP_ALL;
1775 mvpp2_prs_hw_read(priv, &pe);
1776 } else {
1777 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001778 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001779 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1780 pe.index = MVPP2_PE_DROP_ALL;
1781
1782 /* Non-promiscuous mode for all ports - DROP unknown packets */
1783 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1784 MVPP2_PRS_RI_DROP_MASK);
1785
1786 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1787 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1788
1789 /* Update shadow table */
1790 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1791
1792 /* Mask all ports */
1793 mvpp2_prs_tcam_port_map_set(&pe, 0);
1794 }
1795
1796 /* Update port mask */
1797 mvpp2_prs_tcam_port_set(&pe, port, add);
1798
1799 mvpp2_prs_hw_write(priv, &pe);
1800}
1801
1802/* Set port to promiscuous mode */
1803static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1804{
1805 struct mvpp2_prs_entry pe;
1806
Joe Perchesdbedd442015-03-06 20:49:12 -08001807 /* Promiscuous mode - Accept unknown packets */
Marcin Wojtas3f518502014-07-10 16:52:13 -03001808
1809 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1810 /* Entry exist - update port only */
1811 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1812 mvpp2_prs_hw_read(priv, &pe);
1813 } else {
1814 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001815 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001816 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1817 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1818
1819 /* Continue - set next lookup */
1820 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1821
1822 /* Set result info bits */
1823 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1824 MVPP2_PRS_RI_L2_CAST_MASK);
1825
1826 /* Shift to ethertype */
1827 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1828 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1829
1830 /* Mask all ports */
1831 mvpp2_prs_tcam_port_map_set(&pe, 0);
1832
1833 /* Update shadow table */
1834 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1835 }
1836
1837 /* Update port mask */
1838 mvpp2_prs_tcam_port_set(&pe, port, add);
1839
1840 mvpp2_prs_hw_write(priv, &pe);
1841}
1842
1843/* Accept multicast */
1844static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1845 bool add)
1846{
1847 struct mvpp2_prs_entry pe;
1848 unsigned char da_mc;
1849
1850 /* Ethernet multicast address first byte is
1851 * 0x01 for IPv4 and 0x33 for IPv6
1852 */
1853 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1854
1855 if (priv->prs_shadow[index].valid) {
1856 /* Entry exist - update port only */
1857 pe.index = index;
1858 mvpp2_prs_hw_read(priv, &pe);
1859 } else {
1860 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001861 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001862 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1863 pe.index = index;
1864
1865 /* Continue - set next lookup */
1866 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1867
1868 /* Set result info bits */
1869 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1870 MVPP2_PRS_RI_L2_CAST_MASK);
1871
1872 /* Update tcam entry data first byte */
1873 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1874
1875 /* Shift to ethertype */
1876 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1877 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1878
1879 /* Mask all ports */
1880 mvpp2_prs_tcam_port_map_set(&pe, 0);
1881
1882 /* Update shadow table */
1883 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1884 }
1885
1886 /* Update port mask */
1887 mvpp2_prs_tcam_port_set(&pe, port, add);
1888
1889 mvpp2_prs_hw_write(priv, &pe);
1890}
1891
1892/* Set entry for dsa packets */
1893static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1894 bool tagged, bool extend)
1895{
1896 struct mvpp2_prs_entry pe;
1897 int tid, shift;
1898
1899 if (extend) {
1900 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1901 shift = 8;
1902 } else {
1903 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1904 shift = 4;
1905 }
1906
1907 if (priv->prs_shadow[tid].valid) {
1908 /* Entry exist - update port only */
1909 pe.index = tid;
1910 mvpp2_prs_hw_read(priv, &pe);
1911 } else {
1912 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001913 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001914 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1915 pe.index = tid;
1916
1917		/* Shift 4 bytes for a DSA tag or 8 bytes for an EDSA tag */
1918 mvpp2_prs_sram_shift_set(&pe, shift,
1919 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1920
1921 /* Update shadow table */
1922 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1923
1924 if (tagged) {
1925 /* Set tagged bit in DSA tag */
1926 mvpp2_prs_tcam_data_byte_set(&pe, 0,
1927 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1928 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1929 /* Clear all ai bits for next iteration */
1930 mvpp2_prs_sram_ai_update(&pe, 0,
1931 MVPP2_PRS_SRAM_AI_MASK);
1932 /* If packet is tagged continue check vlans */
1933 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1934 } else {
1935 /* Set result info bits to 'no vlans' */
1936 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1937 MVPP2_PRS_RI_VLAN_MASK);
1938 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1939 }
1940
1941 /* Mask all ports */
1942 mvpp2_prs_tcam_port_map_set(&pe, 0);
1943 }
1944
1945 /* Update port mask */
1946 mvpp2_prs_tcam_port_set(&pe, port, add);
1947
1948 mvpp2_prs_hw_write(priv, &pe);
1949}
1950
1951/* Set entry for dsa ethertype */
1952static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1953 bool add, bool tagged, bool extend)
1954{
1955 struct mvpp2_prs_entry pe;
1956 int tid, shift, port_mask;
1957
1958 if (extend) {
1959 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1960 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1961 port_mask = 0;
1962 shift = 8;
1963 } else {
1964 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1965 MVPP2_PE_ETYPE_DSA_UNTAGGED;
1966 port_mask = MVPP2_PRS_PORT_MASK;
1967 shift = 4;
1968 }
1969
1970 if (priv->prs_shadow[tid].valid) {
1971 /* Entry exist - update port only */
1972 pe.index = tid;
1973 mvpp2_prs_hw_read(priv, &pe);
1974 } else {
1975 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001976 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001977 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1978 pe.index = tid;
1979
1980 /* Set ethertype */
1981 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
1982 mvpp2_prs_match_etype(&pe, 2, 0);
1983
1984 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
1985 MVPP2_PRS_RI_DSA_MASK);
1986		/* Shift past the ethertype + 2 reserved bytes + tag */
1987 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
1988 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1989
1990 /* Update shadow table */
1991 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1992
1993 if (tagged) {
1994 /* Set tagged bit in DSA tag */
1995 mvpp2_prs_tcam_data_byte_set(&pe,
1996 MVPP2_ETH_TYPE_LEN + 2 + 3,
1997 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1998 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1999 /* Clear all ai bits for next iteration */
2000 mvpp2_prs_sram_ai_update(&pe, 0,
2001 MVPP2_PRS_SRAM_AI_MASK);
2002 /* If packet is tagged continue check vlans */
2003 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2004 } else {
2005 /* Set result info bits to 'no vlans' */
2006 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2007 MVPP2_PRS_RI_VLAN_MASK);
2008 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2009 }
2010 /* Mask/unmask all ports, depending on dsa type */
2011 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
2012 }
2013
2014 /* Update port mask */
2015 mvpp2_prs_tcam_port_set(&pe, port, add);
2016
2017 mvpp2_prs_hw_write(priv, &pe);
2018}
2019
2020/* Search for existing single/triple vlan entry */
2021static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
2022 unsigned short tpid, int ai)
2023{
2024 struct mvpp2_prs_entry *pe;
2025 int tid;
2026
2027 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2028 if (!pe)
2029 return NULL;
2030 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2031
2032	/* Go through all entries with MVPP2_PRS_LU_VLAN */
2033 for (tid = MVPP2_PE_FIRST_FREE_TID;
2034 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2035 unsigned int ri_bits, ai_bits;
2036 bool match;
2037
2038 if (!priv->prs_shadow[tid].valid ||
2039 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2040 continue;
2041
2042 pe->index = tid;
2043
2044 mvpp2_prs_hw_read(priv, pe);
2045 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
2046 if (!match)
2047 continue;
2048
2049 /* Get vlan type */
2050 ri_bits = mvpp2_prs_sram_ri_get(pe);
2051 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2052
2053 /* Get current ai value from tcam */
2054 ai_bits = mvpp2_prs_tcam_ai_get(pe);
2055 /* Clear double vlan bit */
2056 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
2057
2058 if (ai != ai_bits)
2059 continue;
2060
2061 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2062 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2063 return pe;
2064 }
2065 kfree(pe);
2066
2067 return NULL;
2068}
2069
2070/* Add/update single/triple vlan entry */
2071static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
2072 unsigned int port_map)
2073{
2074 struct mvpp2_prs_entry *pe;
2075 int tid_aux, tid;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302076 int ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002077
2078 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
2079
2080 if (!pe) {
2081 /* Create new tcam entry */
2082 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
2083 MVPP2_PE_FIRST_FREE_TID);
2084 if (tid < 0)
2085 return tid;
2086
2087 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2088 if (!pe)
2089 return -ENOMEM;
2090
2091 /* Get last double vlan tid */
2092 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
2093 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
2094 unsigned int ri_bits;
2095
2096 if (!priv->prs_shadow[tid_aux].valid ||
2097 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2098 continue;
2099
2100 pe->index = tid_aux;
2101 mvpp2_prs_hw_read(priv, pe);
2102 ri_bits = mvpp2_prs_sram_ri_get(pe);
2103 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
2104 MVPP2_PRS_RI_VLAN_DOUBLE)
2105 break;
2106 }
2107
Sudip Mukherjee43737472014-11-01 16:59:34 +05302108 if (tid <= tid_aux) {
2109 ret = -EINVAL;
Markus Elfringf9fd0e32017-04-17 13:50:35 +02002110 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302111 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002112
Markus Elfringbd6aaf52017-04-17 10:40:32 +02002113 memset(pe, 0, sizeof(*pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002114 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2115 pe->index = tid;
2116
2117 mvpp2_prs_match_etype(pe, 0, tpid);
2118
2119 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
2120 /* Shift 4 bytes - skip 1 vlan tag */
2121 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
2122 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2123 /* Clear all ai bits for next iteration */
2124 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2125
2126 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2127 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
2128 MVPP2_PRS_RI_VLAN_MASK);
2129 } else {
2130 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2131 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2132 MVPP2_PRS_RI_VLAN_MASK);
2133 }
2134 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2135
2136 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2137 }
2138 /* Update ports' mask */
2139 mvpp2_prs_tcam_port_map_set(pe, port_map);
2140
2141 mvpp2_prs_hw_write(priv, pe);
Markus Elfringf9fd0e32017-04-17 13:50:35 +02002142free_pe:
Marcin Wojtas3f518502014-07-10 16:52:13 -03002143 kfree(pe);
2144
Sudip Mukherjee43737472014-11-01 16:59:34 +05302145 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002146}
2147
2148/* Get first free double vlan ai number */
2149static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2150{
2151 int i;
2152
2153 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2154 if (!priv->prs_double_vlans[i])
2155 return i;
2156 }
2157
2158 return -EINVAL;
2159}
2160
2161/* Search for existing double vlan entry */
2162static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
2163 unsigned short tpid1,
2164 unsigned short tpid2)
2165{
2166 struct mvpp2_prs_entry *pe;
2167 int tid;
2168
2169 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2170 if (!pe)
2171 return NULL;
2172 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2173
2174	/* Go through all entries with MVPP2_PRS_LU_VLAN */
2175 for (tid = MVPP2_PE_FIRST_FREE_TID;
2176 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2177 unsigned int ri_mask;
2178 bool match;
2179
2180 if (!priv->prs_shadow[tid].valid ||
2181 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2182 continue;
2183
2184 pe->index = tid;
2185 mvpp2_prs_hw_read(priv, pe);
2186
2187		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1)) &&
2188			mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
2189
2190 if (!match)
2191 continue;
2192
2193 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
2194 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2195 return pe;
2196 }
2197 kfree(pe);
2198
2199 return NULL;
2200}
2201
2202/* Add or update double vlan entry */
2203static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2204 unsigned short tpid2,
2205 unsigned int port_map)
2206{
2207 struct mvpp2_prs_entry *pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302208 int tid_aux, tid, ai, ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002209
2210 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2211
2212 if (!pe) {
2213 /* Create new tcam entry */
2214 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2215 MVPP2_PE_LAST_FREE_TID);
2216 if (tid < 0)
2217 return tid;
2218
2219 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2220 if (!pe)
2221 return -ENOMEM;
2222
2223 /* Set ai value for new double vlan entry */
2224 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
Sudip Mukherjee43737472014-11-01 16:59:34 +05302225 if (ai < 0) {
2226 ret = ai;
Markus Elfringc9a7e122017-04-17 13:03:49 +02002227 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302228 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002229
2230 /* Get first single/triple vlan tid */
2231 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2232 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2233 unsigned int ri_bits;
2234
2235 if (!priv->prs_shadow[tid_aux].valid ||
2236 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2237 continue;
2238
2239 pe->index = tid_aux;
2240 mvpp2_prs_hw_read(priv, pe);
2241 ri_bits = mvpp2_prs_sram_ri_get(pe);
2242 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2243 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2244 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2245 break;
2246 }
2247
Sudip Mukherjee43737472014-11-01 16:59:34 +05302248 if (tid >= tid_aux) {
2249 ret = -ERANGE;
Markus Elfringc9a7e122017-04-17 13:03:49 +02002250 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302251 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002252
Markus Elfringbd6aaf52017-04-17 10:40:32 +02002253 memset(pe, 0, sizeof(*pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002254 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2255 pe->index = tid;
2256
2257 priv->prs_double_vlans[ai] = true;
2258
2259 mvpp2_prs_match_etype(pe, 0, tpid1);
2260 mvpp2_prs_match_etype(pe, 4, tpid2);
2261
2262 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
2263 /* Shift 8 bytes - skip 2 vlan tags */
2264 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
2265 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2266 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2267 MVPP2_PRS_RI_VLAN_MASK);
2268 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2269 MVPP2_PRS_SRAM_AI_MASK);
2270
2271 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2272 }
2273
2274 /* Update ports' mask */
2275 mvpp2_prs_tcam_port_map_set(pe, port_map);
2276 mvpp2_prs_hw_write(priv, pe);
Markus Elfringc9a7e122017-04-17 13:03:49 +02002277free_pe:
Marcin Wojtas3f518502014-07-10 16:52:13 -03002278 kfree(pe);
Sudip Mukherjee43737472014-11-01 16:59:34 +05302279 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002280}
2281
2282/* IPv4 header parsing for fragmentation and L4 offset */
2283static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2284 unsigned int ri, unsigned int ri_mask)
2285{
2286 struct mvpp2_prs_entry pe;
2287 int tid;
2288
2289 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2290 (proto != IPPROTO_IGMP))
2291 return -EINVAL;
2292
2293 /* Fragmented packet */
2294 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2295 MVPP2_PE_LAST_FREE_TID);
2296 if (tid < 0)
2297 return tid;
2298
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002299 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002300 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2301 pe.index = tid;
2302
2303 /* Set next lu to IPv4 */
2304 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2305 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2306 /* Set L4 offset */
2307 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2308 sizeof(struct iphdr) - 4,
2309 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2310 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2311 MVPP2_PRS_IPV4_DIP_AI_BIT);
2312 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
2313 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2314
2315 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2316 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2317 /* Unmask all ports */
2318 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2319
2320 /* Update shadow table and hw entry */
2321 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2322 mvpp2_prs_hw_write(priv, &pe);
2323
2324 /* Not fragmented packet */
2325 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2326 MVPP2_PE_LAST_FREE_TID);
2327 if (tid < 0)
2328 return tid;
2329
2330 pe.index = tid;
2331 /* Clear ri before updating */
2332 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2333 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2334 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2335
2336 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
2337 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
2338
2339 /* Update shadow table and hw entry */
2340 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2341 mvpp2_prs_hw_write(priv, &pe);
2342
2343 return 0;
2344}
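
/* Illustrative sketch, not part of the driver: the two parser entries
 * above split IPv4 traffic into fragmented and non-fragmented cases by
 * matching the header's flags/fragment-offset bytes. The equivalent
 * software test (as ip_is_fragment() in net/ip.h does it) would be:
 */
static inline bool __maybe_unused
example_ipv4_is_fragment(const struct iphdr *iph)
{
	/* a fragment has MF set or a non-zero fragment offset */
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}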
2345
2346/* IPv4 L3 multicast or broadcast */
2347static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2348{
2349 struct mvpp2_prs_entry pe;
2350 int mask, tid;
2351
2352 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2353 MVPP2_PE_LAST_FREE_TID);
2354 if (tid < 0)
2355 return tid;
2356
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002357 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002358 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2359 pe.index = tid;
2360
2361 switch (l3_cast) {
2362 case MVPP2_PRS_L3_MULTI_CAST:
2363 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2364 MVPP2_PRS_IPV4_MC_MASK);
2365 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2366 MVPP2_PRS_RI_L3_ADDR_MASK);
2367 break;
2368 case MVPP2_PRS_L3_BROAD_CAST:
2369 mask = MVPP2_PRS_IPV4_BC_MASK;
2370 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2371 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2372 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2373 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2374 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2375 MVPP2_PRS_RI_L3_ADDR_MASK);
2376 break;
2377 default:
2378 return -EINVAL;
2379 }
2380
2381 /* Finished: go to flowid generation */
2382 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2383 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2384
2385 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2386 MVPP2_PRS_IPV4_DIP_AI_BIT);
2387 /* Unmask all ports */
2388 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2389
2390 /* Update shadow table and hw entry */
2391 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2392 mvpp2_prs_hw_write(priv, &pe);
2393
2394 return 0;
2395}
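
/* Illustrative sketch, not part of the driver: the multicast case above
 * keys on the first DIP byte, since IPv4 multicast is 224.0.0.0/4 (top
 * nibble 0xe), while broadcast must match 0xff in all four bytes. The
 * software equivalent of the multicast match, assuming the
 * MVPP2_PRS_IPV4_MC constants encode that nibble test:
 */
static inline bool __maybe_unused example_ipv4_dip_is_mcast(u8 dip_first_byte)
{
	return (dip_first_byte & 0xf0) == 0xe0;
}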
2396
2397/* Set entries for protocols over IPv6 */
2398static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2399 unsigned int ri, unsigned int ri_mask)
2400{
2401 struct mvpp2_prs_entry pe;
2402 int tid;
2403
2404 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2405 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2406 return -EINVAL;
2407
2408 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2409 MVPP2_PE_LAST_FREE_TID);
2410 if (tid < 0)
2411 return tid;
2412
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002413 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002414 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2415 pe.index = tid;
2416
2417 /* Finished: go to flowid generation */
2418 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2419 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2420 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2421 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2422 sizeof(struct ipv6hdr) - 6,
2423 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2424
2425 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2426 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2427 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2428 /* Unmask all ports */
2429 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2430
2431 /* Write HW */
2432 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2433 mvpp2_prs_hw_write(priv, &pe);
2434
2435 return 0;
2436}
2437
2438/* IPv6 L3 multicast entry */
2439static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2440{
2441 struct mvpp2_prs_entry pe;
2442 int tid;
2443
2444 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2445 return -EINVAL;
2446
2447 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2448 MVPP2_PE_LAST_FREE_TID);
2449 if (tid < 0)
2450 return tid;
2451
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002452 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002453 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2454 pe.index = tid;
2455
2456 /* Finished: go to flowid generation */
2457 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2458 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2459 MVPP2_PRS_RI_L3_ADDR_MASK);
2460 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2461 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2462 /* Shift back to IPv6 NH */
2463 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2464
2465 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2466 MVPP2_PRS_IPV6_MC_MASK);
2467 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2468 /* Unmask all ports */
2469 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2470
2471 /* Update shadow table and hw entry */
2472 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2473 mvpp2_prs_hw_write(priv, &pe);
2474
2475 return 0;
2476}
2477
2478/* Parser per-port initialization */
2479static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2480 int lu_max, int offset)
2481{
2482 u32 val;
2483
2484 /* Set lookup ID */
2485 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2486 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2487 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2488 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2489
2490 /* Set maximum number of loops for packet received from port */
2491 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2492 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2493 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2494 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2495
2496 /* Set initial offset for packet header extraction for the first
2497 * searching loop
2498 */
2499 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2500 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2501 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2502 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2503}
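
/* Illustrative sketch, not part of the driver: every per-port setting
 * above follows the same read-modify-write shape - read the register,
 * clear the port's field, OR in the new value, write it back. In
 * generic form:
 */
static void __maybe_unused example_field_update(struct mvpp2 *priv, u32 reg,
						u32 mask, u32 val)
{
	u32 tmp = mvpp2_read(priv, reg);

	tmp &= ~mask;
	tmp |= val & mask;
	mvpp2_write(priv, reg, tmp);
}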
2504
2505/* Default flow entries initialization for all ports */
2506static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2507{
2508 struct mvpp2_prs_entry pe;
2509 int port;
2510
2511 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002512 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002513 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2514 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2515
2516 /* Mask all ports */
2517 mvpp2_prs_tcam_port_map_set(&pe, 0);
2518
2519		/* Set flow ID */
2520 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2521 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2522
2523 /* Update shadow table and hw entry */
2524 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2525 mvpp2_prs_hw_write(priv, &pe);
2526 }
2527}
2528
2529/* Set default entry for Marvell Header field */
2530static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2531{
2532 struct mvpp2_prs_entry pe;
2533
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002534 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002535
2536 pe.index = MVPP2_PE_MH_DEFAULT;
2537 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2538 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2539 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2540 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2541
2542 /* Unmask all ports */
2543 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2544
2545 /* Update shadow table and hw entry */
2546 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2547 mvpp2_prs_hw_write(priv, &pe);
2548}
2549
2550/* Set default entires (place holder) for promiscuous, non-promiscuous and
2551 * multicast MAC addresses
2552 */
2553static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2554{
2555 struct mvpp2_prs_entry pe;
2556
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002557 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002558
2559 /* Non-promiscuous mode for all ports - DROP unknown packets */
2560 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2561 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2562
2563 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2564 MVPP2_PRS_RI_DROP_MASK);
2565 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2566 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2567
2568 /* Unmask all ports */
2569 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2570
2571 /* Update shadow table and hw entry */
2572 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2573 mvpp2_prs_hw_write(priv, &pe);
2574
2575 /* place holders only - no ports */
2576 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2577 mvpp2_prs_mac_promisc_set(priv, 0, false);
2578 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2579 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2580}
2581
2582/* Set default entries for various types of dsa packets */
2583static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2584{
2585 struct mvpp2_prs_entry pe;
2586
2587	/* Untagged EDSA entry - place holder */
2588 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2589 MVPP2_PRS_EDSA);
2590
2591 /* Tagged EDSA entry - place holder */
2592 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2593
2594	/* Untagged DSA entry - place holder */
2595 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2596 MVPP2_PRS_DSA);
2597
2598 /* Tagged DSA entry - place holder */
2599 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2600
2601	/* Untagged EDSA ethertype entry - place holder */
2602 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2603 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2604
2605	/* Tagged EDSA ethertype entry - place holder */
2606 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2607 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2608
2609	/* Untagged DSA ethertype entry */
2610 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2611 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2612
2613 /* Tagged DSA ethertype entry */
2614 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2615 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2616
2617	/* Set default entry, in case no DSA or EDSA tag is found */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002618 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002619 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2620 pe.index = MVPP2_PE_DSA_DEFAULT;
2621 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2622
2623 /* Shift 0 bytes */
2624 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2625 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2626
2627 /* Clear all sram ai bits for next iteration */
2628 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2629
2630 /* Unmask all ports */
2631 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2632
2633 mvpp2_prs_hw_write(priv, &pe);
2634}
2635
2636/* Match basic ethertypes */
2637static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2638{
2639 struct mvpp2_prs_entry pe;
2640 int tid;
2641
2642 /* Ethertype: PPPoE */
2643 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2644 MVPP2_PE_LAST_FREE_TID);
2645 if (tid < 0)
2646 return tid;
2647
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002648 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002649 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2650 pe.index = tid;
2651
2652 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2653
2654 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2655 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2656 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2657 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2658 MVPP2_PRS_RI_PPPOE_MASK);
2659
2660 /* Update shadow table and hw entry */
2661 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2662 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2663 priv->prs_shadow[pe.index].finish = false;
2664 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2665 MVPP2_PRS_RI_PPPOE_MASK);
2666 mvpp2_prs_hw_write(priv, &pe);
2667
2668 /* Ethertype: ARP */
2669 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2670 MVPP2_PE_LAST_FREE_TID);
2671 if (tid < 0)
2672 return tid;
2673
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002674 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002675 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2676 pe.index = tid;
2677
2678 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2679
2680	/* Generate flow in the next iteration */
2681 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2682 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2683 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2684 MVPP2_PRS_RI_L3_PROTO_MASK);
2685 /* Set L3 offset */
2686 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2687 MVPP2_ETH_TYPE_LEN,
2688 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2689
2690 /* Update shadow table and hw entry */
2691 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2692 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2693 priv->prs_shadow[pe.index].finish = true;
2694 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2695 MVPP2_PRS_RI_L3_PROTO_MASK);
2696 mvpp2_prs_hw_write(priv, &pe);
2697
2698 /* Ethertype: LBTD */
2699 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2700 MVPP2_PE_LAST_FREE_TID);
2701 if (tid < 0)
2702 return tid;
2703
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002704 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002705 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2706 pe.index = tid;
2707
2708 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2709
2710	/* Generate flow in the next iteration */
2711 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2712 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2713 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2714 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2715 MVPP2_PRS_RI_CPU_CODE_MASK |
2716 MVPP2_PRS_RI_UDF3_MASK);
2717 /* Set L3 offset */
2718 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2719 MVPP2_ETH_TYPE_LEN,
2720 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2721
2722 /* Update shadow table and hw entry */
2723 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2724 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2725 priv->prs_shadow[pe.index].finish = true;
2726 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2727 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2728 MVPP2_PRS_RI_CPU_CODE_MASK |
2729 MVPP2_PRS_RI_UDF3_MASK);
2730 mvpp2_prs_hw_write(priv, &pe);
2731
2732 /* Ethertype: IPv4 without options */
2733 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2734 MVPP2_PE_LAST_FREE_TID);
2735 if (tid < 0)
2736 return tid;
2737
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002738 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002739 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2740 pe.index = tid;
2741
2742 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2743 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2744 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2745 MVPP2_PRS_IPV4_HEAD_MASK |
2746 MVPP2_PRS_IPV4_IHL_MASK);
2747
2748 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2749 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2750 MVPP2_PRS_RI_L3_PROTO_MASK);
2751 /* Skip eth_type + 4 bytes of IP header */
2752 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2753 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2754 /* Set L3 offset */
2755 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2756 MVPP2_ETH_TYPE_LEN,
2757 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2758
2759 /* Update shadow table and hw entry */
2760 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2761 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2762 priv->prs_shadow[pe.index].finish = false;
2763 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2764 MVPP2_PRS_RI_L3_PROTO_MASK);
2765 mvpp2_prs_hw_write(priv, &pe);
2766
2767 /* Ethertype: IPv4 with options */
2768 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2769 MVPP2_PE_LAST_FREE_TID);
2770 if (tid < 0)
2771 return tid;
2772
2773 pe.index = tid;
2774
2775 /* Clear tcam data before updating */
2776 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2777 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2778
2779 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2780 MVPP2_PRS_IPV4_HEAD,
2781 MVPP2_PRS_IPV4_HEAD_MASK);
2782
2783 /* Clear ri before updating */
2784 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2785 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2786 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2787 MVPP2_PRS_RI_L3_PROTO_MASK);
2788
2789 /* Update shadow table and hw entry */
2790 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2791 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2792 priv->prs_shadow[pe.index].finish = false;
2793 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2794 MVPP2_PRS_RI_L3_PROTO_MASK);
2795 mvpp2_prs_hw_write(priv, &pe);
2796
2797 /* Ethertype: IPv6 without options */
2798 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2799 MVPP2_PE_LAST_FREE_TID);
2800 if (tid < 0)
2801 return tid;
2802
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002803 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002804 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2805 pe.index = tid;
2806
2807 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2808
2809 /* Skip DIP of IPV6 header */
2810 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2811 MVPP2_MAX_L3_ADDR_SIZE,
2812 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2813 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2814 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2815 MVPP2_PRS_RI_L3_PROTO_MASK);
2816 /* Set L3 offset */
2817 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2818 MVPP2_ETH_TYPE_LEN,
2819 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2820
2821 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2822 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2823 priv->prs_shadow[pe.index].finish = false;
2824 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2825 MVPP2_PRS_RI_L3_PROTO_MASK);
2826 mvpp2_prs_hw_write(priv, &pe);
2827
2828 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2829	memset(&pe, 0, sizeof(pe));
2830 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2831 pe.index = MVPP2_PE_ETH_TYPE_UN;
2832
2833 /* Unmask all ports */
2834 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2835
2836	/* Generate flow in the next iteration */
2837 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2838 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2839 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2840 MVPP2_PRS_RI_L3_PROTO_MASK);
2841	/* Set L3 offset even if it's unknown L3 */
2842 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2843 MVPP2_ETH_TYPE_LEN,
2844 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2845
2846 /* Update shadow table and hw entry */
2847 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2848 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2849 priv->prs_shadow[pe.index].finish = true;
2850 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2851 MVPP2_PRS_RI_L3_PROTO_MASK);
2852 mvpp2_prs_hw_write(priv, &pe);
2853
2854 return 0;
2855}
2856
2857/* Configure vlan entries and detect up to 2 successive VLAN tags.
2858 * Possible options:
2859 * 0x8100, 0x88A8
2860 * 0x8100, 0x8100
2861 * 0x8100
2862 * 0x88A8
2863 */
2864static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2865{
2866 struct mvpp2_prs_entry pe;
2867 int err;
2868
2869	priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
2870					      MVPP2_PRS_DBL_VLANS_MAX,
2871					      sizeof(bool), GFP_KERNEL);
2872 if (!priv->prs_double_vlans)
2873 return -ENOMEM;
2874
2875 /* Double VLAN: 0x8100, 0x88A8 */
2876 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2877 MVPP2_PRS_PORT_MASK);
2878 if (err)
2879 return err;
2880
2881 /* Double VLAN: 0x8100, 0x8100 */
2882 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2883 MVPP2_PRS_PORT_MASK);
2884 if (err)
2885 return err;
2886
2887 /* Single VLAN: 0x88a8 */
2888 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2889 MVPP2_PRS_PORT_MASK);
2890 if (err)
2891 return err;
2892
2893 /* Single VLAN: 0x8100 */
2894 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2895 MVPP2_PRS_PORT_MASK);
2896 if (err)
2897 return err;
2898
2899 /* Set default double vlan entry */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002900 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002901 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2902 pe.index = MVPP2_PE_VLAN_DBL;
2903
2904 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2905 /* Clear ai for next iterations */
2906 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2907 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2908 MVPP2_PRS_RI_VLAN_MASK);
2909
2910 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2911 MVPP2_PRS_DBL_VLAN_AI_BIT);
2912 /* Unmask all ports */
2913 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2914
2915 /* Update shadow table and hw entry */
2916 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2917 mvpp2_prs_hw_write(priv, &pe);
2918
2919 /* Set default vlan none entry */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002920 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002921 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2922 pe.index = MVPP2_PE_VLAN_NONE;
2923
2924 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2925 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2926 MVPP2_PRS_RI_VLAN_MASK);
2927
2928 /* Unmask all ports */
2929 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2930
2931 /* Update shadow table and hw entry */
2932 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2933 mvpp2_prs_hw_write(priv, &pe);
2934
2935 return 0;
2936}
2937
2938/* Set entries for PPPoE ethertype */
2939static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2940{
2941 struct mvpp2_prs_entry pe;
2942 int tid;
2943
2944 /* IPv4 over PPPoE with options */
2945 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2946 MVPP2_PE_LAST_FREE_TID);
2947 if (tid < 0)
2948 return tid;
2949
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002950 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002951 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2952 pe.index = tid;
2953
2954 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2955
2956 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2957 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2958 MVPP2_PRS_RI_L3_PROTO_MASK);
2959 /* Skip eth_type + 4 bytes of IP header */
2960 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2961 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2962 /* Set L3 offset */
2963 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2964 MVPP2_ETH_TYPE_LEN,
2965 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2966
2967 /* Update shadow table and hw entry */
2968 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2969 mvpp2_prs_hw_write(priv, &pe);
2970
2971 /* IPv4 over PPPoE without options */
2972 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2973 MVPP2_PE_LAST_FREE_TID);
2974 if (tid < 0)
2975 return tid;
2976
2977 pe.index = tid;
2978
2979 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2980 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2981 MVPP2_PRS_IPV4_HEAD_MASK |
2982 MVPP2_PRS_IPV4_IHL_MASK);
2983
2984 /* Clear ri before updating */
2985 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2986 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2987 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2988 MVPP2_PRS_RI_L3_PROTO_MASK);
2989
2990 /* Update shadow table and hw entry */
2991 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2992 mvpp2_prs_hw_write(priv, &pe);
2993
2994 /* IPv6 over PPPoE */
2995 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2996 MVPP2_PE_LAST_FREE_TID);
2997 if (tid < 0)
2998 return tid;
2999
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003000 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003001 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3002 pe.index = tid;
3003
3004 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
3005
3006 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3007 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
3008 MVPP2_PRS_RI_L3_PROTO_MASK);
3009 /* Skip eth_type + 4 bytes of IPv6 header */
3010 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
3011 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3012 /* Set L3 offset */
3013 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3014 MVPP2_ETH_TYPE_LEN,
3015 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3016
3017 /* Update shadow table and hw entry */
3018 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3019 mvpp2_prs_hw_write(priv, &pe);
3020
3021 /* Non-IP over PPPoE */
3022 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3023 MVPP2_PE_LAST_FREE_TID);
3024 if (tid < 0)
3025 return tid;
3026
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003027 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003028 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3029 pe.index = tid;
3030
3031 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
3032 MVPP2_PRS_RI_L3_PROTO_MASK);
3033
3034 /* Finished: go to flowid generation */
3035 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3036 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3037 /* Set L3 offset even if it's unknown L3 */
3038 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3039 MVPP2_ETH_TYPE_LEN,
3040 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3041
3042 /* Update shadow table and hw entry */
3043 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3044 mvpp2_prs_hw_write(priv, &pe);
3045
3046 return 0;
3047}
3048
3049/* Initialize entries for IPv4 */
3050static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
3051{
3052 struct mvpp2_prs_entry pe;
3053 int err;
3054
3055 /* Set entries for TCP, UDP and IGMP over IPv4 */
3056 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
3057 MVPP2_PRS_RI_L4_PROTO_MASK);
3058 if (err)
3059 return err;
3060
3061 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
3062 MVPP2_PRS_RI_L4_PROTO_MASK);
3063 if (err)
3064 return err;
3065
3066 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
3067 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3068 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3069 MVPP2_PRS_RI_CPU_CODE_MASK |
3070 MVPP2_PRS_RI_UDF3_MASK);
3071 if (err)
3072 return err;
3073
3074 /* IPv4 Broadcast */
3075 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
3076 if (err)
3077 return err;
3078
3079 /* IPv4 Multicast */
3080 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3081 if (err)
3082 return err;
3083
3084 /* Default IPv4 entry for unknown protocols */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003085 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003086 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3087 pe.index = MVPP2_PE_IP4_PROTO_UN;
3088
3089 /* Set next lu to IPv4 */
3090 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
3091 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3092 /* Set L4 offset */
3093 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3094 sizeof(struct iphdr) - 4,
3095 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3096 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3097 MVPP2_PRS_IPV4_DIP_AI_BIT);
3098 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3099 MVPP2_PRS_RI_L4_PROTO_MASK);
3100
3101 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
3102 /* Unmask all ports */
3103 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3104
3105 /* Update shadow table and hw entry */
3106 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3107 mvpp2_prs_hw_write(priv, &pe);
3108
3109 /* Default IPv4 entry for unicast address */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003110 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003111 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3112 pe.index = MVPP2_PE_IP4_ADDR_UN;
3113
3114 /* Finished: go to flowid generation */
3115 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3116 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3117 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3118 MVPP2_PRS_RI_L3_ADDR_MASK);
3119
3120 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3121 MVPP2_PRS_IPV4_DIP_AI_BIT);
3122 /* Unmask all ports */
3123 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3124
3125 /* Update shadow table and hw entry */
3126 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3127 mvpp2_prs_hw_write(priv, &pe);
3128
3129 return 0;
3130}
3131
3132/* Initialize entries for IPv6 */
3133static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
3134{
3135 struct mvpp2_prs_entry pe;
3136 int tid, err;
3137
3138 /* Set entries for TCP, UDP and ICMP over IPv6 */
3139 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
3140 MVPP2_PRS_RI_L4_TCP,
3141 MVPP2_PRS_RI_L4_PROTO_MASK);
3142 if (err)
3143 return err;
3144
3145 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
3146 MVPP2_PRS_RI_L4_UDP,
3147 MVPP2_PRS_RI_L4_PROTO_MASK);
3148 if (err)
3149 return err;
3150
3151 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
3152 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3153 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3154 MVPP2_PRS_RI_CPU_CODE_MASK |
3155 MVPP2_PRS_RI_UDF3_MASK);
3156 if (err)
3157 return err;
3158
3159	/* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */
3160 /* Result Info: UDF7=1, DS lite */
3161 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
3162 MVPP2_PRS_RI_UDF7_IP6_LITE,
3163 MVPP2_PRS_RI_UDF7_MASK);
3164 if (err)
3165 return err;
3166
3167 /* IPv6 multicast */
3168 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3169 if (err)
3170 return err;
3171
3172 /* Entry for checking hop limit */
3173 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3174 MVPP2_PE_LAST_FREE_TID);
3175 if (tid < 0)
3176 return tid;
3177
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003178 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003179 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3180 pe.index = tid;
3181
3182 /* Finished: go to flowid generation */
3183 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3184 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3185 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
3186 MVPP2_PRS_RI_DROP_MASK,
3187 MVPP2_PRS_RI_L3_PROTO_MASK |
3188 MVPP2_PRS_RI_DROP_MASK);
3189
3190 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
3191 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3192 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3193
3194 /* Update shadow table and hw entry */
3195 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3196 mvpp2_prs_hw_write(priv, &pe);
3197
3198 /* Default IPv6 entry for unknown protocols */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003199 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003200 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3201 pe.index = MVPP2_PE_IP6_PROTO_UN;
3202
3203 /* Finished: go to flowid generation */
3204 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3205 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3206 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3207 MVPP2_PRS_RI_L4_PROTO_MASK);
3208	/* Set L4 offset relative to our current place */
3209 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3210 sizeof(struct ipv6hdr) - 4,
3211 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3212
3213 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3214 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3215 /* Unmask all ports */
3216 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3217
3218 /* Update shadow table and hw entry */
3219 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3220 mvpp2_prs_hw_write(priv, &pe);
3221
3222 /* Default IPv6 entry for unknown ext protocols */
3223	memset(&pe, 0, sizeof(pe));
3224 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3225 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
3226
3227 /* Finished: go to flowid generation */
3228 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3229 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3230 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3231 MVPP2_PRS_RI_L4_PROTO_MASK);
3232
3233 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
3234 MVPP2_PRS_IPV6_EXT_AI_BIT);
3235 /* Unmask all ports */
3236 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3237
3238 /* Update shadow table and hw entry */
3239 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3240 mvpp2_prs_hw_write(priv, &pe);
3241
3242 /* Default IPv6 entry for unicast address */
3243	memset(&pe, 0, sizeof(pe));
3244 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3245 pe.index = MVPP2_PE_IP6_ADDR_UN;
3246
3247 /* Finished: go to IPv6 again */
3248 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3249 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3250 MVPP2_PRS_RI_L3_ADDR_MASK);
3251 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3252 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3253 /* Shift back to IPV6 NH */
3254 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3255
3256 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3257 /* Unmask all ports */
3258 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3259
3260 /* Update shadow table and hw entry */
3261 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3262 mvpp2_prs_hw_write(priv, &pe);
3263
3264 return 0;
3265}
3266
3267/* Parser default initialization */
3268static int mvpp2_prs_default_init(struct platform_device *pdev,
3269 struct mvpp2 *priv)
3270{
3271 int err, index, i;
3272
3273 /* Enable tcam table */
3274 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
3275
3276 /* Clear all tcam and sram entries */
3277 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
3278 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
3279 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3280 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
3281
3282 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
3283 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3284 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3285 }
3286
3287 /* Invalidate all tcam entries */
3288 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3289 mvpp2_prs_hw_inv(priv, index);
3290
3291 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
Markus Elfring37df25e2017-04-17 09:12:34 +02003292 sizeof(*priv->prs_shadow),
Marcin Wojtas3f518502014-07-10 16:52:13 -03003293 GFP_KERNEL);
3294 if (!priv->prs_shadow)
3295 return -ENOMEM;
3296
3297 /* Always start from lookup = 0 */
3298 for (index = 0; index < MVPP2_MAX_PORTS; index++)
3299 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3300 MVPP2_PRS_PORT_LU_MAX, 0);
3301
3302 mvpp2_prs_def_flow_init(priv);
3303
3304 mvpp2_prs_mh_init(priv);
3305
3306 mvpp2_prs_mac_init(priv);
3307
3308 mvpp2_prs_dsa_init(priv);
3309
3310 err = mvpp2_prs_etype_init(priv);
3311 if (err)
3312 return err;
3313
3314 err = mvpp2_prs_vlan_init(pdev, priv);
3315 if (err)
3316 return err;
3317
3318 err = mvpp2_prs_pppoe_init(priv);
3319 if (err)
3320 return err;
3321
3322 err = mvpp2_prs_ip6_init(priv);
3323 if (err)
3324 return err;
3325
3326 err = mvpp2_prs_ip4_init(priv);
3327 if (err)
3328 return err;
3329
3330 return 0;
3331}
3332
3333/* Compare MAC DA with tcam entry data */
3334static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3335 const u8 *da, unsigned char *mask)
3336{
3337 unsigned char tcam_byte, tcam_mask;
3338 int index;
3339
3340 for (index = 0; index < ETH_ALEN; index++) {
3341 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3342 if (tcam_mask != mask[index])
3343 return false;
3344
3345 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3346 return false;
3347 }
3348
3349 return true;
3350}
3351
3352/* Find tcam entry with matched pair <MAC DA, port> */
3353static struct mvpp2_prs_entry *
3354mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3355 unsigned char *mask, int udf_type)
3356{
3357 struct mvpp2_prs_entry *pe;
3358 int tid;
3359
3360 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3361 if (!pe)
3362 return NULL;
3363 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3364
3365	/* Go through all entries with MVPP2_PRS_LU_MAC */
3366 for (tid = MVPP2_PE_FIRST_FREE_TID;
3367 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3368 unsigned int entry_pmap;
3369
3370 if (!priv->prs_shadow[tid].valid ||
3371 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3372 (priv->prs_shadow[tid].udf != udf_type))
3373 continue;
3374
3375 pe->index = tid;
3376 mvpp2_prs_hw_read(priv, pe);
3377 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3378
3379 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3380 entry_pmap == pmap)
3381 return pe;
3382 }
3383 kfree(pe);
3384
3385 return NULL;
3386}
3387
3388/* Update parser's mac da entry */
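/* The TCAM entry is created on the first add for a given <MAC DA, port map>
 * and invalidated again once no ports reference it.
 */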
3389static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3390 const u8 *da, bool add)
3391{
3392 struct mvpp2_prs_entry *pe;
3393 unsigned int pmap, len, ri;
3394 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3395 int tid;
3396
3397	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
3398 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3399 MVPP2_PRS_UDF_MAC_DEF);
3400
3401 /* No such entry */
3402 if (!pe) {
3403 if (!add)
3404 return 0;
3405
3406 /* Create new TCAM entry */
3407		/* Find the first range MAC entry */
3408 for (tid = MVPP2_PE_FIRST_FREE_TID;
3409 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3410 if (priv->prs_shadow[tid].valid &&
3411 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3412 (priv->prs_shadow[tid].udf ==
3413 MVPP2_PRS_UDF_MAC_RANGE))
3414 break;
3415
3416		/* Go through all entries from first to last */
3417 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3418 tid - 1);
3419 if (tid < 0)
3420 return tid;
3421
3422 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3423 if (!pe)
Amitoj Kaur Chawlac2bb7bc2016-02-04 19:25:26 +05303424 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003425 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3426 pe->index = tid;
3427
3428 /* Mask all ports */
3429 mvpp2_prs_tcam_port_map_set(pe, 0);
3430 }
3431
3432 /* Update port mask */
3433 mvpp2_prs_tcam_port_set(pe, port, add);
3434
3435 /* Invalidate the entry if no ports are left enabled */
3436 pmap = mvpp2_prs_tcam_port_map_get(pe);
3437 if (pmap == 0) {
3438 if (add) {
3439 kfree(pe);
Amitoj Kaur Chawlac2bb7bc2016-02-04 19:25:26 +05303440 return -EINVAL;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003441 }
3442 mvpp2_prs_hw_inv(priv, pe->index);
3443 priv->prs_shadow[pe->index].valid = false;
3444 kfree(pe);
3445 return 0;
3446 }
3447
3448 /* Continue - set next lookup */
3449 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3450
3451 /* Set match on DA */
3452 len = ETH_ALEN;
3453 while (len--)
3454 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3455
3456 /* Set result info bits */
3457 if (is_broadcast_ether_addr(da))
3458 ri = MVPP2_PRS_RI_L2_BCAST;
3459 else if (is_multicast_ether_addr(da))
3460 ri = MVPP2_PRS_RI_L2_MCAST;
3461 else
3462 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3463
3464 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3465 MVPP2_PRS_RI_MAC_ME_MASK);
3466 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3467 MVPP2_PRS_RI_MAC_ME_MASK);
3468
3469 /* Shift to ethertype */
3470 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3471 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3472
3473 /* Update shadow table and hw entry */
3474 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3475 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3476 mvpp2_prs_hw_write(priv, pe);
3477
3478 kfree(pe);
3479
3480 return 0;
3481}
3482
3483static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3484{
3485 struct mvpp2_port *port = netdev_priv(dev);
3486 int err;
3487
3488 /* Remove old parser entry */
3489 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3490 false);
3491 if (err)
3492 return err;
3493
3494 /* Add new parser entry */
3495 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3496 if (err)
3497 return err;
3498
3499 /* Set addr in the device */
3500 ether_addr_copy(dev->dev_addr, da);
3501
3502 return 0;
3503}
3504
3505/* Delete all the port's simple (non-range) multicast entries */
3506static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3507{
3508 struct mvpp2_prs_entry pe;
3509 int index, tid;
3510
3511 for (tid = MVPP2_PE_FIRST_FREE_TID;
3512 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3513 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3514
3515 if (!priv->prs_shadow[tid].valid ||
3516 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3517 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3518 continue;
3519
3520 /* Only simple mac entries */
3521 pe.index = tid;
3522 mvpp2_prs_hw_read(priv, &pe);
3523
3524 /* Read mac addr from entry */
3525 for (index = 0; index < ETH_ALEN; index++)
3526 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3527 &da_mask[index]);
3528
3529 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3530 /* Delete this entry */
3531 mvpp2_prs_mac_da_accept(priv, port, da, false);
3532 }
3533}
3534
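/* Set the port's DSA tagging mode. Membership in the DSA and EDSA parser
 * entries is mutually exclusive, so adding the port to one set also
 * removes it from the other.
 */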
3535static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3536{
3537 switch (type) {
3538 case MVPP2_TAG_TYPE_EDSA:
3539 /* Add port to EDSA entries */
3540 mvpp2_prs_dsa_tag_set(priv, port, true,
3541 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3542 mvpp2_prs_dsa_tag_set(priv, port, true,
3543 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3544 /* Remove port from DSA entries */
3545 mvpp2_prs_dsa_tag_set(priv, port, false,
3546 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3547 mvpp2_prs_dsa_tag_set(priv, port, false,
3548 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3549 break;
3550
3551 case MVPP2_TAG_TYPE_DSA:
3552 /* Add port to DSA entries */
3553 mvpp2_prs_dsa_tag_set(priv, port, true,
3554 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3555 mvpp2_prs_dsa_tag_set(priv, port, true,
3556 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3557 /* Remove port from EDSA entries */
3558 mvpp2_prs_dsa_tag_set(priv, port, false,
3559 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3560 mvpp2_prs_dsa_tag_set(priv, port, false,
3561 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3562 break;
3563
3564 case MVPP2_TAG_TYPE_MH:
3565 case MVPP2_TAG_TYPE_NONE:
3566		/* Remove port from EDSA and DSA entries */
3567 mvpp2_prs_dsa_tag_set(priv, port, false,
3568 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3569 mvpp2_prs_dsa_tag_set(priv, port, false,
3570 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3571 mvpp2_prs_dsa_tag_set(priv, port, false,
3572 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3573 mvpp2_prs_dsa_tag_set(priv, port, false,
3574 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3575 break;
3576
3577 default:
3578 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3579 return -EINVAL;
3580 }
3581
3582 return 0;
3583}
3584
3585/* Set prs flow for the port */
3586static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3587{
3588 struct mvpp2_prs_entry *pe;
3589 int tid;
3590
3591 pe = mvpp2_prs_flow_find(port->priv, port->id);
3592
3593	/* No such entry exists */
3594 if (!pe) {
3595		/* Go through all entries from last to first */
3596 tid = mvpp2_prs_tcam_first_free(port->priv,
3597 MVPP2_PE_LAST_FREE_TID,
3598 MVPP2_PE_FIRST_FREE_TID);
3599 if (tid < 0)
3600 return tid;
3601
3602 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3603 if (!pe)
3604 return -ENOMEM;
3605
3606 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3607 pe->index = tid;
3608
3609		/* Set flow ID */
3610 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3611 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3612
3613 /* Update shadow table */
3614 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3615 }
3616
3617 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3618 mvpp2_prs_hw_write(port->priv, pe);
3619 kfree(pe);
3620
3621 return 0;
3622}
3623
3624/* Classifier configuration routines */
3625
3626/* Update classification flow table registers */
3627static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3628 struct mvpp2_cls_flow_entry *fe)
3629{
3630 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3631 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3632 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3633 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3634}
3635
3636/* Update classification lookup table register */
3637static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3638 struct mvpp2_cls_lookup_entry *le)
3639{
3640 u32 val;
3641
3642 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3643 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3644 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3645}
3646
3647/* Classifier default initialization */
3648static void mvpp2_cls_init(struct mvpp2 *priv)
3649{
3650 struct mvpp2_cls_lookup_entry le;
3651 struct mvpp2_cls_flow_entry fe;
3652 int index;
3653
3654 /* Enable classifier */
3655 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3656
3657 /* Clear classifier flow table */
Arnd Bergmanne8f967c2016-11-24 17:28:12 +01003658 memset(&fe.data, 0, sizeof(fe.data));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003659 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3660 fe.index = index;
3661 mvpp2_cls_flow_write(priv, &fe);
3662 }
3663
3664 /* Clear classifier lookup table */
3665 le.data = 0;
3666 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3667 le.lkpid = index;
3668 le.way = 0;
3669 mvpp2_cls_lookup_write(priv, &le);
3670
3671 le.way = 1;
3672 mvpp2_cls_lookup_write(priv, &le);
3673 }
3674}
3675
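/* Per-port classifier configuration: select lookup way 0 and program the
 * lookup ID decoding table entry with the port's first RXQ, leaving all
 * classification engines disabled.
 */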
3676static void mvpp2_cls_port_config(struct mvpp2_port *port)
3677{
3678 struct mvpp2_cls_lookup_entry le;
3679 u32 val;
3680
3681 /* Set way for the port */
3682 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3683 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3684 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3685
3686	/* Pick the entry to be accessed in the lookup ID decoding table
3687 * according to the way and lkpid.
3688 */
3689 le.lkpid = port->id;
3690 le.way = 0;
3691 le.data = 0;
3692
3693 /* Set initial CPU queue for receiving packets */
3694 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3695 le.data |= port->first_rxq;
3696
3697 /* Disable classification engines */
3698 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3699
3700 /* Update lookup ID table entry */
3701 mvpp2_cls_lookup_write(port->priv, &le);
3702}
3703
3704/* Set CPU queue number for oversize packets */
3705static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3706{
3707 u32 val;
3708
3709 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3710 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3711
3712 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3713 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3714
3715 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3716 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3717 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3718}
3719
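/* Buffer fragments come from the page-fragment allocator when they fit in
 * a page, and fall back to kmalloc otherwise; mvpp2_frag_free() mirrors
 * the same choice when releasing them.
 */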
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003720static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
3721{
3722 if (likely(pool->frag_size <= PAGE_SIZE))
3723 return netdev_alloc_frag(pool->frag_size);
3724 else
3725 return kmalloc(pool->frag_size, GFP_ATOMIC);
3726}
3727
3728static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
3729{
3730 if (likely(pool->frag_size <= PAGE_SIZE))
3731 skb_free_frag(data);
3732 else
3733 kfree(data);
3734}
3735
Marcin Wojtas3f518502014-07-10 16:52:13 -03003736/* Buffer Manager configuration routines */
3737
3738/* Create pool */
3739static int mvpp2_bm_pool_create(struct platform_device *pdev,
3740 struct mvpp2 *priv,
3741 struct mvpp2_bm_pool *bm_pool, int size)
3742{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003743 u32 val;
3744
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003745 /* Number of buffer pointers must be a multiple of 16, as per
3746 * hardware constraints
3747 */
3748 if (!IS_ALIGNED(size, 16))
3749 return -EINVAL;
3750
3751 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
3752 * bytes per buffer pointer
3753 */
3754 if (priv->hw_version == MVPP21)
3755 bm_pool->size_bytes = 2 * sizeof(u32) * size;
3756 else
3757 bm_pool->size_bytes = 2 * sizeof(u64) * size;
3758
3759 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003760 &bm_pool->dma_addr,
Marcin Wojtas3f518502014-07-10 16:52:13 -03003761 GFP_KERNEL);
3762 if (!bm_pool->virt_addr)
3763 return -ENOMEM;
3764
Thomas Petazzonid3158802017-02-21 11:28:13 +01003765 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
3766 MVPP2_BM_POOL_PTR_ALIGN)) {
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003767 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3768 bm_pool->virt_addr, bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003769 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3770 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3771 return -ENOMEM;
3772 }
3773
3774 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003775 lower_32_bits(bm_pool->dma_addr));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003776 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3777
3778 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3779 val |= MVPP2_BM_START_MASK;
3780 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3781
3782 bm_pool->type = MVPP2_BM_FREE;
3783 bm_pool->size = size;
3784 bm_pool->pkt_size = 0;
3785 bm_pool->buf_num = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003786
3787 return 0;
3788}
3789
3790/* Set pool buffer size */
3791static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3792 struct mvpp2_bm_pool *bm_pool,
3793 int buf_size)
3794{
3795 u32 val;
3796
3797 bm_pool->buf_size = buf_size;
3798
3799 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3800 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3801}
3802
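/* Pop one buffer from a BM pool: reading MVPP2_BM_PHY_ALLOC_REG returns
 * the buffer's DMA address and latches its cookie (the physical address)
 * into MVPP2_BM_VIRT_ALLOC_REG; on PPv2.2 the high address bits are
 * fetched from a separate register.
 */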
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003803static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3804 struct mvpp2_bm_pool *bm_pool,
3805 dma_addr_t *dma_addr,
3806 phys_addr_t *phys_addr)
3807{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02003808 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01003809
3810 *dma_addr = mvpp2_percpu_read(priv, cpu,
3811 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3812 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003813
3814 if (priv->hw_version == MVPP22) {
3815 u32 val;
3816 u32 dma_addr_highbits, phys_addr_highbits;
3817
Thomas Petazzonia7868412017-03-07 16:53:13 +01003818 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003819 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
3820 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
3821 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
3822
3823 if (sizeof(dma_addr_t) == 8)
3824 *dma_addr |= (u64)dma_addr_highbits << 32;
3825
3826 if (sizeof(phys_addr_t) == 8)
3827 *phys_addr |= (u64)phys_addr_highbits << 32;
3828 }
Thomas Petazzonia704bb52017-06-10 23:18:22 +02003829
3830 put_cpu();
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003831}
3832
Ezequiel Garcia7861f122014-07-21 13:48:14 -03003833/* Free all buffers from the pool */
Marcin Wojtas4229d502015-12-03 15:20:50 +01003834static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3835 struct mvpp2_bm_pool *bm_pool)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003836{
3837 int i;
3838
Ezequiel Garcia7861f122014-07-21 13:48:14 -03003839 for (i = 0; i < bm_pool->buf_num; i++) {
Thomas Petazzoni20396132017-03-07 16:53:00 +01003840 dma_addr_t buf_dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003841 phys_addr_t buf_phys_addr;
3842 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003843
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003844 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
3845 &buf_dma_addr, &buf_phys_addr);
Marcin Wojtas4229d502015-12-03 15:20:50 +01003846
Thomas Petazzoni20396132017-03-07 16:53:00 +01003847 dma_unmap_single(dev, buf_dma_addr,
Marcin Wojtas4229d502015-12-03 15:20:50 +01003848 bm_pool->buf_size, DMA_FROM_DEVICE);
3849
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003850 data = (void *)phys_to_virt(buf_phys_addr);
3851 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003852 break;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003853
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003854 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003855 }
3856
3857 /* Update BM driver with number of buffers removed from pool */
3858 bm_pool->buf_num -= i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003859}
3860
3861/* Cleanup pool */
3862static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3863 struct mvpp2 *priv,
3864 struct mvpp2_bm_pool *bm_pool)
3865{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003866 u32 val;
3867
Marcin Wojtas4229d502015-12-03 15:20:50 +01003868 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03003869 if (bm_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03003870 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3871 return 0;
3872 }
3873
3874 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3875 val |= MVPP2_BM_STOP_MASK;
3876 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3877
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003878 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
Marcin Wojtas3f518502014-07-10 16:52:13 -03003879 bm_pool->virt_addr,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003880 bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003881 return 0;
3882}
3883
3884static int mvpp2_bm_pools_init(struct platform_device *pdev,
3885 struct mvpp2 *priv)
3886{
3887 int i, err, size;
3888 struct mvpp2_bm_pool *bm_pool;
3889
3890 /* Create all pools with maximum size */
3891 size = MVPP2_BM_POOL_SIZE_MAX;
3892 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3893 bm_pool = &priv->bm_pools[i];
3894 bm_pool->id = i;
3895 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3896 if (err)
3897 goto err_unroll_pools;
3898 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3899 }
3900 return 0;
3901
3902err_unroll_pools:
3903 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3904 for (i = i - 1; i >= 0; i--)
3905 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3906 return err;
3907}
3908
3909static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3910{
3911 int i, err;
3912
3913 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3914 /* Mask BM all interrupts */
3915 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3916 /* Clear BM cause register */
3917 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3918 }
3919
3920 /* Allocate and initialize BM pools */
3921 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
Markus Elfring81f915e2017-04-17 09:06:33 +02003922 sizeof(*priv->bm_pools), GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003923 if (!priv->bm_pools)
3924 return -ENOMEM;
3925
3926 err = mvpp2_bm_pools_init(pdev, priv);
3927 if (err < 0)
3928 return err;
3929 return 0;
3930}
3931
3932/* Attach long pool to rxq */
3933static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3934 int lrxq, int long_pool)
3935{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003936 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003937 int prxq;
3938
3939 /* Get queue physical ID */
3940 prxq = port->rxqs[lrxq]->id;
3941
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003942 if (port->priv->hw_version == MVPP21)
3943 mask = MVPP21_RXQ_POOL_LONG_MASK;
3944 else
3945 mask = MVPP22_RXQ_POOL_LONG_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003946
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003947 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3948 val &= ~mask;
3949 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003950 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3951}
3952
3953/* Attach short pool to rxq */
3954static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3955 int lrxq, int short_pool)
3956{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003957 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003958 int prxq;
3959
3960 /* Get queue physical ID */
3961 prxq = port->rxqs[lrxq]->id;
3962
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003963 if (port->priv->hw_version == MVPP21)
3964 mask = MVPP21_RXQ_POOL_SHORT_MASK;
3965 else
3966 mask = MVPP22_RXQ_POOL_SHORT_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003967
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003968 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3969 val &= ~mask;
3970 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003971 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3972}
3973
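/* Allocate one buffer fragment and DMA-map it for reception; returns the
 * virtual address and fills in the buffer's DMA and physical addresses.
 */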
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003974static void *mvpp2_buf_alloc(struct mvpp2_port *port,
3975 struct mvpp2_bm_pool *bm_pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003976 dma_addr_t *buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003977 phys_addr_t *buf_phys_addr,
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003978 gfp_t gfp_mask)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003979{
Thomas Petazzoni20396132017-03-07 16:53:00 +01003980 dma_addr_t dma_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003981 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003982
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003983 data = mvpp2_frag_alloc(bm_pool);
3984 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003985 return NULL;
3986
Thomas Petazzoni20396132017-03-07 16:53:00 +01003987 dma_addr = dma_map_single(port->dev->dev.parent, data,
3988 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3989 DMA_FROM_DEVICE);
3990 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003991 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003992 return NULL;
3993 }
Thomas Petazzoni20396132017-03-07 16:53:00 +01003994 *buf_dma_addr = dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003995 *buf_phys_addr = virt_to_phys(data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003996
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003997 return data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003998}
3999
Marcin Wojtas3f518502014-07-10 16:52:13 -03004000/* Release buffer to BM */
4001static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004002 dma_addr_t buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004003 phys_addr_t buf_phys_addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004004{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004005 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01004006
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004007 if (port->priv->hw_version == MVPP22) {
4008 u32 val = 0;
4009
4010 if (sizeof(dma_addr_t) == 8)
4011 val |= upper_32_bits(buf_dma_addr) &
4012 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
4013
4014 if (sizeof(phys_addr_t) == 8)
4015 val |= (upper_32_bits(buf_phys_addr)
4016 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
4017 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
4018
Thomas Petazzonia7868412017-03-07 16:53:13 +01004019 mvpp2_percpu_write(port->priv, cpu,
4020 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004021 }
4022
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004023	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by the HW and is simply
4024 * returned in the "cookie" field of the RX
4025 * descriptor. Instead of storing the virtual address, we
4026 * store the physical address
4027 */
Thomas Petazzonia7868412017-03-07 16:53:13 +01004028 mvpp2_percpu_write(port->priv, cpu,
4029 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
4030 mvpp2_percpu_write(port->priv, cpu,
4031 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004032
4033 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03004034}
4035
Marcin Wojtas3f518502014-07-10 16:52:13 -03004036/* Allocate buffers for the pool */
4037static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
4038 struct mvpp2_bm_pool *bm_pool, int buf_num)
4039{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004040 int i, buf_size, total_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01004041 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004042 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004043 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004044
4045 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
4046 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
4047
4048 if (buf_num < 0 ||
4049 (buf_num + bm_pool->buf_num > bm_pool->size)) {
4050 netdev_err(port->dev,
4051 "cannot allocate %d buffers for pool %d\n",
4052 buf_num, bm_pool->id);
4053 return 0;
4054 }
4055
Marcin Wojtas3f518502014-07-10 16:52:13 -03004056 for (i = 0; i < buf_num; i++) {
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004057 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4058 &phys_addr, GFP_KERNEL);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004059 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004060 break;
4061
Thomas Petazzoni20396132017-03-07 16:53:00 +01004062 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004063 phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004064 }
4065
4066 /* Update BM driver with number of buffers added to pool */
4067 bm_pool->buf_num += i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004068
4069 netdev_dbg(port->dev,
4070 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
4071 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4072 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4073
4074 netdev_dbg(port->dev,
4075 "%s pool %d: %d of %d buffers added\n",
4076 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4077 bm_pool->id, i, buf_num);
4078 return i;
4079}
4080
4081/* Notify the driver that BM pool is being used as a specific type and return the
4082 * pool pointer on success
4083 */
4084static struct mvpp2_bm_pool *
4085mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
4086 int pkt_size)
4087{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004088 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4089 int num;
4090
4091 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
4092 netdev_err(port->dev, "mixing pool types is forbidden\n");
4093 return NULL;
4094 }
4095
Marcin Wojtas3f518502014-07-10 16:52:13 -03004096 if (new_pool->type == MVPP2_BM_FREE)
4097 new_pool->type = type;
4098
4099 /* Allocate buffers in case BM pool is used as long pool, but packet
4100	 * size doesn't match MTU or BM pool hasn't been used yet
4101 */
4102 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
4103 (new_pool->pkt_size == 0)) {
4104 int pkts_num;
4105
4106 /* Set default buffer number or free all the buffers in case
4107 * the pool is not empty
4108 */
4109 pkts_num = new_pool->buf_num;
4110 if (pkts_num == 0)
4111 pkts_num = type == MVPP2_BM_SWF_LONG ?
4112 MVPP2_BM_LONG_BUF_NUM :
4113 MVPP2_BM_SHORT_BUF_NUM;
4114 else
Marcin Wojtas4229d502015-12-03 15:20:50 +01004115 mvpp2_bm_bufs_free(port->dev->dev.parent,
4116 port->priv, new_pool);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004117
4118 new_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004119 new_pool->frag_size =
4120 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4121 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004122
4123 /* Allocate buffers for this pool */
4124 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4125 if (num != pkts_num) {
4126 WARN(1, "pool %d: %d of %d allocated\n",
4127 new_pool->id, num, pkts_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004128 return NULL;
4129 }
4130 }
4131
4132 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4133 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4134
Marcin Wojtas3f518502014-07-10 16:52:13 -03004135 return new_pool;
4136}
4137
4138/* Initialize pools for swf */
4139static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4140{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004141 int rxq;
4142
4143 if (!port->pool_long) {
4144 port->pool_long =
4145 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
4146 MVPP2_BM_SWF_LONG,
4147 port->pkt_size);
4148 if (!port->pool_long)
4149 return -ENOMEM;
4150
Marcin Wojtas3f518502014-07-10 16:52:13 -03004151 port->pool_long->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004152
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004153 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004154 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4155 }
4156
4157 if (!port->pool_short) {
4158 port->pool_short =
4159 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
4160 MVPP2_BM_SWF_SHORT,
4161 MVPP2_BM_SHORT_PKT_SIZE);
4162 if (!port->pool_short)
4163 return -ENOMEM;
4164
Marcin Wojtas3f518502014-07-10 16:52:13 -03004165 port->pool_short->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004166
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004167 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004168 mvpp2_rxq_short_pool_set(port, rxq,
4169 port->pool_short->id);
4170 }
4171
4172 return 0;
4173}
4174
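/* Resize the port's long pool for a new MTU: drain all buffers, update
 * the packet and fragment sizes, then refill the pool.
 */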
4175static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4176{
4177 struct mvpp2_port *port = netdev_priv(dev);
4178 struct mvpp2_bm_pool *port_pool = port->pool_long;
4179 int num, pkts_num = port_pool->buf_num;
4180 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4181
4182 /* Update BM pool with new buffer size */
Marcin Wojtas4229d502015-12-03 15:20:50 +01004183 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03004184 if (port_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004185 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
4186 return -EIO;
4187 }
4188
4189 port_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004190 port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4191 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004192 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
4193 if (num != pkts_num) {
4194 WARN(1, "pool %d: %d of %d allocated\n",
4195 port_pool->id, num, pkts_num);
4196 return -EIO;
4197 }
4198
4199 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
4200 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
4201 dev->mtu = mtu;
4202 netdev_update_features(dev);
4203 return 0;
4204}
4205
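/* Enable the port's Rx/Tx interrupts on every software thread (CPU) used
 * by its queue vectors.
 */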
4206static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4207{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004208 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004209
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004210 for (i = 0; i < port->nqvecs; i++)
4211 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4212
Marcin Wojtas3f518502014-07-10 16:52:13 -03004213 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004214 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004215}
4216
4217static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4218{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004219 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004220
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004221 for (i = 0; i < port->nqvecs; i++)
4222 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4223
Marcin Wojtas3f518502014-07-10 16:52:13 -03004224 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004225 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
4226}
4227
4228static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
4229{
4230 struct mvpp2_port *port = qvec->port;
4231
4232 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4233 MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
4234}
4235
4236static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
4237{
4238 struct mvpp2_port *port = qvec->port;
4239
4240 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4241 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004242}
4243
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004244/* Mask the current CPU's Rx/Tx interrupts
4245 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4246 * using smp_processor_id() is OK.
4247 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004248static void mvpp2_interrupts_mask(void *arg)
4249{
4250 struct mvpp2_port *port = arg;
4251
Thomas Petazzonia7868412017-03-07 16:53:13 +01004252 mvpp2_percpu_write(port->priv, smp_processor_id(),
4253 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004254}
4255
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004256/* Unmask the current CPU's Rx/Tx interrupts.
4257 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4258 * using smp_processor_id() is OK.
4259 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004260static void mvpp2_interrupts_unmask(void *arg)
4261{
4262 struct mvpp2_port *port = arg;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004263 u32 val;
4264
4265 val = MVPP2_CAUSE_MISC_SUM_MASK |
4266 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4267 if (port->has_tx_irqs)
4268 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004269
Thomas Petazzonia7868412017-03-07 16:53:13 +01004270 mvpp2_percpu_write(port->priv, smp_processor_id(),
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004271 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4272}
4273
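/* Mask or unmask the Rx interrupts of the port's shared queue vectors;
 * PPv2.1 has no shared vectors, so this is a no-op there.
 */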
4274static void
4275mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
4276{
4277 u32 val;
4278 int i;
4279
4280 if (port->priv->hw_version != MVPP22)
4281 return;
4282
4283 if (mask)
4284 val = 0;
4285 else
4286 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4287
4288 for (i = 0; i < port->nqvecs; i++) {
4289 struct mvpp2_queue_vector *v = port->qvecs + i;
4290
4291 if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
4292 continue;
4293
4294 mvpp2_percpu_write(port->priv, v->sw_thread_id,
4295 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4296 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004297}
4298
4299/* Port configuration routines */
4300
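/* GoP (Group of Ports) configuration for PPv2.2: the helpers below program
 * the system controller regmap to mux a port into RGMII, SGMII or 10GKR
 * (10GBASE-KR) mode before the MAC itself is configured.
 */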
Antoine Ténartf84bf382017-08-22 19:08:27 +02004301static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
4302{
4303 struct mvpp2 *priv = port->priv;
4304 u32 val;
4305
4306 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4307 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
4308 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4309
4310 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4311 if (port->gop_id == 2)
4312 val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
4313 else if (port->gop_id == 3)
4314 val |= GENCONF_CTRL0_PORT1_RGMII_MII;
4315 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4316}
4317
4318static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
4319{
4320 struct mvpp2 *priv = port->priv;
4321 u32 val;
4322
4323 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4324 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
4325 GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
4326 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4327
4328 if (port->gop_id > 1) {
4329 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4330 if (port->gop_id == 2)
4331 val &= ~GENCONF_CTRL0_PORT0_RGMII;
4332 else if (port->gop_id == 3)
4333 val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
4334 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4335 }
4336}
4337
4338static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
4339{
4340 struct mvpp2 *priv = port->priv;
4341 void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
4342 void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
4343 u32 val;
4344
4345 /* XPCS */
4346 val = readl(xpcs + MVPP22_XPCS_CFG0);
4347 val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
4348 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
4349 val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
4350 writel(val, xpcs + MVPP22_XPCS_CFG0);
4351
4352 /* MPCS */
4353 val = readl(mpcs + MVPP22_MPCS_CTRL);
4354 val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
4355 writel(val, mpcs + MVPP22_MPCS_CTRL);
4356
4357 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
4358 val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
4359 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
4360 val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
4361 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4362
4363 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
4364 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
4365 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4366}
4367
4368static int mvpp22_gop_init(struct mvpp2_port *port)
4369{
4370 struct mvpp2 *priv = port->priv;
4371 u32 val;
4372
4373 if (!priv->sysctrl_base)
4374 return 0;
4375
4376 switch (port->phy_interface) {
4377 case PHY_INTERFACE_MODE_RGMII:
4378 case PHY_INTERFACE_MODE_RGMII_ID:
4379 case PHY_INTERFACE_MODE_RGMII_RXID:
4380 case PHY_INTERFACE_MODE_RGMII_TXID:
4381 if (port->gop_id == 0)
4382 goto invalid_conf;
4383 mvpp22_gop_init_rgmii(port);
4384 break;
4385 case PHY_INTERFACE_MODE_SGMII:
4386 mvpp22_gop_init_sgmii(port);
4387 break;
4388 case PHY_INTERFACE_MODE_10GKR:
4389 if (port->gop_id != 0)
4390 goto invalid_conf;
4391 mvpp22_gop_init_10gkr(port);
4392 break;
4393 default:
4394 goto unsupported_conf;
4395 }
4396
4397 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
4398 val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
4399 GENCONF_PORT_CTRL1_EN(port->gop_id);
4400 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
4401
4402 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4403 val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
4404 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4405
4406 regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
4407 val |= GENCONF_SOFT_RESET1_GOP;
4408 regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
4409
4410unsupported_conf:
4411 return 0;
4412
4413invalid_conf:
4414 netdev_err(port->dev, "Invalid port configuration\n");
4415 return -EINVAL;
4416}
4417
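/* Apply the GMAC per-mode settings: as the register names suggest, SGMII
 * selects the internal data-path clock while the RGMII variants use the
 * external GMII pins; both paths then set up in-band autonegotiation of
 * speed and duplex.
 */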
Antoine Ténart39193572017-08-22 19:08:24 +02004418static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
4419{
4420 u32 val;
4421
4422 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4423 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4424 val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
4425 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4426 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4427 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4428
4429 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4430 val |= MVPP2_GMAC_DISABLE_PADDING;
4431 val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
4432 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4433 } else if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
4434 port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
4435 port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
4436 port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
4437 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4438 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
4439 MVPP22_CTRL4_SYNC_BYPASS_DIS |
4440 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4441 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4442 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4443
4444 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4445 val &= ~MVPP2_GMAC_DISABLE_PADDING;
4446 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4447 }
4448
4449 /* The port is connected to a copper PHY */
4450 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4451 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
4452 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4453
4454 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4455 val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
4456 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
4457 MVPP2_GMAC_AN_DUPLEX_EN;
4458 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4459 val |= MVPP2_GMAC_IN_BAND_AUTONEG;
4460 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4461}
4462
4463static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
4464{
4465 u32 val;
4466
4467 /* Force link down */
4468 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4469 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
4470 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
4471 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4472
4473 /* Set the GMAC in a reset state */
4474 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4475 val |= MVPP2_GMAC_PORT_RESET_MASK;
4476 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4477
4478 /* Configure the PCS and in-band AN */
4479 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4480 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4481 val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
4482 } else if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
4483 port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
4484 port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
4485 port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
4486 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
4487 val |= MVPP2_GMAC_PORT_RGMII_MASK;
4488 }
4489 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4490
4491 mvpp2_port_mii_gmac_configure_mode(port);
4492
4493 /* Unset the GMAC reset state */
4494 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4495 val &= ~MVPP2_GMAC_PORT_RESET_MASK;
4496 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4497
4498 /* Stop forcing link down */
4499 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4500 val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
4501 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4502}
4503
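/* Configure the XLG MAC (GoP port 0 only): enable Rx flow control and
 * select the 10G MAC mode with FC/PFC forwarding.
 */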
Antoine Ténart77321952017-08-22 19:08:25 +02004504static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
4505{
4506 u32 val;
4507
4508 if (port->gop_id != 0)
4509 return;
4510
4511 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4512 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
4513 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4514
4515 val = readl(port->base + MVPP22_XLG_CTRL4_REG);
4516 val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
4517 val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
4518 writel(val, port->base + MVPP22_XLG_CTRL4_REG);
4519}
4520
Thomas Petazzoni26975822017-03-07 16:53:14 +01004521static void mvpp22_port_mii_set(struct mvpp2_port *port)
4522{
4523 u32 val;
4524
Thomas Petazzoni26975822017-03-07 16:53:14 +01004525 /* Only GOP port 0 has an XLG MAC */
4526 if (port->gop_id == 0) {
4527 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
4528 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
Antoine Ténart725757a2017-06-12 16:01:39 +02004529
4530 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4531 port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4532 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4533 else
4534 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4535
Thomas Petazzoni26975822017-03-07 16:53:14 +01004536 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
4537 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01004538}
4539
Marcin Wojtas3f518502014-07-10 16:52:13 -03004540static void mvpp2_port_mii_set(struct mvpp2_port *port)
4541{
Thomas Petazzoni26975822017-03-07 16:53:14 +01004542 if (port->priv->hw_version == MVPP22)
4543 mvpp22_port_mii_set(port);
4544
Antoine Ténart39193572017-08-22 19:08:24 +02004545 if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
4546 port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
4547 port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
4548 port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
4549 port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4550 mvpp2_port_mii_gmac_configure(port);
Antoine Ténart77321952017-08-22 19:08:25 +02004551 else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4552 mvpp2_port_mii_xlg_configure(port);
Marcin Wojtas08a23752014-07-21 13:48:12 -03004553}
4554
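/* Advertise IEEE 802.3x flow control support during autonegotiation */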
4555static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
4556{
4557 u32 val;
4558
4559 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4560 val |= MVPP2_GMAC_FC_ADV_EN;
4561 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004562}
4563
4564static void mvpp2_port_enable(struct mvpp2_port *port)
4565{
4566 u32 val;
4567
Antoine Ténart725757a2017-06-12 16:01:39 +02004568 /* Only GOP port 0 has an XLG MAC */
4569 if (port->gop_id == 0 &&
4570 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4571 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4572 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4573 val |= MVPP22_XLG_CTRL0_PORT_EN |
4574 MVPP22_XLG_CTRL0_MAC_RESET_DIS;
4575 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
4576 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4577 } else {
4578 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4579 val |= MVPP2_GMAC_PORT_EN_MASK;
4580 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
4581 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4582 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004583}
4584
4585static void mvpp2_port_disable(struct mvpp2_port *port)
4586{
4587 u32 val;
4588
Antoine Ténart725757a2017-06-12 16:01:39 +02004589 /* Only GOP port 0 has an XLG MAC */
4590 if (port->gop_id == 0 &&
4591 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4592 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4593 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4594 val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
4595 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
4596 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4597 } else {
4598 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4599 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
4600 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4601 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004602}
4603
4604/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
4605static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
4606{
4607 u32 val;
4608
4609 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
4610 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
4611 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4612}
4613
4614/* Configure loopback port */
4615static void mvpp2_port_loopback_set(struct mvpp2_port *port)
4616{
4617 u32 val;
4618
4619 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4620
4621 if (port->speed == 1000)
4622 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
4623 else
4624 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
4625
4626 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4627 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
4628 else
4629 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
4630
4631 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4632}
4633
4634static void mvpp2_port_reset(struct mvpp2_port *port)
4635{
4636 u32 val;
4637
4638 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4639 ~MVPP2_GMAC_PORT_RESET_MASK;
4640 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4641
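	/* Wait until the MAC actually comes out of reset */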
4642 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4643 MVPP2_GMAC_PORT_RESET_MASK)
4644 continue;
4645}
4646
4647/* Change maximum receive size of the port */
4648static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
4649{
4650 u32 val;
4651
4652 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4653 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
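	/* The hardware field appears to count 2-byte units, hence the
	 * division by 2; MVPP2_MH_SIZE accounts for the Marvell header
	 * prepended to each frame
	 */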
4654 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
4655 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
4656 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4657}
4658
Stefan Chulski76eb1b12017-08-22 19:08:26 +02004659/* Change maximum receive size of the port */
4660static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
4661{
4662 u32 val;
4663
4664 val = readl(port->base + MVPP22_XLG_CTRL1_REG);
4665 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
4666 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
4667 MVPP22_XLG_CTRL1_FRAMESIZELIMIT;
4668 writel(val, port->base + MVPP22_XLG_CTRL1_REG);
4669}
4670
Marcin Wojtas3f518502014-07-10 16:52:13 -03004671/* Set defaults to the MVPP2 port */
4672static void mvpp2_defaults_set(struct mvpp2_port *port)
4673{
4674 int tx_port_num, val, queue, ptxq, lrxq;
4675
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01004676 if (port->priv->hw_version == MVPP21) {
4677 /* Configure port to loopback if needed */
4678 if (port->flags & MVPP2_F_LOOPBACK)
4679 mvpp2_port_loopback_set(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004680
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01004681 /* Update TX FIFO MIN Threshold */
4682 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4683 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
4684 /* Min. TX threshold must be less than minimal packet length */
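	/* 64 is the minimum Ethernet frame size; the 4 and 2 presumably
	 * account for the CRC and the Marvell header
	 */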
4685 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
4686 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4687 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004688
4689 /* Disable Legacy WRR, Disable EJP, Release from reset */
4690 tx_port_num = mvpp2_egress_port(port);
4691 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
4692 tx_port_num);
4693 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
4694
4695 /* Close bandwidth for all queues */
4696 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
4697 ptxq = mvpp2_txq_phys(port->id, queue);
4698 mvpp2_write(port->priv,
4699 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
4700 }
4701
4702 /* Set refill period to 1 usec, refill tokens
4703 * and bucket size to maximum
4704 */
4705 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
4706 port->priv->tclk / USEC_PER_SEC);
4707 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
4708 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
4709 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
4710 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
4711 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
4712 val = MVPP2_TXP_TOKEN_SIZE_MAX;
4713 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4714
4715 /* Set MaximumLowLatencyPacketSize value to 256 */
4716 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
4717 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
4718 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
4719
4720 /* Enable Rx cache snoop */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004721 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004722 queue = port->rxqs[lrxq]->id;
4723 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4724 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
4725 MVPP2_SNOOP_BUF_HDR_MASK;
4726 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4727 }
4728
4729	/* By default, mask all interrupts on all present CPUs */
4730 mvpp2_interrupts_disable(port);
4731}
4732
4733/* Enable/disable receiving packets */
4734static void mvpp2_ingress_enable(struct mvpp2_port *port)
4735{
4736 u32 val;
4737 int lrxq, queue;
4738
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004739 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004740 queue = port->rxqs[lrxq]->id;
4741 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4742 val &= ~MVPP2_RXQ_DISABLE_MASK;
4743 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4744 }
4745}
4746
4747static void mvpp2_ingress_disable(struct mvpp2_port *port)
4748{
4749 u32 val;
4750 int lrxq, queue;
4751
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004752 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004753 queue = port->rxqs[lrxq]->id;
4754 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4755 val |= MVPP2_RXQ_DISABLE_MASK;
4756 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4757 }
4758}
4759
4760/* Enable transmit via physical egress queue
4761 * - HW starts to take descriptors from DRAM
4762 */
4763static void mvpp2_egress_enable(struct mvpp2_port *port)
4764{
4765 u32 qmap;
4766 int queue;
4767 int tx_port_num = mvpp2_egress_port(port);
4768
4769	/* Enable all initialized TXQs. */
4770 qmap = 0;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004771 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004772 struct mvpp2_tx_queue *txq = port->txqs[queue];
4773
Markus Elfringdbbb2f02017-04-17 14:07:52 +02004774 if (txq->descs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004775 qmap |= (1 << queue);
4776 }
4777
4778 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4779 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4780}
4781
4782/* Disable transmit via physical egress queue
4783 * - HW doesn't take descriptors from DRAM
4784 */
4785static void mvpp2_egress_disable(struct mvpp2_port *port)
4786{
4787 u32 reg_data;
4788 int delay;
4789 int tx_port_num = mvpp2_egress_port(port);
4790
4791 /* Issue stop command for active channels only */
4792 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4793 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4794 MVPP2_TXP_SCHED_ENQ_MASK;
4795 if (reg_data != 0)
4796 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4797 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4798
4799 /* Wait for all Tx activity to terminate. */
4800 delay = 0;
4801 do {
4802 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4803 netdev_warn(port->dev,
4804 "Tx stop timed out, status=0x%08x\n",
4805 reg_data);
4806 break;
4807 }
4808 mdelay(1);
4809 delay++;
4810
4811		/* Check the port TX Command register to verify that all
4812		 * Tx queues have stopped
4813 */
4814 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4815 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4816}
4817
4818/* Rx descriptors helper methods */
4819
4820/* Get number of Rx descriptors occupied by received packets */
4821static inline int
4822mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4823{
4824 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4825
4826 return val & MVPP2_RXQ_OCCUPIED_MASK;
4827}
4828
4829/* Update Rx queue status with the number of occupied and available
4830 * Rx descriptor slots.
4831 */
4832static inline void
4833mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4834 int used_count, int free_count)
4835{
4836	/* Decrement the number of used descriptors and increment
4837	 * the number of free descriptors.
4838 */
4839 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4840
4841 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4842}
4843
4844/* Get pointer to next RX descriptor to be processed by SW */
4845static inline struct mvpp2_rx_desc *
4846mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4847{
4848 int rx_desc = rxq->next_desc_to_proc;
4849
4850 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4851 prefetch(rxq->descs + rxq->next_desc_to_proc);
4852 return rxq->descs + rx_desc;
4853}
4854
4855/* Set rx queue offset */
4856static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4857 int prxq, int offset)
4858{
4859 u32 val;
4860
4861 /* Convert offset from bytes to units of 32 bytes */
4862 offset = offset >> 5;
4863
4864 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4865 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4866
4867	/* Offset is in units of 32 bytes */
4868 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4869 MVPP2_RXQ_PACKET_OFFSET_MASK);
4870
4871 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4872}
4873
Marcin Wojtas3f518502014-07-10 16:52:13 -03004874/* Tx descriptors helper methods */
4875
Marcin Wojtas3f518502014-07-10 16:52:13 -03004876/* Get pointer to next Tx descriptor to be processed (send) by HW */
4877static struct mvpp2_tx_desc *
4878mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4879{
4880 int tx_desc = txq->next_desc_to_proc;
4881
4882 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4883 return txq->descs + tx_desc;
4884}
4885
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004886/* Update HW with number of aggregated Tx descriptors to be sent
4887 *
4888 * Called only from mvpp2_tx(), so migration is disabled, using
4889 * smp_processor_id() is OK.
4890 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004891static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4892{
4893 /* aggregated access - relevant TXQ number is written in TX desc */
Thomas Petazzonia7868412017-03-07 16:53:13 +01004894 mvpp2_percpu_write(port->priv, smp_processor_id(),
4895 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004896}
4897
4898
4899/* Check if there are enough free descriptors in aggregated txq.
4900 * If not, update the number of occupied descriptors and repeat the check.
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004901 *
4902 * Called only from mvpp2_tx(), so migration is disabled, using
4903 * smp_processor_id() is OK.
Marcin Wojtas3f518502014-07-10 16:52:13 -03004904 */
4905static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4906 struct mvpp2_tx_queue *aggr_txq, int num)
4907{
4908 if ((aggr_txq->count + num) > aggr_txq->size) {
4909 /* Update number of occupied aggregated Tx descriptors */
4910 int cpu = smp_processor_id();
4911 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4912
4913 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4914 }
4915
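	/* Re-check with the refreshed occupancy count */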
4916 if ((aggr_txq->count + num) > aggr_txq->size)
4917 return -ENOMEM;
4918
4919 return 0;
4920}
4921
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004922/* Reserved Tx descriptors allocation request
4923 *
4924 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
4925 * only by mvpp2_tx(), so migration is disabled, using
4926 * smp_processor_id() is OK.
4927 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004928static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4929 struct mvpp2_tx_queue *txq, int num)
4930{
4931 u32 val;
Thomas Petazzonia7868412017-03-07 16:53:13 +01004932 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03004933
4934 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
Thomas Petazzonia7868412017-03-07 16:53:13 +01004935 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004936
Thomas Petazzonia7868412017-03-07 16:53:13 +01004937 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004938
4939 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4940}
4941
4942/* Check if there are enough reserved descriptors for transmission.
4943 * If not, request chunk of reserved descriptors and check again.
4944 */
4945static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4946 struct mvpp2_tx_queue *txq,
4947 struct mvpp2_txq_pcpu *txq_pcpu,
4948 int num)
4949{
4950 int req, cpu, desc_count;
4951
4952 if (txq_pcpu->reserved_num >= num)
4953 return 0;
4954
4955 /* Not enough descriptors reserved! Update the reserved descriptor
4956 * count and check again.
4957 */
4958
4959 desc_count = 0;
4960 /* Compute total of used descriptors */
4961 for_each_present_cpu(cpu) {
4962 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4963
4964 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4965 desc_count += txq_pcpu_aux->count;
4966 desc_count += txq_pcpu_aux->reserved_num;
4967 }
4968
4969 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4970 desc_count += req;
4971
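	/* Refuse if this would eat into the per-CPU chunk headroom that
	 * other CPUs may still need to reserve descriptors from
	 */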
4972 if (desc_count >
4973 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4974 return -ENOMEM;
4975
4976 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4977
4978	/* OK, the descriptor count has been updated: check again. */
4979 if (txq_pcpu->reserved_num < num)
4980 return -ENOMEM;
4981 return 0;
4982}
4983
4984/* Release the last allocated Tx descriptor. Useful to handle DMA
4985 * mapping failures in the Tx path.
4986 */
4987static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4988{
4989 if (txq->next_desc_to_proc == 0)
4990 txq->next_desc_to_proc = txq->last_desc - 1;
4991 else
4992 txq->next_desc_to_proc--;
4993}
4994
4995/* Set Tx descriptors fields relevant for CSUM calculation */
4996static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4997 int ip_hdr_len, int l4_proto)
4998{
4999 u32 command;
5000
5001 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
5002 * G_L4_chk, L4_type required only for checksum calculation
5003 */
5004 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
5005 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
5006 command |= MVPP2_TXD_IP_CSUM_DISABLE;
5007
5008 if (l3_proto == swab16(ETH_P_IP)) {
5009 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
5010 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
5011 } else {
5012 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
5013 }
5014
5015 if (l4_proto == IPPROTO_TCP) {
5016 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
5017 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5018 } else if (l4_proto == IPPROTO_UDP) {
5019 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
5020 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5021 } else {
5022 command |= MVPP2_TXD_L4_CSUM_NOT;
5023 }
5024
5025 return command;
5026}
5027
5028/* Get number of sent descriptors and decrement counter.
5029 * The number of sent descriptors is returned.
5030 * Per-CPU access
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005031 *
5032 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
5033 * (migration disabled) and from the TX completion tasklet (migration
5034 * disabled) so using smp_processor_id() is OK.
Marcin Wojtas3f518502014-07-10 16:52:13 -03005035 */
5036static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
5037 struct mvpp2_tx_queue *txq)
5038{
5039 u32 val;
5040
5041 /* Reading status reg resets transmitted descriptor counter */
Thomas Petazzonia7868412017-03-07 16:53:13 +01005042 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
5043 MVPP2_TXQ_SENT_REG(txq->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005044
5045 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
5046 MVPP2_TRANSMITTED_COUNT_OFFSET;
5047}
5048
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005049/* Called through on_each_cpu(), so runs on all CPUs, with migration
5050 * disabled, therefore using smp_processor_id() is OK.
5051 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03005052static void mvpp2_txq_sent_counter_clear(void *arg)
5053{
5054 struct mvpp2_port *port = arg;
5055 int queue;
5056
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005057 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005058 int id = port->txqs[queue]->id;
5059
Thomas Petazzonia7868412017-03-07 16:53:13 +01005060 mvpp2_percpu_read(port->priv, smp_processor_id(),
5061 MVPP2_TXQ_SENT_REG(id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005062 }
5063}
5064
5065/* Set max sizes for Tx queues */
5066static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
5067{
5068 u32 val, size, mtu;
5069 int txq, tx_port_num;
5070
5071 mtu = port->pkt_size * 8;
5072 if (mtu > MVPP2_TXP_MTU_MAX)
5073 mtu = MVPP2_TXP_MTU_MAX;
5074
5075 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
5076 mtu = 3 * mtu;
5077
5078 /* Indirect access to registers */
5079 tx_port_num = mvpp2_egress_port(port);
5080 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5081
5082 /* Set MTU */
5083 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
5084 val &= ~MVPP2_TXP_MTU_MAX;
5085 val |= mtu;
5086 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
5087
5088	/* TXP token size and all TXQs token size must be larger than MTU */
5089 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
5090 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
5091 if (size < mtu) {
5092 size = mtu;
5093 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
5094 val |= size;
5095 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5096 }
5097
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005098 for (txq = 0; txq < port->ntxqs; txq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005099 val = mvpp2_read(port->priv,
5100 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
5101 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
5102
5103 if (size < mtu) {
5104 size = mtu;
5105 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
5106 val |= size;
5107 mvpp2_write(port->priv,
5108 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
5109 val);
5110 }
5111 }
5112}
5113
5114/* Set the number of packets that will be received before Rx interrupt
5115 * will be generated by HW.
5116 */
5117static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01005118 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005119{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005120 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005121
Thomas Petazzonif8b0d5f2017-02-21 11:28:03 +01005122 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
5123 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005124
Thomas Petazzonia7868412017-03-07 16:53:13 +01005125 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5126 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
5127 rxq->pkts_coal);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005128
5129 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005130}
5131
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005132/* For some reason in the LSP this is done on each CPU. Why? */
5133static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
5134 struct mvpp2_tx_queue *txq)
5135{
5136 int cpu = get_cpu();
5137 u32 val;
5138
5139 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
5140 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
5141
5142 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
5143 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5144 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
5145
5146 put_cpu();
5147}
5148
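/* Convert a duration in usec to a number of tclk cycles, clamped to
 * U32_MAX; with a 250 MHz clock, for instance, 100 usec maps to
 * 25000 cycles
 */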
Thomas Petazzoniab426762017-02-21 11:28:04 +01005149static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
5150{
5151 u64 tmp = (u64)clk_hz * usec;
5152
5153 do_div(tmp, USEC_PER_SEC);
5154
5155 return tmp > U32_MAX ? U32_MAX : tmp;
5156}
5157
5158static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
5159{
5160 u64 tmp = (u64)cycles * USEC_PER_SEC;
5161
5162 do_div(tmp, clk_hz);
5163
5164 return tmp > U32_MAX ? U32_MAX : tmp;
5165}
5166
Marcin Wojtas3f518502014-07-10 16:52:13 -03005167/* Set the time delay in usec before Rx interrupt */
5168static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01005169 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005170{
Thomas Petazzoniab426762017-02-21 11:28:04 +01005171 unsigned long freq = port->priv->tclk;
5172 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005173
Thomas Petazzoniab426762017-02-21 11:28:04 +01005174 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
5175 rxq->time_coal =
5176 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
5177
5178 /* re-evaluate to get actual register value */
5179 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
5180 }
5181
Marcin Wojtas3f518502014-07-10 16:52:13 -03005182 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005183}
5184
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005185static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
5186{
5187 unsigned long freq = port->priv->tclk;
5188 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5189
5190 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
5191 port->tx_time_coal =
5192 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
5193
5194 /* re-evaluate to get actual register value */
5195 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5196 }
5197
5198 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
5199}
5200
Marcin Wojtas3f518502014-07-10 16:52:13 -03005201/* Free Tx queue skbuffs */
5202static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
5203 struct mvpp2_tx_queue *txq,
5204 struct mvpp2_txq_pcpu *txq_pcpu, int num)
5205{
5206 int i;
5207
5208 for (i = 0; i < num; i++) {
Thomas Petazzoni83544912016-12-21 11:28:49 +01005209 struct mvpp2_txq_pcpu_buf *tx_buf =
5210 txq_pcpu->buffs + txq_pcpu->txq_get_index;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005211
Thomas Petazzoni20396132017-03-07 16:53:00 +01005212 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
Thomas Petazzoni83544912016-12-21 11:28:49 +01005213 tx_buf->size, DMA_TO_DEVICE);
Thomas Petazzoni36fb7432017-02-21 11:28:05 +01005214 if (tx_buf->skb)
5215 dev_kfree_skb_any(tx_buf->skb);
5216
5217 mvpp2_txq_inc_get(txq_pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005218 }
5219}
5220
5221static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
5222 u32 cause)
5223{
5224 int queue = fls(cause) - 1;
5225
5226 return port->rxqs[queue];
5227}
5228
5229static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
5230 u32 cause)
5231{
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005232 int queue = fls(cause) - 1;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005233
5234 return port->txqs[queue];
5235}
5236
5237/* Handle end of transmission */
5238static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
5239 struct mvpp2_txq_pcpu *txq_pcpu)
5240{
5241 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
5242 int tx_done;
5243
5244 if (txq_pcpu->cpu != smp_processor_id())
5245 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
5246
5247 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
5248 if (!tx_done)
5249 return;
5250 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
5251
5252 txq_pcpu->count -= tx_done;
5253
5254 if (netif_tx_queue_stopped(nq))
5255 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
5256 netif_tx_wake_queue(nq);
5257}
5258
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005259static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
5260 int cpu)
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005261{
5262 struct mvpp2_tx_queue *txq;
5263 struct mvpp2_txq_pcpu *txq_pcpu;
5264 unsigned int tx_todo = 0;
5265
5266 while (cause) {
5267 txq = mvpp2_get_tx_queue(port, cause);
5268 if (!txq)
5269 break;
5270
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005271 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005272
5273 if (txq_pcpu->count) {
5274 mvpp2_txq_done(port, txq, txq_pcpu);
5275 tx_todo += txq_pcpu->count;
5276 }
5277
5278 cause &= ~(1 << txq->log_id);
5279 }
5280 return tx_todo;
5281}
5282
Marcin Wojtas3f518502014-07-10 16:52:13 -03005283/* Rx/Tx queue initialization/cleanup methods */
5284
5285/* Allocate and initialize descriptors for aggr TXQ */
5286static int mvpp2_aggr_txq_init(struct platform_device *pdev,
Antoine Ténart85affd72017-08-23 09:46:55 +02005287 struct mvpp2_tx_queue *aggr_txq, int cpu,
Marcin Wojtas3f518502014-07-10 16:52:13 -03005288 struct mvpp2 *priv)
5289{
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005290 u32 txq_dma;
5291
Marcin Wojtas3f518502014-07-10 16:52:13 -03005292 /* Allocate memory for TX descriptors */
5293 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
Antoine Ténart85affd72017-08-23 09:46:55 +02005294 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005295 &aggr_txq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005296 if (!aggr_txq->descs)
5297 return -ENOMEM;
5298
Marcin Wojtas3f518502014-07-10 16:52:13 -03005299 aggr_txq->last_desc = aggr_txq->size - 1;
5300
5301	/* Aggr TXQ no reset WA: HW does not reset this queue, so sync the
	 * driver index from the current HW index
	 */
5302 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
5303 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
5304
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005305 /* Set Tx descriptors queue starting address indirect
5306 * access
5307 */
5308 if (priv->hw_version == MVPP21)
5309 txq_dma = aggr_txq->descs_dma;
5310 else
5311 txq_dma = aggr_txq->descs_dma >>
5312 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
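	/* on PPv22 the register stores the address pre-shifted, presumably
	 * to fit DMA addresses wider than 32 bit
	 */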
5313
5314 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
Antoine Ténart85affd72017-08-23 09:46:55 +02005315 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
5316 MVPP2_AGGR_TXQ_SIZE);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005317
5318 return 0;
5319}
5320
5321/* Create a specified Rx queue */
5322static int mvpp2_rxq_init(struct mvpp2_port *port,
5323 struct mvpp2_rx_queue *rxq)
5324
5325{
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005326 u32 rxq_dma;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005327 int cpu;
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005328
Marcin Wojtas3f518502014-07-10 16:52:13 -03005329 rxq->size = port->rx_ring_size;
5330
5331 /* Allocate memory for RX descriptors */
5332 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
5333 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005334 &rxq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005335 if (!rxq->descs)
5336 return -ENOMEM;
5337
Marcin Wojtas3f518502014-07-10 16:52:13 -03005338 rxq->last_desc = rxq->size - 1;
5339
5340 /* Zero occupied and non-occupied counters - direct access */
5341 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5342
5343 /* Set Rx descriptors queue starting address - indirect access */
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005344 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005345 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005346 if (port->priv->hw_version == MVPP21)
5347 rxq_dma = rxq->descs_dma;
5348 else
5349 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005350 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
5351 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
5352 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005353 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005354
5355 /* Set Offset */
5356 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
5357
5358 /* Set coalescing pkts and time */
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01005359 mvpp2_rx_pkts_coal_set(port, rxq);
5360 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005361
5362 /* Add number of descriptors ready for receiving packets */
5363 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
5364
5365 return 0;
5366}
5367
5368/* Push packets received by the RXQ to BM pool */
5369static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
5370 struct mvpp2_rx_queue *rxq)
5371{
5372 int rx_received, i;
5373
5374 rx_received = mvpp2_rxq_received(port, rxq->id);
5375 if (!rx_received)
5376 return;
5377
5378 for (i = 0; i < rx_received; i++) {
5379 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005380 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5381 int pool;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005382
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005383 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5384 MVPP2_RXD_BM_POOL_ID_OFFS;
5385
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02005386 mvpp2_bm_pool_put(port, pool,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005387 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
5388 mvpp2_rxdesc_cookie_get(port, rx_desc));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005389 }
5390 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
5391}
5392
5393/* Cleanup Rx queue */
5394static void mvpp2_rxq_deinit(struct mvpp2_port *port,
5395 struct mvpp2_rx_queue *rxq)
5396{
Thomas Petazzonia7868412017-03-07 16:53:13 +01005397 int cpu;
5398
Marcin Wojtas3f518502014-07-10 16:52:13 -03005399 mvpp2_rxq_drop_pkts(port, rxq);
5400
5401 if (rxq->descs)
5402 dma_free_coherent(port->dev->dev.parent,
5403 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
5404 rxq->descs,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005405 rxq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005406
5407 rxq->descs = NULL;
5408 rxq->last_desc = 0;
5409 rxq->next_desc_to_proc = 0;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005410 rxq->descs_dma = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005411
5412 /* Clear Rx descriptors queue starting address and size;
5413 * free descriptor number
5414 */
5415 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005416 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005417 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5418 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
5419 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005420 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005421}
5422
5423/* Create and initialize a Tx queue */
5424static int mvpp2_txq_init(struct mvpp2_port *port,
5425 struct mvpp2_tx_queue *txq)
5426{
5427 u32 val;
5428 int cpu, desc, desc_per_txq, tx_port_num;
5429 struct mvpp2_txq_pcpu *txq_pcpu;
5430
5431 txq->size = port->tx_ring_size;
5432
5433 /* Allocate memory for Tx descriptors */
5434 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
5435 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005436 &txq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005437 if (!txq->descs)
5438 return -ENOMEM;
5439
Marcin Wojtas3f518502014-07-10 16:52:13 -03005440 txq->last_desc = txq->size - 1;
5441
5442 /* Set Tx descriptors queue starting address - indirect access */
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005443 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005444 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5445 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
5446 txq->descs_dma);
5447 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
5448 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
5449 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
5450 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
5451 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
5452 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005453 val &= ~MVPP2_TXQ_PENDING_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005454 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005455
5456 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
5457 * for each existing TXQ.
5458 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
5459	 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
5460 */
5461 desc_per_txq = 16;
5462 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
5463 (txq->log_id * desc_per_txq);
5464
Thomas Petazzonia7868412017-03-07 16:53:13 +01005465 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
5466 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
5467 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005468 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005469
5470 /* WRR / EJP configuration - indirect access */
5471 tx_port_num = mvpp2_egress_port(port);
5472 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5473
5474 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
5475 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
5476 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
5477 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
5478 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
5479
5480 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
5481 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
5482 val);
5483
5484 for_each_present_cpu(cpu) {
5485 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5486 txq_pcpu->size = txq->size;
Markus Elfring02c91ec2017-04-17 08:09:07 +02005487 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
5488 sizeof(*txq_pcpu->buffs),
5489 GFP_KERNEL);
Thomas Petazzoni83544912016-12-21 11:28:49 +01005490 if (!txq_pcpu->buffs)
Markus Elfring20b1e162017-04-17 12:58:33 +02005491 goto cleanup;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005492
5493 txq_pcpu->count = 0;
5494 txq_pcpu->reserved_num = 0;
5495 txq_pcpu->txq_put_index = 0;
5496 txq_pcpu->txq_get_index = 0;
5497 }
5498
5499 return 0;
Markus Elfring20b1e162017-04-17 12:58:33 +02005500cleanup:
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005501 for_each_present_cpu(cpu) {
5502 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Thomas Petazzoni83544912016-12-21 11:28:49 +01005503 kfree(txq_pcpu->buffs);
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005504 }
5505
5506 dma_free_coherent(port->dev->dev.parent,
5507 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005508 txq->descs, txq->descs_dma);
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005509
5510 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005511}
5512
5513/* Free allocated TXQ resources */
5514static void mvpp2_txq_deinit(struct mvpp2_port *port,
5515 struct mvpp2_tx_queue *txq)
5516{
5517 struct mvpp2_txq_pcpu *txq_pcpu;
5518 int cpu;
5519
5520 for_each_present_cpu(cpu) {
5521 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Thomas Petazzoni83544912016-12-21 11:28:49 +01005522 kfree(txq_pcpu->buffs);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005523 }
5524
5525 if (txq->descs)
5526 dma_free_coherent(port->dev->dev.parent,
5527 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005528 txq->descs, txq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005529
5530 txq->descs = NULL;
5531 txq->last_desc = 0;
5532 txq->next_desc_to_proc = 0;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005533 txq->descs_dma = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005534
5535 /* Set minimum bandwidth for disabled TXQs */
5536 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
5537
5538 /* Set Tx descriptors queue starting address and size */
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005539 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005540 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5541 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
5542 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005543 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005544}
5545
5546/* Cleanup Tx ports */
5547static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5548{
5549 struct mvpp2_txq_pcpu *txq_pcpu;
5550 int delay, pending, cpu;
5551 u32 val;
5552
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005553 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005554 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5555 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005556 val |= MVPP2_TXQ_DRAIN_EN_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005557 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005558
5559 /* The napi queue has been stopped so wait for all packets
5560 * to be transmitted.
5561 */
5562 delay = 0;
5563 do {
5564 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
5565 netdev_warn(port->dev,
5566 "port %d: cleaning queue %d timed out\n",
5567 port->id, txq->log_id);
5568 break;
5569 }
5570 mdelay(1);
5571 delay++;
5572
Thomas Petazzonia7868412017-03-07 16:53:13 +01005573 pending = mvpp2_percpu_read(port->priv, cpu,
5574 MVPP2_TXQ_PENDING_REG);
5575 pending &= MVPP2_TXQ_PENDING_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005576 } while (pending);
5577
5578 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005579 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005580 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005581
5582 for_each_present_cpu(cpu) {
5583 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5584
5585 /* Release all packets */
5586 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
5587
5588 /* Reset queue */
5589 txq_pcpu->count = 0;
5590 txq_pcpu->txq_put_index = 0;
5591 txq_pcpu->txq_get_index = 0;
5592 }
5593}
5594
5595/* Cleanup all Tx queues */
5596static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
5597{
5598 struct mvpp2_tx_queue *txq;
5599 int queue;
5600 u32 val;
5601
5602 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
5603
5604 /* Reset Tx ports and delete Tx queues */
5605 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
5606 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5607
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005608 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005609 txq = port->txqs[queue];
5610 mvpp2_txq_clean(port, txq);
5611 mvpp2_txq_deinit(port, txq);
5612 }
5613
5614 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5615
5616 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
5617 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5618}
5619
5620/* Cleanup all Rx queues */
5621static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
5622{
5623 int queue;
5624
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005625 for (queue = 0; queue < port->nrxqs; queue++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005626 mvpp2_rxq_deinit(port, port->rxqs[queue]);
5627}
5628
5629/* Init all Rx queues for port */
5630static int mvpp2_setup_rxqs(struct mvpp2_port *port)
5631{
5632 int queue, err;
5633
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005634 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005635 err = mvpp2_rxq_init(port, port->rxqs[queue]);
5636 if (err)
5637 goto err_cleanup;
5638 }
5639 return 0;
5640
5641err_cleanup:
5642 mvpp2_cleanup_rxqs(port);
5643 return err;
5644}
5645
5646/* Init all tx queues for port */
5647static int mvpp2_setup_txqs(struct mvpp2_port *port)
5648{
5649 struct mvpp2_tx_queue *txq;
5650 int queue, err;
5651
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005652 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005653 txq = port->txqs[queue];
5654 err = mvpp2_txq_init(port, txq);
5655 if (err)
5656 goto err_cleanup;
5657 }
5658
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005659 if (port->has_tx_irqs) {
5660 mvpp2_tx_time_coal_set(port);
5661 for (queue = 0; queue < port->ntxqs; queue++) {
5662 txq = port->txqs[queue];
5663 mvpp2_tx_pkts_coal_set(port, txq);
5664 }
5665 }
5666
Marcin Wojtas3f518502014-07-10 16:52:13 -03005667 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5668 return 0;
5669
5670err_cleanup:
5671 mvpp2_cleanup_txqs(port);
5672 return err;
5673}
5674
5675/* The callback for per-port interrupt */
5676static irqreturn_t mvpp2_isr(int irq, void *dev_id)
5677{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005678 struct mvpp2_queue_vector *qv = dev_id;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005679
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005680 mvpp2_qvec_interrupt_disable(qv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005681
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005682 napi_schedule(&qv->napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005683
5684 return IRQ_HANDLED;
5685}
5686
5687/* Adjust link */
5688static void mvpp2_link_event(struct net_device *dev)
5689{
5690 struct mvpp2_port *port = netdev_priv(dev);
Philippe Reynes8e072692016-06-28 00:08:11 +02005691 struct phy_device *phydev = dev->phydev;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005692 int status_change = 0;
5693 u32 val;
5694
5695 if (phydev->link) {
5696 if ((port->speed != phydev->speed) ||
5697 (port->duplex != phydev->duplex)) {
5698 u32 val;
5699
5700 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5701 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
5702 MVPP2_GMAC_CONFIG_GMII_SPEED |
5703 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
5704 MVPP2_GMAC_AN_SPEED_EN |
5705 MVPP2_GMAC_AN_DUPLEX_EN);
5706
5707 if (phydev->duplex)
5708 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5709
5710 if (phydev->speed == SPEED_1000)
5711 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
Thomas Petazzoni2add5112014-07-27 23:21:35 +02005712 else if (phydev->speed == SPEED_100)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005713 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
5714
5715 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5716
5717 port->duplex = phydev->duplex;
5718 port->speed = phydev->speed;
5719 }
5720 }
5721
5722 if (phydev->link != port->link) {
5723 if (!phydev->link) {
5724 port->duplex = -1;
5725 port->speed = 0;
5726 }
5727
5728 port->link = phydev->link;
5729 status_change = 1;
5730 }
5731
5732 if (status_change) {
5733 if (phydev->link) {
5734 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5735 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
5736 MVPP2_GMAC_FORCE_LINK_DOWN);
5737 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5738 mvpp2_egress_enable(port);
5739 mvpp2_ingress_enable(port);
5740 } else {
5741 mvpp2_ingress_disable(port);
5742 mvpp2_egress_disable(port);
5743 }
5744 phy_print_status(phydev);
5745 }
5746}
5747
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005748static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
5749{
5750 ktime_t interval;
5751
5752 if (!port_pcpu->timer_scheduled) {
5753 port_pcpu->timer_scheduled = true;
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01005754 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005755 hrtimer_start(&port_pcpu->tx_done_timer, interval,
5756 HRTIMER_MODE_REL_PINNED);
5757 }
5758}
5759
5760static void mvpp2_tx_proc_cb(unsigned long data)
5761{
5762 struct net_device *dev = (struct net_device *)data;
5763 struct mvpp2_port *port = netdev_priv(dev);
5764 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5765 unsigned int tx_todo, cause;
5766
5767 if (!netif_running(dev))
5768 return;
5769 port_pcpu->timer_scheduled = false;
5770
5771 /* Process all the Tx queues */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005772 cause = (1 << port->ntxqs) - 1;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005773 tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005774
5775 /* Set the timer in case not all the packets were processed */
5776 if (tx_todo)
5777 mvpp2_timer_set(port_pcpu);
5778}
5779
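/* hrtimer callbacks run in hard-irq context, so the actual Tx
 * completion work is deferred to a tasklet running in softirq context
 */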
5780static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
5781{
5782 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
5783 struct mvpp2_port_pcpu,
5784 tx_done_timer);
5785
5786 tasklet_schedule(&port_pcpu->tx_done_tasklet);
5787
5788 return HRTIMER_NORESTART;
5789}
5790
Marcin Wojtas3f518502014-07-10 16:52:13 -03005791/* Main RX/TX processing routines */
5792
5793/* Display more error info */
5794static void mvpp2_rx_error(struct mvpp2_port *port,
5795 struct mvpp2_rx_desc *rx_desc)
5796{
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005797 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5798 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005799
5800 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
5801 case MVPP2_RXD_ERR_CRC:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005802 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
5803 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005804 break;
5805 case MVPP2_RXD_ERR_OVERRUN:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005806 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
5807 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005808 break;
5809 case MVPP2_RXD_ERR_RESOURCE:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005810 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
5811 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005812 break;
5813 }
5814}
5815
5816/* Handle RX checksum offload */
5817static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
5818 struct sk_buff *skb)
5819{
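	/* The checksum is reported as verified only for an error-free IPv4
	 * or an IPv6 header carrying TCP or UDP with a good L4 checksum
	 */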
5820 if (((status & MVPP2_RXD_L3_IP4) &&
5821 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
5822 (status & MVPP2_RXD_L3_IP6))
5823 if (((status & MVPP2_RXD_L4_UDP) ||
5824 (status & MVPP2_RXD_L4_TCP)) &&
5825 (status & MVPP2_RXD_L4_CSUM_OK)) {
5826 skb->csum = 0;
5827 skb->ip_summed = CHECKSUM_UNNECESSARY;
5828 return;
5829 }
5830
5831 skb->ip_summed = CHECKSUM_NONE;
5832}
5833
5834/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
5835static int mvpp2_rx_refill(struct mvpp2_port *port,
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005836 struct mvpp2_bm_pool *bm_pool, int pool)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005837{
Thomas Petazzoni20396132017-03-07 16:53:00 +01005838 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01005839 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005840 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005841
Marcin Wojtas3f518502014-07-10 16:52:13 -03005842 /* No recycle or too many buffers are in use, so allocate a new skb */
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01005843 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
5844 GFP_ATOMIC);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005845 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005846 return -ENOMEM;
5847
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02005848 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Thomas Petazzoni7ef7e1d2017-02-21 11:28:07 +01005849
Marcin Wojtas3f518502014-07-10 16:52:13 -03005850 return 0;
5851}
5852
5853/* Handle tx checksum */
5854static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5855{
5856 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5857 int ip_hdr_len = 0;
5858 u8 l4_proto;
5859
5860 if (skb->protocol == htons(ETH_P_IP)) {
5861 struct iphdr *ip4h = ip_hdr(skb);
5862
5863 /* Calculate IPv4 checksum and L4 checksum */
5864 ip_hdr_len = ip4h->ihl;
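			/* ihl already counts 32-bit words, the unit
			 * mvpp2_txq_desc_csum() expects; the IPv6 path
			 * below converts from bytes the same way
			 */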
5865 l4_proto = ip4h->protocol;
5866 } else if (skb->protocol == htons(ETH_P_IPV6)) {
5867 struct ipv6hdr *ip6h = ipv6_hdr(skb);
5868
5869 /* Read l4_protocol from one of IPv6 extra headers */
5870 if (skb_network_header_len(skb) > 0)
5871 ip_hdr_len = (skb_network_header_len(skb) >> 2);
5872 l4_proto = ip6h->nexthdr;
5873 } else {
5874 return MVPP2_TXD_L4_CSUM_NOT;
5875 }
5876
5877 return mvpp2_txq_desc_csum(skb_network_offset(skb),
5878 skb->protocol, ip_hdr_len, l4_proto);
5879 }
5880
5881 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5882}
5883
Marcin Wojtas3f518502014-07-10 16:52:13 -03005884/* Main rx processing */
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005885static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
5886 int rx_todo, struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005887{
5888 struct net_device *dev = port->dev;
Marcin Wojtasb5015852015-12-03 15:20:51 +01005889 int rx_received;
5890 int rx_done = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005891 u32 rcvd_pkts = 0;
5892 u32 rcvd_bytes = 0;
5893
5894	/* Get the number of received packets and clamp the to-do budget */
5895 rx_received = mvpp2_rxq_received(port, rxq->id);
5896 if (rx_todo > rx_received)
5897 rx_todo = rx_received;
5898
Marcin Wojtasb5015852015-12-03 15:20:51 +01005899 while (rx_done < rx_todo) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005900 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5901 struct mvpp2_bm_pool *bm_pool;
5902 struct sk_buff *skb;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005903 unsigned int frag_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005904 dma_addr_t dma_addr;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005905 phys_addr_t phys_addr;
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005906 u32 rx_status;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005907 int pool, rx_bytes, err;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005908 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005909
Marcin Wojtasb5015852015-12-03 15:20:51 +01005910 rx_done++;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005911 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
5912 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
5913 rx_bytes -= MVPP2_MH_SIZE;
5914 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
5915 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
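		/* the descriptor cookie carries the buffer's physical
		 * address, stored there when the buffer was put into the
		 * BM pool
		 */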
5916 data = (void *)phys_to_virt(phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005917
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005918 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5919 MVPP2_RXD_BM_POOL_ID_OFFS;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005920 bm_pool = &port->priv->bm_pools[pool];
Marcin Wojtas3f518502014-07-10 16:52:13 -03005921
5922 /* In case of an error, release the requested buffer pointer
5923 * to the Buffer Manager. This request process is controlled
5924 * by the hardware, and the information about the buffer is
5925		 * carried in the RX descriptor.
5926 */
5927 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
Markus Elfring8a524882017-04-17 10:52:02 +02005928err_drop_frame:
Marcin Wojtas3f518502014-07-10 16:52:13 -03005929 dev->stats.rx_errors++;
5930 mvpp2_rx_error(port, rx_desc);
Marcin Wojtasb5015852015-12-03 15:20:51 +01005931 /* Return the buffer to the pool */
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02005932 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005933 continue;
5934 }
5935
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005936 if (bm_pool->frag_size > PAGE_SIZE)
5937 frag_size = 0;
5938 else
5939 frag_size = bm_pool->frag_size;
5940
5941 skb = build_skb(data, frag_size);
5942 if (!skb) {
5943 netdev_warn(port->dev, "skb build failed\n");
5944 goto err_drop_frame;
5945 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005946
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005947 err = mvpp2_rx_refill(port, bm_pool, pool);
Marcin Wojtasb5015852015-12-03 15:20:51 +01005948 if (err) {
5949 netdev_err(port->dev, "failed to refill BM pools\n");
5950 goto err_drop_frame;
5951 }
5952
Thomas Petazzoni20396132017-03-07 16:53:00 +01005953 dma_unmap_single(dev->dev.parent, dma_addr,
Marcin Wojtas4229d502015-12-03 15:20:50 +01005954 bm_pool->buf_size, DMA_FROM_DEVICE);
5955
Marcin Wojtas3f518502014-07-10 16:52:13 -03005956 rcvd_pkts++;
5957 rcvd_bytes += rx_bytes;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005958
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005959 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005960 skb_put(skb, rx_bytes);
5961 skb->protocol = eth_type_trans(skb, dev);
5962 mvpp2_rx_csum(port, rx_status, skb);
5963
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005964 napi_gro_receive(napi, skb);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005965 }
5966
5967 if (rcvd_pkts) {
5968 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5969
5970 u64_stats_update_begin(&stats->syncp);
5971 stats->rx_packets += rcvd_pkts;
5972 stats->rx_bytes += rcvd_bytes;
5973 u64_stats_update_end(&stats->syncp);
5974 }
5975
5976 /* Update Rx queue management counters */
5977 wmb();
Marcin Wojtasb5015852015-12-03 15:20:51 +01005978 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005979
5980 return rx_todo;
5981}
5982
5983static inline void
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005984tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
Marcin Wojtas3f518502014-07-10 16:52:13 -03005985 struct mvpp2_tx_desc *desc)
5986{
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005987 dma_addr_t buf_dma_addr =
5988 mvpp2_txdesc_dma_addr_get(port, desc);
5989 size_t buf_sz =
5990 mvpp2_txdesc_size_get(port, desc);
5991 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
5992 buf_sz, DMA_TO_DEVICE);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005993 mvpp2_txq_desc_put(txq);
5994}
5995
5996/* Handle tx fragmentation processing */
5997static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5998 struct mvpp2_tx_queue *aggr_txq,
5999 struct mvpp2_tx_queue *txq)
6000{
6001 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
6002 struct mvpp2_tx_desc *tx_desc;
6003 int i;
Thomas Petazzoni20396132017-03-07 16:53:00 +01006004 dma_addr_t buf_dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006005
6006 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6007 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6008 void *addr = page_address(frag->page.p) + frag->page_offset;
6009
6010 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006011 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6012 mvpp2_txdesc_size_set(port, tx_desc, frag->size);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006013
Thomas Petazzoni20396132017-03-07 16:53:00 +01006014 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006015 frag->size,
6016 DMA_TO_DEVICE);
Thomas Petazzoni20396132017-03-07 16:53:00 +01006017 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006018 mvpp2_txq_desc_put(txq);
Markus Elfring32bae632017-04-17 11:36:34 +02006019 goto cleanup;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006020 }
6021
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006022 mvpp2_txdesc_offset_set(port, tx_desc,
6023 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
6024 mvpp2_txdesc_dma_addr_set(port, tx_desc,
6025 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006026
6027 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
6028 /* Last descriptor */
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006029 mvpp2_txdesc_cmd_set(port, tx_desc,
6030 MVPP2_TXD_L_DESC);
6031 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006032 } else {
6033 /* Descriptor in the middle: Not First, Not Last */
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006034 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
6035 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006036 }
6037 }
6038
6039 return 0;
Markus Elfring32bae632017-04-17 11:36:34 +02006040cleanup:
Marcin Wojtas3f518502014-07-10 16:52:13 -03006041 /* Release all descriptors that were used to map fragments of
6042 * this packet, as well as the corresponding DMA mappings
6043 */
6044 for (i = i - 1; i >= 0; i--) {
6045 tx_desc = txq->descs + i;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006046 tx_desc_unmap_put(port, txq, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006047 }
6048
6049 return -ENOMEM;
6050}
6051
6052/* Main tx processing */
6053static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
6054{
6055 struct mvpp2_port *port = netdev_priv(dev);
6056 struct mvpp2_tx_queue *txq, *aggr_txq;
6057 struct mvpp2_txq_pcpu *txq_pcpu;
6058 struct mvpp2_tx_desc *tx_desc;
Thomas Petazzoni20396132017-03-07 16:53:00 +01006059 dma_addr_t buf_dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006060 int frags = 0;
6061 u16 txq_id;
6062 u32 tx_cmd;
6063
6064 txq_id = skb_get_queue_mapping(skb);
6065 txq = port->txqs[txq_id];
6066 txq_pcpu = this_cpu_ptr(txq->pcpu);
6067 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
6068
6069 frags = skb_shinfo(skb)->nr_frags + 1;
6070
6071 /* Check number of available descriptors */
6072 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
6073 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
6074 txq_pcpu, frags)) {
6075 frags = 0;
6076 goto out;
6077 }
6078
6079 /* Get a descriptor for the first part of the packet */
6080 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006081 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6082 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
Marcin Wojtas3f518502014-07-10 16:52:13 -03006083
Thomas Petazzoni20396132017-03-07 16:53:00 +01006084 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006085 skb_headlen(skb), DMA_TO_DEVICE);
Thomas Petazzoni20396132017-03-07 16:53:00 +01006086 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006087 mvpp2_txq_desc_put(txq);
6088 frags = 0;
6089 goto out;
6090 }
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006091
6092 mvpp2_txdesc_offset_set(port, tx_desc,
6093 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
6094 mvpp2_txdesc_dma_addr_set(port, tx_desc,
6095 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006096
6097 tx_cmd = mvpp2_skb_tx_csum(port, skb);
6098
6099 if (frags == 1) {
6100 /* First and Last descriptor */
6101 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006102 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
6103 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006104 } else {
6105 /* First but not Last */
6106 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006107 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
6108 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006109
6110 /* Continue with other skb fragments */
6111 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006112 tx_desc_unmap_put(port, txq, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006113 frags = 0;
6114 goto out;
6115 }
6116 }
6117
6118 txq_pcpu->reserved_num -= frags;
6119 txq_pcpu->count += frags;
6120 aggr_txq->count += frags;
6121
6122 /* Enable transmit */
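	/* The wmb() orders the descriptor writes before the pending-descriptor
	 * register write that hands them to the hardware.
	 */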
6123 wmb();
6124 mvpp2_aggr_txq_pend_desc_add(port, frags);
6125
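	/* Stop the queue if a worst-case packet (linear part plus
	 * MAX_SKB_FRAGS fragments) might not fit in the remaining
	 * descriptors.
	 */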
6126 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
6127 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
6128
6129 netif_tx_stop_queue(nq);
6130 }
6131out:
6132 if (frags > 0) {
6133 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
6134
6135 u64_stats_update_begin(&stats->syncp);
6136 stats->tx_packets++;
6137 stats->tx_bytes += skb->len;
6138 u64_stats_update_end(&stats->syncp);
6139 } else {
6140 dev->stats.tx_dropped++;
6141 dev_kfree_skb_any(skb);
6142 }
6143
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006144 /* Finalize TX processing */
6145 if (txq_pcpu->count >= txq->done_pkts_coal)
6146 mvpp2_txq_done(port, txq, txq_pcpu);
6147
6148 /* Set the timer in case not all frags were processed */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006149 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
6150 txq_pcpu->count > 0) {
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006151 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
6152
6153 mvpp2_timer_set(port_pcpu);
6154 }
6155
Marcin Wojtas3f518502014-07-10 16:52:13 -03006156 return NETDEV_TX_OK;
6157}
6158
6159static inline void mvpp2_cause_error(struct net_device *dev, int cause)
6160{
6161 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
6162 netdev_err(dev, "FCS error\n");
6163 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
6164 netdev_err(dev, "rx fifo overrun error\n");
6165 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
6166 netdev_err(dev, "tx fifo underrun error\n");
6167}
6168
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006169static int mvpp2_poll(struct napi_struct *napi, int budget)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006170{
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006171 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006172 int rx_done = 0;
6173 struct mvpp2_port *port = netdev_priv(napi->dev);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006174 struct mvpp2_queue_vector *qv;
Thomas Petazzonia7868412017-03-07 16:53:13 +01006175 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03006176
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006177 qv = container_of(napi, struct mvpp2_queue_vector, napi);
6178
Marcin Wojtas3f518502014-07-10 16:52:13 -03006179 /* Rx/Tx cause register
6180 *
6181 * Bits 0-15: each bit indicates received packets on the Rx queue
6182 * (bit 0 is for Rx queue 0).
6183 *
6184 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
6185 * (bit 16 is for Tx queue 0).
6186 *
6187 * Each CPU has its own Rx/Tx cause register
6188 */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006189 cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
Thomas Petazzonia7868412017-03-07 16:53:13 +01006190 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03006191
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006192 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006193 if (cause_misc) {
6194 mvpp2_cause_error(port->dev, cause_misc);
6195
6196 /* Clear the cause register */
6197 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01006198 mvpp2_percpu_write(port->priv, cpu,
6199 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
6200 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006201 }
6202
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006203 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
6204 if (cause_tx) {
6205 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
6206 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
6207 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006208
6209 /* Process RX packets */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006210 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
6211 cause_rx <<= qv->first_rxq;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006212 cause_rx |= qv->pending_cause_rx;
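	/* Service each pending Rx queue until the budget is spent; whatever
	 * remains is saved in pending_cause_rx below for the next poll, so
	 * no Rx queue is starved.
	 */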
Marcin Wojtas3f518502014-07-10 16:52:13 -03006213 while (cause_rx && budget > 0) {
6214 int count;
6215 struct mvpp2_rx_queue *rxq;
6216
6217 rxq = mvpp2_get_rx_queue(port, cause_rx);
6218 if (!rxq)
6219 break;
6220
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006221 count = mvpp2_rx(port, napi, budget, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006222 rx_done += count;
6223 budget -= count;
6224 if (budget > 0) {
6225 /* Clear the bit associated to this Rx queue
6226 * so that next iteration will continue from
6227 * the next Rx queue.
6228 */
6229 cause_rx &= ~(1 << rxq->logic_rxq);
6230 }
6231 }
6232
6233 if (budget > 0) {
6234 cause_rx = 0;
Eric Dumazet6ad20162017-01-30 08:22:01 -08006235 napi_complete_done(napi, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006236
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006237 mvpp2_qvec_interrupt_enable(qv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006238 }
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006239 qv->pending_cause_rx = cause_rx;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006240 return rx_done;
6241}
6242
6243/* Set hw internals when starting port */
6244static void mvpp2_start_dev(struct mvpp2_port *port)
6245{
Philippe Reynes8e072692016-06-28 00:08:11 +02006246 struct net_device *ndev = port->dev;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006247 int i;
Philippe Reynes8e072692016-06-28 00:08:11 +02006248
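	/* Only GoP port 0 has the XLG MAC; it is used for XAUI/10GBASE-KR,
	 * all other configurations go through the GMAC.
	 */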
Stefan Chulski76eb1b12017-08-22 19:08:26 +02006249 if (port->gop_id == 0 &&
6250 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
6251 port->phy_interface == PHY_INTERFACE_MODE_10GKR))
6252 mvpp2_xlg_max_rx_size_set(port);
6253 else
6254 mvpp2_gmac_max_rx_size_set(port);
6255
Marcin Wojtas3f518502014-07-10 16:52:13 -03006256 mvpp2_txp_max_tx_size_set(port);
6257
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006258 for (i = 0; i < port->nqvecs; i++)
6259 napi_enable(&port->qvecs[i].napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006260
6261 /* Enable interrupts on all CPUs */
6262 mvpp2_interrupts_enable(port);
6263
Antoine Ténartf84bf382017-08-22 19:08:27 +02006264 if (port->priv->hw_version == MVPP22)
6265 mvpp22_gop_init(port);
6266
Antoine Ténart2055d622017-08-22 19:08:23 +02006267 mvpp2_port_mii_set(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006268 mvpp2_port_enable(port);
Philippe Reynes8e072692016-06-28 00:08:11 +02006269 phy_start(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006270 netif_tx_start_all_queues(port->dev);
6271}
6272
6273/* Set hw internals when stopping port */
6274static void mvpp2_stop_dev(struct mvpp2_port *port)
6275{
Philippe Reynes8e072692016-06-28 00:08:11 +02006276 struct net_device *ndev = port->dev;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006277 int i;
Philippe Reynes8e072692016-06-28 00:08:11 +02006278
Marcin Wojtas3f518502014-07-10 16:52:13 -03006279 /* Stop new packets from arriving to RXQs */
6280 mvpp2_ingress_disable(port);
6281
6282 mdelay(10);
6283
6284 /* Disable interrupts on all CPUs */
6285 mvpp2_interrupts_disable(port);
6286
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006287 for (i = 0; i < port->nqvecs; i++)
6288 napi_disable(&port->qvecs[i].napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006289
6290 netif_carrier_off(port->dev);
6291 netif_tx_stop_all_queues(port->dev);
6292
6293 mvpp2_egress_disable(port);
6294 mvpp2_port_disable(port);
Philippe Reynes8e072692016-06-28 00:08:11 +02006295 phy_stop(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006296}
6297
Marcin Wojtas3f518502014-07-10 16:52:13 -03006298static int mvpp2_check_ringparam_valid(struct net_device *dev,
6299 struct ethtool_ringparam *ring)
6300{
6301 u16 new_rx_pending = ring->rx_pending;
6302 u16 new_tx_pending = ring->tx_pending;
6303
6304 if (ring->rx_pending == 0 || ring->tx_pending == 0)
6305 return -EINVAL;
6306
6307 if (ring->rx_pending > MVPP2_MAX_RXD)
6308 new_rx_pending = MVPP2_MAX_RXD;
6309 else if (!IS_ALIGNED(ring->rx_pending, 16))
6310 new_rx_pending = ALIGN(ring->rx_pending, 16);
6311
6312 if (ring->tx_pending > MVPP2_MAX_TXD)
6313 new_tx_pending = MVPP2_MAX_TXD;
6314 else if (!IS_ALIGNED(ring->tx_pending, 32))
6315 new_tx_pending = ALIGN(ring->tx_pending, 32);
6316
6317 if (ring->rx_pending != new_rx_pending) {
6318		netdev_info(dev, "illegal Rx ring size value %d, rounding to %d\n",
6319 ring->rx_pending, new_rx_pending);
6320 ring->rx_pending = new_rx_pending;
6321 }
6322
6323 if (ring->tx_pending != new_tx_pending) {
6324		netdev_info(dev, "illegal Tx ring size value %d, rounding to %d\n",
6325 ring->tx_pending, new_tx_pending);
6326 ring->tx_pending = new_tx_pending;
6327 }
6328
6329 return 0;
6330}
6331
Thomas Petazzoni26975822017-03-07 16:53:14 +01006332static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006333{
6334 u32 mac_addr_l, mac_addr_m, mac_addr_h;
6335
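	/* The MAC address is spread over three registers: the high and
	 * middle parts live in the LMS block, the low byte in the per-port
	 * GMAC control register.
	 */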
6336 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
6337 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
6338 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
6339 addr[0] = (mac_addr_h >> 24) & 0xFF;
6340 addr[1] = (mac_addr_h >> 16) & 0xFF;
6341 addr[2] = (mac_addr_h >> 8) & 0xFF;
6342 addr[3] = mac_addr_h & 0xFF;
6343 addr[4] = mac_addr_m & 0xFF;
6344 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
6345}
6346
6347static int mvpp2_phy_connect(struct mvpp2_port *port)
6348{
6349 struct phy_device *phy_dev;
6350
6351 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
6352 port->phy_interface);
6353 if (!phy_dev) {
6354 netdev_err(port->dev, "cannot connect to phy\n");
6355 return -ENODEV;
6356 }
6357 phy_dev->supported &= PHY_GBIT_FEATURES;
6358 phy_dev->advertising = phy_dev->supported;
6359
Marcin Wojtas3f518502014-07-10 16:52:13 -03006360 port->link = 0;
6361 port->duplex = 0;
6362 port->speed = 0;
6363
6364 return 0;
6365}
6366
6367static void mvpp2_phy_disconnect(struct mvpp2_port *port)
6368{
Philippe Reynes8e072692016-06-28 00:08:11 +02006369 struct net_device *ndev = port->dev;
6370
6371 phy_disconnect(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006372}
6373
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006374static int mvpp2_irqs_init(struct mvpp2_port *port)
6375{
6376 int err, i;
6377
6378 for (i = 0; i < port->nqvecs; i++) {
6379 struct mvpp2_queue_vector *qv = port->qvecs + i;
6380
6381 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
6382 if (err)
6383 goto err;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006384
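		/* Pin private (per-CPU) vectors to their CPU so queue
		 * processing stays local to it.
		 */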
6385 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
6386 irq_set_affinity_hint(qv->irq,
6387 cpumask_of(qv->sw_thread_id));
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006388 }
6389
6390 return 0;
6391err:
6392 for (i = 0; i < port->nqvecs; i++) {
6393 struct mvpp2_queue_vector *qv = port->qvecs + i;
6394
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006395 irq_set_affinity_hint(qv->irq, NULL);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006396 free_irq(qv->irq, qv);
6397 }
6398
6399 return err;
6400}
6401
6402static void mvpp2_irqs_deinit(struct mvpp2_port *port)
6403{
6404 int i;
6405
6406 for (i = 0; i < port->nqvecs; i++) {
6407 struct mvpp2_queue_vector *qv = port->qvecs + i;
6408
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006409 irq_set_affinity_hint(qv->irq, NULL);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006410 free_irq(qv->irq, qv);
6411 }
6412}
6413
Marcin Wojtas3f518502014-07-10 16:52:13 -03006414static int mvpp2_open(struct net_device *dev)
6415{
6416 struct mvpp2_port *port = netdev_priv(dev);
6417 unsigned char mac_bcast[ETH_ALEN] = {
6418 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6419 int err;
6420
6421 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
6422 if (err) {
6423 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
6424 return err;
6425 }
6426 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
6427 dev->dev_addr, true);
6428 if (err) {
6429		netdev_err(dev, "mvpp2_prs_mac_da_accept own address failed\n");
6430 return err;
6431 }
6432 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
6433 if (err) {
6434 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
6435 return err;
6436 }
6437 err = mvpp2_prs_def_flow(port);
6438 if (err) {
6439 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
6440 return err;
6441 }
6442
6443 /* Allocate the Rx/Tx queues */
6444 err = mvpp2_setup_rxqs(port);
6445 if (err) {
6446 netdev_err(port->dev, "cannot allocate Rx queues\n");
6447 return err;
6448 }
6449
6450 err = mvpp2_setup_txqs(port);
6451 if (err) {
6452 netdev_err(port->dev, "cannot allocate Tx queues\n");
6453 goto err_cleanup_rxqs;
6454 }
6455
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006456 err = mvpp2_irqs_init(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006457 if (err) {
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006458 netdev_err(port->dev, "cannot init IRQs\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03006459 goto err_cleanup_txqs;
6460 }
6461
6462	/* The link is down by default */
6463 netif_carrier_off(port->dev);
6464
6465 err = mvpp2_phy_connect(port);
6466 if (err < 0)
6467 goto err_free_irq;
6468
6469 /* Unmask interrupts on all CPUs */
6470 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006471 mvpp2_shared_interrupt_mask_unmask(port, false);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006472
6473 mvpp2_start_dev(port);
6474
6475 return 0;
6476
6477err_free_irq:
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006478 mvpp2_irqs_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006479err_cleanup_txqs:
6480 mvpp2_cleanup_txqs(port);
6481err_cleanup_rxqs:
6482 mvpp2_cleanup_rxqs(port);
6483 return err;
6484}
6485
6486static int mvpp2_stop(struct net_device *dev)
6487{
6488 struct mvpp2_port *port = netdev_priv(dev);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006489 struct mvpp2_port_pcpu *port_pcpu;
6490 int cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006491
6492 mvpp2_stop_dev(port);
6493 mvpp2_phy_disconnect(port);
6494
6495 /* Mask interrupts on all CPUs */
6496 on_each_cpu(mvpp2_interrupts_mask, port, 1);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006497 mvpp2_shared_interrupt_mask_unmask(port, true);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006498
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006499 mvpp2_irqs_deinit(port);
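	/* Without dedicated TX IRQs, TX completion is driven by a per-CPU
	 * hrtimer and tasklet; make sure both are stopped.
	 */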
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006500 if (!port->has_tx_irqs) {
6501 for_each_present_cpu(cpu) {
6502 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006503
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006504 hrtimer_cancel(&port_pcpu->tx_done_timer);
6505 port_pcpu->timer_scheduled = false;
6506 tasklet_kill(&port_pcpu->tx_done_tasklet);
6507 }
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006508 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006509 mvpp2_cleanup_rxqs(port);
6510 mvpp2_cleanup_txqs(port);
6511
6512 return 0;
6513}
6514
6515static void mvpp2_set_rx_mode(struct net_device *dev)
6516{
6517 struct mvpp2_port *port = netdev_priv(dev);
6518 struct mvpp2 *priv = port->priv;
6519 struct netdev_hw_addr *ha;
6520 int id = port->id;
6521 bool allmulti = dev->flags & IFF_ALLMULTI;
6522
6523 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
6524 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
6525 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
6526
6527	/* Remove all this port's mcast entries */
6528 mvpp2_prs_mcast_del_all(priv, id);
6529
6530 if (allmulti && !netdev_mc_empty(dev)) {
6531 netdev_for_each_mc_addr(ha, dev)
6532 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
6533 }
6534}
6535
6536static int mvpp2_set_mac_address(struct net_device *dev, void *p)
6537{
6538 struct mvpp2_port *port = netdev_priv(dev);
6539 const struct sockaddr *addr = p;
6540 int err;
6541
6542 if (!is_valid_ether_addr(addr->sa_data)) {
6543 err = -EADDRNOTAVAIL;
Markus Elfringc1175542017-04-17 11:10:47 +02006544 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006545 }
6546
6547 if (!netif_running(dev)) {
6548 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6549 if (!err)
6550 return 0;
6551 /* Reconfigure parser to accept the original MAC address */
6552 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6553 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02006554 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006555 }
6556
6557 mvpp2_stop_dev(port);
6558
6559 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6560 if (!err)
6561 goto out_start;
6562
6563	/* Reconfigure parser to accept the original MAC address */
6564 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6565 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02006566 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006567out_start:
6568 mvpp2_start_dev(port);
6569 mvpp2_egress_enable(port);
6570 mvpp2_ingress_enable(port);
6571 return 0;
Markus Elfringc1175542017-04-17 11:10:47 +02006572log_error:
Markus Elfringdfd42402017-04-17 11:20:41 +02006573 netdev_err(dev, "failed to change MAC address\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03006574 return err;
6575}
6576
6577static int mvpp2_change_mtu(struct net_device *dev, int mtu)
6578{
6579 struct mvpp2_port *port = netdev_priv(dev);
6580 int err;
6581
Jarod Wilson57779872016-10-17 15:54:06 -04006582 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
6583		netdev_info(dev, "illegal MTU value %d, rounding to %d\n", mtu,
6584 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
6585 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006586 }
6587
6588 if (!netif_running(dev)) {
6589 err = mvpp2_bm_update_mtu(dev, mtu);
6590 if (!err) {
6591 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6592 return 0;
6593 }
6594
6595 /* Reconfigure BM to the original MTU */
6596 err = mvpp2_bm_update_mtu(dev, dev->mtu);
6597 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02006598 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006599 }
6600
6601 mvpp2_stop_dev(port);
6602
6603 err = mvpp2_bm_update_mtu(dev, mtu);
6604 if (!err) {
6605 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6606 goto out_start;
6607 }
6608
6609 /* Reconfigure BM to the original MTU */
6610 err = mvpp2_bm_update_mtu(dev, dev->mtu);
6611 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02006612 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006613
6614out_start:
6615 mvpp2_start_dev(port);
6616 mvpp2_egress_enable(port);
6617 mvpp2_ingress_enable(port);
6618
6619 return 0;
Markus Elfringc1175542017-04-17 11:10:47 +02006620log_error:
Markus Elfringdfd42402017-04-17 11:20:41 +02006621 netdev_err(dev, "failed to change MTU\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03006622 return err;
6623}
6624
stephen hemmingerbc1f4472017-01-06 19:12:52 -08006625static void
Marcin Wojtas3f518502014-07-10 16:52:13 -03006626mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6627{
6628 struct mvpp2_port *port = netdev_priv(dev);
6629 unsigned int start;
6630 int cpu;
6631
6632 for_each_possible_cpu(cpu) {
6633 struct mvpp2_pcpu_stats *cpu_stats;
6634 u64 rx_packets;
6635 u64 rx_bytes;
6636 u64 tx_packets;
6637 u64 tx_bytes;
6638
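		/* Snapshot the per-CPU counters under the u64_stats seqcount;
		 * the loop retries if a writer updated them mid-read.
		 */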
6639 cpu_stats = per_cpu_ptr(port->stats, cpu);
6640 do {
6641 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
6642 rx_packets = cpu_stats->rx_packets;
6643 rx_bytes = cpu_stats->rx_bytes;
6644 tx_packets = cpu_stats->tx_packets;
6645 tx_bytes = cpu_stats->tx_bytes;
6646 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
6647
6648 stats->rx_packets += rx_packets;
6649 stats->rx_bytes += rx_bytes;
6650 stats->tx_packets += tx_packets;
6651 stats->tx_bytes += tx_bytes;
6652 }
6653
6654 stats->rx_errors = dev->stats.rx_errors;
6655 stats->rx_dropped = dev->stats.rx_dropped;
6656 stats->tx_dropped = dev->stats.tx_dropped;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006657}
6658
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006659static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6660{
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006661 int ret;
6662
Philippe Reynes8e072692016-06-28 00:08:11 +02006663 if (!dev->phydev)
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006664 return -ENOTSUPP;
6665
Philippe Reynes8e072692016-06-28 00:08:11 +02006666 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006667 if (!ret)
6668 mvpp2_link_event(dev);
6669
6670 return ret;
6671}
6672
Marcin Wojtas3f518502014-07-10 16:52:13 -03006673/* Ethtool methods */
6674
Marcin Wojtas3f518502014-07-10 16:52:13 -03006675/* Set interrupt coalescing for ethtools */
6676static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
6677 struct ethtool_coalesce *c)
6678{
6679 struct mvpp2_port *port = netdev_priv(dev);
6680 int queue;
6681
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006682 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006683 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6684
6685 rxq->time_coal = c->rx_coalesce_usecs;
6686 rxq->pkts_coal = c->rx_max_coalesced_frames;
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01006687 mvpp2_rx_pkts_coal_set(port, rxq);
6688 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006689 }
6690
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006691 if (port->has_tx_irqs) {
6692 port->tx_time_coal = c->tx_coalesce_usecs;
6693 mvpp2_tx_time_coal_set(port);
6694 }
6695
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006696 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006697 struct mvpp2_tx_queue *txq = port->txqs[queue];
6698
6699 txq->done_pkts_coal = c->tx_max_coalesced_frames;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006700
6701 if (port->has_tx_irqs)
6702 mvpp2_tx_pkts_coal_set(port, txq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006703 }
6704
Marcin Wojtas3f518502014-07-10 16:52:13 -03006705 return 0;
6706}
6707
6708/* get coalescing for ethtools */
6709static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
6710 struct ethtool_coalesce *c)
6711{
6712 struct mvpp2_port *port = netdev_priv(dev);
6713
6714 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
6715 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
6716 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
6717 return 0;
6718}
6719
6720static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
6721 struct ethtool_drvinfo *drvinfo)
6722{
6723 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
6724 sizeof(drvinfo->driver));
6725 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
6726 sizeof(drvinfo->version));
6727 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
6728 sizeof(drvinfo->bus_info));
6729}
6730
6731static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
6732 struct ethtool_ringparam *ring)
6733{
6734 struct mvpp2_port *port = netdev_priv(dev);
6735
6736 ring->rx_max_pending = MVPP2_MAX_RXD;
6737 ring->tx_max_pending = MVPP2_MAX_TXD;
6738 ring->rx_pending = port->rx_ring_size;
6739 ring->tx_pending = port->tx_ring_size;
6740}
6741
6742static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
6743 struct ethtool_ringparam *ring)
6744{
6745 struct mvpp2_port *port = netdev_priv(dev);
6746 u16 prev_rx_ring_size = port->rx_ring_size;
6747 u16 prev_tx_ring_size = port->tx_ring_size;
6748 int err;
6749
6750 err = mvpp2_check_ringparam_valid(dev, ring);
6751 if (err)
6752 return err;
6753
6754 if (!netif_running(dev)) {
6755 port->rx_ring_size = ring->rx_pending;
6756 port->tx_ring_size = ring->tx_pending;
6757 return 0;
6758 }
6759
6760 /* The interface is running, so we have to force a
6761 * reallocation of the queues
6762 */
6763 mvpp2_stop_dev(port);
6764 mvpp2_cleanup_rxqs(port);
6765 mvpp2_cleanup_txqs(port);
6766
6767 port->rx_ring_size = ring->rx_pending;
6768 port->tx_ring_size = ring->tx_pending;
6769
6770 err = mvpp2_setup_rxqs(port);
6771 if (err) {
6772 /* Reallocate Rx queues with the original ring size */
6773 port->rx_ring_size = prev_rx_ring_size;
6774 ring->rx_pending = prev_rx_ring_size;
6775 err = mvpp2_setup_rxqs(port);
6776 if (err)
6777 goto err_out;
6778 }
6779 err = mvpp2_setup_txqs(port);
6780 if (err) {
6781 /* Reallocate Tx queues with the original ring size */
6782 port->tx_ring_size = prev_tx_ring_size;
6783 ring->tx_pending = prev_tx_ring_size;
6784 err = mvpp2_setup_txqs(port);
6785 if (err)
6786 goto err_clean_rxqs;
6787 }
6788
6789 mvpp2_start_dev(port);
6790 mvpp2_egress_enable(port);
6791 mvpp2_ingress_enable(port);
6792
6793 return 0;
6794
6795err_clean_rxqs:
6796 mvpp2_cleanup_rxqs(port);
6797err_out:
Markus Elfringdfd42402017-04-17 11:20:41 +02006798	netdev_err(dev, "failed to change ring parameters\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03006799 return err;
6800}
6801
6802/* Device ops */
6803
6804static const struct net_device_ops mvpp2_netdev_ops = {
6805 .ndo_open = mvpp2_open,
6806 .ndo_stop = mvpp2_stop,
6807 .ndo_start_xmit = mvpp2_tx,
6808 .ndo_set_rx_mode = mvpp2_set_rx_mode,
6809 .ndo_set_mac_address = mvpp2_set_mac_address,
6810 .ndo_change_mtu = mvpp2_change_mtu,
6811 .ndo_get_stats64 = mvpp2_get_stats64,
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006812 .ndo_do_ioctl = mvpp2_ioctl,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006813};
6814
6815static const struct ethtool_ops mvpp2_eth_tool_ops = {
Florian Fainelli00606c42016-11-15 11:19:48 -08006816 .nway_reset = phy_ethtool_nway_reset,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006817 .get_link = ethtool_op_get_link,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006818 .set_coalesce = mvpp2_ethtool_set_coalesce,
6819 .get_coalesce = mvpp2_ethtool_get_coalesce,
6820 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
6821 .get_ringparam = mvpp2_ethtool_get_ringparam,
6822 .set_ringparam = mvpp2_ethtool_set_ringparam,
Philippe Reynesfb773e92016-06-28 00:08:12 +02006823 .get_link_ksettings = phy_ethtool_get_link_ksettings,
6824 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006825};
6826
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006827/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
6828 * had a single IRQ defined per-port.
6829 */
6830static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
6831 struct device_node *port_node)
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006832{
6833 struct mvpp2_queue_vector *v = &port->qvecs[0];
6834
6835 v->first_rxq = 0;
6836 v->nrxqs = port->nrxqs;
6837 v->type = MVPP2_QUEUE_VECTOR_SHARED;
6838 v->sw_thread_id = 0;
6839 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
6840 v->port = port;
6841 v->irq = irq_of_parse_and_map(port_node, 0);
6842 if (v->irq <= 0)
6843 return -EINVAL;
6844 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
6845 NAPI_POLL_WEIGHT);
6846
6847 port->nqvecs = 1;
6848
6849 return 0;
6850}
6851
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006852static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
6853 struct device_node *port_node)
6854{
6855 struct mvpp2_queue_vector *v;
6856 int i, ret;
6857
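	/* One private vector per possible CPU; in single-queue distribution
	 * mode an extra shared vector owns all the Rx queues.
	 */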
6858 port->nqvecs = num_possible_cpus();
6859 if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
6860 port->nqvecs += 1;
6861
6862 for (i = 0; i < port->nqvecs; i++) {
6863 char irqname[16];
6864
6865 v = port->qvecs + i;
6866
6867 v->port = port;
6868 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
6869 v->sw_thread_id = i;
6870 v->sw_thread_mask = BIT(i);
6871
6872 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
6873
6874 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
6875 v->first_rxq = i * MVPP2_DEFAULT_RXQ;
6876 v->nrxqs = MVPP2_DEFAULT_RXQ;
6877 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
6878 i == (port->nqvecs - 1)) {
6879 v->first_rxq = 0;
6880 v->nrxqs = port->nrxqs;
6881 v->type = MVPP2_QUEUE_VECTOR_SHARED;
6882 strncpy(irqname, "rx-shared", sizeof(irqname));
6883 }
6884
6885 v->irq = of_irq_get_byname(port_node, irqname);
6886 if (v->irq <= 0) {
6887 ret = -EINVAL;
6888 goto err;
6889 }
6890
6891 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
6892 NAPI_POLL_WEIGHT);
6893 }
6894
6895 return 0;
6896
6897err:
6898 for (i = 0; i < port->nqvecs; i++)
6899 irq_dispose_mapping(port->qvecs[i].irq);
6900 return ret;
6901}
6902
6903static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
6904 struct device_node *port_node)
6905{
6906 if (port->has_tx_irqs)
6907 return mvpp2_multi_queue_vectors_init(port, port_node);
6908 else
6909 return mvpp2_simple_queue_vectors_init(port, port_node);
6910}
6911
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006912static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
6913{
6914 int i;
6915
6916 for (i = 0; i < port->nqvecs; i++)
6917 irq_dispose_mapping(port->qvecs[i].irq);
6918}
6919
6920/* Configure Rx queue group interrupt for this port */
6921static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
6922{
6923 struct mvpp2 *priv = port->priv;
6924 u32 val;
6925 int i;
6926
6927 if (priv->hw_version == MVPP21) {
6928 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
6929 port->nrxqs);
6930 return;
6931 }
6932
6933 /* Handle the more complicated PPv2.2 case */
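	/* Bind each queue vector's Rx queue range to its software thread:
	 * the first register write selects the port/group, the second
	 * programs the group's first queue and queue count.
	 */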
6934 for (i = 0; i < port->nqvecs; i++) {
6935 struct mvpp2_queue_vector *qv = port->qvecs + i;
6936
6937 if (!qv->nrxqs)
6938 continue;
6939
6940 val = qv->sw_thread_id;
6941 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
6942 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
6943
6944 val = qv->first_rxq;
6945 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
6946 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
6947 }
6948}
6949
Marcin Wojtas3f518502014-07-10 16:52:13 -03006950/* Initialize port HW */
6951static int mvpp2_port_init(struct mvpp2_port *port)
6952{
6953 struct device *dev = port->dev->dev.parent;
6954 struct mvpp2 *priv = port->priv;
6955 struct mvpp2_txq_pcpu *txq_pcpu;
6956 int queue, cpu, err;
6957
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006958 /* Checks for hardware constraints */
6959 if (port->first_rxq + port->nrxqs >
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01006960 MVPP2_MAX_PORTS * priv->max_port_rxqs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006961 return -EINVAL;
6962
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006963 if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
6964 (port->ntxqs > MVPP2_MAX_TXQ))
6965 return -EINVAL;
6966
Marcin Wojtas3f518502014-07-10 16:52:13 -03006967 /* Disable port */
6968 mvpp2_egress_disable(port);
6969 mvpp2_port_disable(port);
6970
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006971 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
6972
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006973 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03006974 GFP_KERNEL);
6975 if (!port->txqs)
6976 return -ENOMEM;
6977
6978 /* Associate physical Tx queues to this port and initialize.
6979 * The mapping is predefined.
6980 */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006981 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006982 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
6983 struct mvpp2_tx_queue *txq;
6984
6985 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
Christophe Jaillet177c8d12017-02-19 10:19:57 +01006986 if (!txq) {
6987 err = -ENOMEM;
6988 goto err_free_percpu;
6989 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006990
6991 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
6992 if (!txq->pcpu) {
6993 err = -ENOMEM;
6994 goto err_free_percpu;
6995 }
6996
6997 txq->id = queue_phy_id;
6998 txq->log_id = queue;
6999 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
7000 for_each_present_cpu(cpu) {
7001 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
7002 txq_pcpu->cpu = cpu;
7003 }
7004
7005 port->txqs[queue] = txq;
7006 }
7007
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007008 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03007009 GFP_KERNEL);
7010 if (!port->rxqs) {
7011 err = -ENOMEM;
7012 goto err_free_percpu;
7013 }
7014
7015	/* Allocate and initialize Rx queues for this port */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007016 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007017 struct mvpp2_rx_queue *rxq;
7018
7019 /* Map physical Rx queue to port's logical Rx queue */
7020 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08007021 if (!rxq) {
7022 err = -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007023 goto err_free_percpu;
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08007024 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007025 /* Map this Rx queue to a physical queue */
7026 rxq->id = port->first_rxq + queue;
7027 rxq->port = port->id;
7028 rxq->logic_rxq = queue;
7029
7030 port->rxqs[queue] = rxq;
7031 }
7032
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007033 mvpp2_rx_irqs_setup(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007034
7035 /* Create Rx descriptor rings */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007036 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007037 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
7038
7039 rxq->size = port->rx_ring_size;
7040 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
7041 rxq->time_coal = MVPP2_RX_COAL_USEC;
7042 }
7043
7044 mvpp2_ingress_disable(port);
7045
7046 /* Port default configuration */
7047 mvpp2_defaults_set(port);
7048
7049 /* Port's classifier configuration */
7050 mvpp2_cls_oversize_rxq_set(port);
7051 mvpp2_cls_port_config(port);
7052
7053 /* Provide an initial Rx packet size */
7054 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
7055
7056 /* Initialize pools for swf */
7057 err = mvpp2_swf_bm_pool_init(port);
7058 if (err)
7059 goto err_free_percpu;
7060
7061 return 0;
7062
7063err_free_percpu:
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007064 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007065 if (!port->txqs[queue])
7066 continue;
7067 free_percpu(port->txqs[queue]->pcpu);
7068 }
7069 return err;
7070}
7071
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007072/* Checks whether the port DT description provides the TX interrupts.
7073 * On PPv2.1, there are no such interrupts. On PPv2.2, they are
7074 * available, but we need to keep support for old DTs.
7075 */
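/* For reference, a DT port node carrying these interrupts would look
 * roughly like (an illustrative sketch, not a complete binding):
 *
 *	interrupt-names = "rx-shared", "tx-cpu0", "tx-cpu1",
 *			  "tx-cpu2", "tx-cpu3";
 */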
7076static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
7077 struct device_node *port_node)
7078{
7079 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
7080 "tx-cpu2", "tx-cpu3" };
7081 int ret, i;
7082
7083 if (priv->hw_version == MVPP21)
7084 return false;
7085
7086 for (i = 0; i < 5; i++) {
7087 ret = of_property_match_string(port_node, "interrupt-names",
7088 irqs[i]);
7089 if (ret < 0)
7090 return false;
7091 }
7092
7093 return true;
7094}
7095
Marcin Wojtas3f518502014-07-10 16:52:13 -03007096/* Ports initialization */
7097static int mvpp2_port_probe(struct platform_device *pdev,
7098 struct device_node *port_node,
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01007099 struct mvpp2 *priv)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007100{
7101 struct device_node *phy_node;
7102 struct mvpp2_port *port;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007103 struct mvpp2_port_pcpu *port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007104 struct net_device *dev;
7105 struct resource *res;
7106 const char *dt_mac_addr;
7107 const char *mac_from;
7108 char hw_mac_addr[ETH_ALEN];
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007109 unsigned int ntxqs, nrxqs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007110 bool has_tx_irqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007111 u32 id;
7112 int features;
7113 int phy_mode;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007114 int err, i, cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007115
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007116 has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
7117
7118 if (!has_tx_irqs)
7119 queue_mode = MVPP2_QDIST_SINGLE_MODE;
7120
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007121 ntxqs = MVPP2_MAX_TXQ;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007122 if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
7123 nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
7124 else
7125 nrxqs = MVPP2_DEFAULT_RXQ;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007126
7127 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007128 if (!dev)
7129 return -ENOMEM;
7130
7131 phy_node = of_parse_phandle(port_node, "phy", 0);
7132 if (!phy_node) {
7133 dev_err(&pdev->dev, "missing phy\n");
7134 err = -ENODEV;
7135 goto err_free_netdev;
7136 }
7137
7138 phy_mode = of_get_phy_mode(port_node);
7139 if (phy_mode < 0) {
7140 dev_err(&pdev->dev, "incorrect phy mode\n");
7141 err = phy_mode;
7142 goto err_free_netdev;
7143 }
7144
7145 if (of_property_read_u32(port_node, "port-id", &id)) {
7146 err = -EINVAL;
7147 dev_err(&pdev->dev, "missing port-id value\n");
7148 goto err_free_netdev;
7149 }
7150
7151 dev->tx_queue_len = MVPP2_MAX_TXD;
7152 dev->watchdog_timeo = 5 * HZ;
7153 dev->netdev_ops = &mvpp2_netdev_ops;
7154 dev->ethtool_ops = &mvpp2_eth_tool_ops;
7155
7156 port = netdev_priv(dev);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007157 port->dev = dev;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007158 port->ntxqs = ntxqs;
7159 port->nrxqs = nrxqs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007160 port->priv = priv;
7161 port->has_tx_irqs = has_tx_irqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007162
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007163 err = mvpp2_queue_vectors_init(port, port_node);
7164 if (err)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007165 goto err_free_netdev;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007166
7167 if (of_property_read_bool(port_node, "marvell,loopback"))
7168 port->flags |= MVPP2_F_LOOPBACK;
7169
Marcin Wojtas3f518502014-07-10 16:52:13 -03007170 port->id = id;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01007171 if (priv->hw_version == MVPP21)
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007172 port->first_rxq = port->id * port->nrxqs;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01007173 else
7174 port->first_rxq = port->id * priv->max_port_rxqs;
7175
Marcin Wojtas3f518502014-07-10 16:52:13 -03007176 port->phy_node = phy_node;
7177 port->phy_interface = phy_mode;
7178
Thomas Petazzonia7868412017-03-07 16:53:13 +01007179 if (priv->hw_version == MVPP21) {
7180 res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
7181 port->base = devm_ioremap_resource(&pdev->dev, res);
7182 if (IS_ERR(port->base)) {
7183 err = PTR_ERR(port->base);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007184 goto err_deinit_qvecs;
Thomas Petazzonia7868412017-03-07 16:53:13 +01007185 }
7186 } else {
7187 if (of_property_read_u32(port_node, "gop-port-id",
7188 &port->gop_id)) {
7189 err = -EINVAL;
7190 dev_err(&pdev->dev, "missing gop-port-id value\n");
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007191 goto err_deinit_qvecs;
Thomas Petazzonia7868412017-03-07 16:53:13 +01007192 }
7193
7194 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007195 }
7196
7197 /* Alloc per-cpu stats */
7198 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
7199 if (!port->stats) {
7200 err = -ENOMEM;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007201 goto err_deinit_qvecs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007202 }
7203
7204 dt_mac_addr = of_get_mac_address(port_node);
7205 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
7206 mac_from = "device tree";
7207 ether_addr_copy(dev->dev_addr, dt_mac_addr);
7208 } else {
Thomas Petazzoni26975822017-03-07 16:53:14 +01007209 if (priv->hw_version == MVPP21)
7210 mvpp21_get_mac_address(port, hw_mac_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007211 if (is_valid_ether_addr(hw_mac_addr)) {
7212 mac_from = "hardware";
7213 ether_addr_copy(dev->dev_addr, hw_mac_addr);
7214 } else {
7215 mac_from = "random";
7216 eth_hw_addr_random(dev);
7217 }
7218 }
7219
7220 port->tx_ring_size = MVPP2_MAX_TXD;
7221 port->rx_ring_size = MVPP2_MAX_RXD;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007222 SET_NETDEV_DEV(dev, &pdev->dev);
7223
7224 err = mvpp2_port_init(port);
7225 if (err < 0) {
7226 dev_err(&pdev->dev, "failed to init port %d\n", id);
7227 goto err_free_stats;
7228 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01007229
Thomas Petazzoni26975822017-03-07 16:53:14 +01007230 mvpp2_port_periodic_xon_disable(port);
7231
7232 if (priv->hw_version == MVPP21)
7233 mvpp2_port_fc_adv_enable(port);
7234
7235 mvpp2_port_reset(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007236
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007237 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
7238 if (!port->pcpu) {
7239 err = -ENOMEM;
7240 goto err_free_txq_pcpu;
7241 }
7242
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007243 if (!port->has_tx_irqs) {
7244 for_each_present_cpu(cpu) {
7245 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007246
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007247 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
7248 HRTIMER_MODE_REL_PINNED);
7249 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
7250 port_pcpu->timer_scheduled = false;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007251
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007252 tasklet_init(&port_pcpu->tx_done_tasklet,
7253 mvpp2_tx_proc_cb,
7254 (unsigned long)dev);
7255 }
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007256 }
7257
Marcin Wojtas3f518502014-07-10 16:52:13 -03007258 features = NETIF_F_SG | NETIF_F_IP_CSUM;
7259 dev->features = features | NETIF_F_RXCSUM;
7260 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
7261 dev->vlan_features |= features;
7262
Jarod Wilson57779872016-10-17 15:54:06 -04007263 /* MTU range: 68 - 9676 */
7264 dev->min_mtu = ETH_MIN_MTU;
7265	/* 9676 == 9700 - 20, rounded to 8 */
7266 dev->max_mtu = 9676;
7267
Marcin Wojtas3f518502014-07-10 16:52:13 -03007268 err = register_netdev(dev);
7269 if (err < 0) {
7270 dev_err(&pdev->dev, "failed to register netdev\n");
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007271 goto err_free_port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007272 }
7273 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
7274
Marcin Wojtas3f518502014-07-10 16:52:13 -03007275 priv->port_list[id] = port;
7276 return 0;
7277
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007278err_free_port_pcpu:
7279 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007280err_free_txq_pcpu:
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007281 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007282 free_percpu(port->txqs[i]->pcpu);
7283err_free_stats:
7284 free_percpu(port->stats);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007285err_deinit_qvecs:
7286 mvpp2_queue_vectors_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007287err_free_netdev:
Peter Chenccb80392016-08-01 15:02:37 +08007288 of_node_put(phy_node);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007289 free_netdev(dev);
7290 return err;
7291}
7292
7293/* Ports removal routine */
7294static void mvpp2_port_remove(struct mvpp2_port *port)
7295{
7296 int i;
7297
7298 unregister_netdev(port->dev);
Peter Chenccb80392016-08-01 15:02:37 +08007299 of_node_put(port->phy_node);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007300 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007301 free_percpu(port->stats);
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007302 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007303 free_percpu(port->txqs[i]->pcpu);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007304 mvpp2_queue_vectors_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007305 free_netdev(port->dev);
7306}
7307
7308/* Initialize decoding windows */
7309static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
7310 struct mvpp2 *priv)
7311{
7312 u32 win_enable;
7313 int i;
7314
7315 for (i = 0; i < 6; i++) {
7316 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
7317 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
7318
7319 if (i < 4)
7320 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
7321 }
7322
7323 win_enable = 0;
7324
7325 for (i = 0; i < dram->num_cs; i++) {
7326 const struct mbus_dram_window *cs = dram->cs + i;
7327
7328 mvpp2_write(priv, MVPP2_WIN_BASE(i),
7329 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
7330 dram->mbus_dram_target_id);
7331
7332 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
7333 (cs->size - 1) & 0xffff0000);
7334
7335 win_enable |= (1 << i);
7336 }
7337
7338 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
7339}
7340
7341/* Initialize Rx FIFOs */
7342static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
7343{
7344 int port;
7345
7346 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
7347 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
7348 MVPP2_RX_FIFO_PORT_DATA_SIZE);
7349 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
7350 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
7351 }
7352
7353 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7354 MVPP2_RX_FIFO_PORT_MIN_PKT);
7355 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7356}
7357
Thomas Petazzoni6763ce32017-03-07 16:53:15 +01007358static void mvpp2_axi_init(struct mvpp2 *priv)
7359{
7360 u32 val, rdval, wrval;
7361
7362 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
7363
7364 /* AXI Bridge Configuration */
7365
7366 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
7367 << MVPP22_AXI_ATTR_CACHE_OFFS;
7368 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7369 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
7370
7371 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
7372 << MVPP22_AXI_ATTR_CACHE_OFFS;
7373 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7374 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
7375
7376 /* BM */
7377 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
7378 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
7379
7380 /* Descriptors */
7381 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
7382 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
7383 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
7384 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
7385
7386 /* Buffer Data */
7387 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
7388 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
7389
7390 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
7391 << MVPP22_AXI_CODE_CACHE_OFFS;
7392 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
7393 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7394 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
7395 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
7396
7397 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
7398 << MVPP22_AXI_CODE_CACHE_OFFS;
7399 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7400 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7401
7402 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
7403
7404 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
7405 << MVPP22_AXI_CODE_CACHE_OFFS;
7406 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7407 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7408
7409 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
7410}
7411
Marcin Wojtas3f518502014-07-10 16:52:13 -03007412/* Initialize network controller common part HW */
7413static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
7414{
7415 const struct mbus_dram_target_info *dram_target_info;
7416 int err, i;
Marcin Wojtas08a23752014-07-21 13:48:12 -03007417 u32 val;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007418
Marcin Wojtas3f518502014-07-10 16:52:13 -03007419 /* MBUS windows configuration */
7420 dram_target_info = mv_mbus_dram_info();
7421 if (dram_target_info)
7422 mvpp2_conf_mbus_windows(dram_target_info, priv);
7423
Thomas Petazzoni6763ce32017-03-07 16:53:15 +01007424 if (priv->hw_version == MVPP22)
7425 mvpp2_axi_init(priv);
7426
Marcin Wojtas08a23752014-07-21 13:48:12 -03007427 /* Disable HW PHY polling */
Thomas Petazzoni26975822017-03-07 16:53:14 +01007428 if (priv->hw_version == MVPP21) {
7429 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7430 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
7431 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7432 } else {
7433 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7434 val &= ~MVPP22_SMI_POLLING_EN;
7435 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7436 }
Marcin Wojtas08a23752014-07-21 13:48:12 -03007437
Marcin Wojtas3f518502014-07-10 16:52:13 -03007438 /* Allocate and initialize aggregated TXQs */
7439 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
Markus Elfringd7ce3ce2017-04-17 08:48:23 +02007440 sizeof(*priv->aggr_txqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03007441 GFP_KERNEL);
7442 if (!priv->aggr_txqs)
7443 return -ENOMEM;
7444
7445 for_each_present_cpu(i) {
7446 priv->aggr_txqs[i].id = i;
7447 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
Antoine Ténart85affd72017-08-23 09:46:55 +02007448 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007449 if (err < 0)
7450 return err;
7451 }
7452
7453 /* Rx Fifo Init */
7454 mvpp2_rx_fifo_init(priv);
7455
Thomas Petazzoni26975822017-03-07 16:53:14 +01007456 if (priv->hw_version == MVPP21)
7457 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
7458 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007459
7460	/* Allow cache snoop when transmitting packets */
7461 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
7462
7463 /* Buffer Manager initialization */
7464 err = mvpp2_bm_init(pdev, priv);
7465 if (err < 0)
7466 return err;
7467
7468 /* Parser default initialization */
7469 err = mvpp2_prs_default_init(pdev, priv);
7470 if (err < 0)
7471 return err;
7472
7473 /* Classifier default initialization */
7474 mvpp2_cls_init(priv);
7475
7476 return 0;
7477}
7478
7479static int mvpp2_probe(struct platform_device *pdev)
7480{
7481 struct device_node *dn = pdev->dev.of_node;
7482 struct device_node *port_node;
7483 struct mvpp2 *priv;
7484 struct resource *res;
Thomas Petazzonia7868412017-03-07 16:53:13 +01007485 void __iomem *base;
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02007486 int port_count, i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007487 int err;
7488
Markus Elfring0b92e592017-04-17 08:38:32 +02007489 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007490 if (!priv)
7491 return -ENOMEM;
7492
Thomas Petazzonifaca9242017-03-07 16:53:06 +01007493 priv->hw_version =
7494 (unsigned long)of_device_get_match_data(&pdev->dev);
7495
Marcin Wojtas3f518502014-07-10 16:52:13 -03007496 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01007497 base = devm_ioremap_resource(&pdev->dev, res);
7498 if (IS_ERR(base))
7499 return PTR_ERR(base);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007500
Thomas Petazzonia7868412017-03-07 16:53:13 +01007501 if (priv->hw_version == MVPP21) {
7502 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7503 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
7504 if (IS_ERR(priv->lms_base))
7505 return PTR_ERR(priv->lms_base);
7506 } else {
7507 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7508 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
7509 if (IS_ERR(priv->iface_base))
7510 return PTR_ERR(priv->iface_base);
Antoine Ténartf84bf382017-08-22 19:08:27 +02007511
7512 priv->sysctrl_base =
7513 syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
7514 "marvell,system-controller");
7515 if (IS_ERR(priv->sysctrl_base))
7516 /* The system controller regmap is optional for dt
7517 * compatibility reasons. When not provided, the
7518 * configuration of the GoP relies on the
7519 * firmware/bootloader.
7520 */
7521 priv->sysctrl_base = NULL;
Thomas Petazzonia7868412017-03-07 16:53:13 +01007522 }
7523
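	/* Each software thread (CPU) gets its own view of the registers,
	 * carved out of the shared base at a fixed per-thread stride.
	 */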
	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
	if (IS_ERR(priv->pp_clk))
		return PTR_ERR(priv->pp_clk);
	err = clk_prepare_enable(priv->pp_clk);
	if (err < 0)
		return err;

	priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
	if (IS_ERR(priv->gop_clk)) {
		err = PTR_ERR(priv->gop_clk);
		goto err_pp_clk;
	}
	err = clk_prepare_enable(priv->gop_clk);
	if (err < 0)
		goto err_pp_clk;

	if (priv->hw_version == MVPP22) {
		priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
		if (IS_ERR(priv->mg_clk)) {
			err = PTR_ERR(priv->mg_clk);
			goto err_gop_clk;
		}

		err = clk_prepare_enable(priv->mg_clk);
		if (err < 0)
			goto err_gop_clk;
	}

	/* Get system's tclk rate */
	priv->tclk = clk_get_rate(priv->pp_clk);

	if (priv->hw_version == MVPP22) {
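		/* PPv2.2 can address a 40-bit wide physical space for
		 * streaming DMA mappings.
		 */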
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
		if (err)
			goto err_mg_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_mg_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_mg_clk;
	}

	port_count = of_get_available_child_count(dn);
	if (port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_mg_clk;
	}

	priv->port_list = devm_kcalloc(&pdev->dev, port_count,
				       sizeof(*priv->port_list),
				       GFP_KERNEL);
	if (!priv->port_list) {
		err = -ENOMEM;
		goto err_mg_clk;
	}
	/* Initialize ports */
	for_each_available_child_of_node(dn, port_node) {
		err = mvpp2_port_probe(pdev, port_node, priv);
		if (err < 0) {
			of_node_put(port_node);
			goto err_port_probe;
		}
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	/* Unregister any ports that were probed before the failure */
	i = 0;
	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *port_node;
	int i = 0;

	for_each_available_child_of_node(dn, port_node) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

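	/* mg_clk is only acquired on PPv2.2; on PPv2.1 it is left NULL,
	 * and clk_disable_unprepare() is a no-op on a NULL clock.
	 */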
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}

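/* The .data field carries the hardware revision, read back in
 * mvpp2_probe() through of_device_get_match_data().
 */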
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");