blob: 4202f9b5b966345f1aae651ff20377b9b2977b1a [file] [log] [blame]
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
12
Marcin Wojtasa75edc72018-01-18 13:31:44 +010013#include <linux/acpi.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030014#include <linux/kernel.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/platform_device.h>
18#include <linux/skbuff.h>
19#include <linux/inetdevice.h>
20#include <linux/mbus.h>
21#include <linux/module.h>
Antoine Ténartf84bf382017-08-22 19:08:27 +020022#include <linux/mfd/syscon.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030023#include <linux/interrupt.h>
24#include <linux/cpumask.h>
25#include <linux/of.h>
26#include <linux/of_irq.h>
27#include <linux/of_mdio.h>
28#include <linux/of_net.h>
29#include <linux/of_address.h>
Thomas Petazzonifaca9242017-03-07 16:53:06 +010030#include <linux/of_device.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030031#include <linux/phy.h>
Antoine Tenart542897d2017-08-30 10:29:15 +020032#include <linux/phy/phy.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030033#include <linux/clk.h>
Marcin Wojtasedc660f2015-08-06 19:00:30 +020034#include <linux/hrtimer.h>
35#include <linux/ktime.h>
Antoine Ténartf84bf382017-08-22 19:08:27 +020036#include <linux/regmap.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030037#include <uapi/linux/ppp_defs.h>
38#include <net/ip.h>
39#include <net/ipv6.h>
Antoine Ténart186cd4d2017-08-23 09:46:56 +020040#include <net/tso.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030041
Antoine Tenart7c10f972017-10-30 11:23:29 +010042/* Fifo Registers */
Marcin Wojtas3f518502014-07-10 16:52:13 -030043#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
44#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
45#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
46#define MVPP2_RX_FIFO_INIT_REG 0x64
Yan Markman93ff1302018-03-05 15:16:52 +010047#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port))
Antoine Tenart7c10f972017-10-30 11:23:29 +010048#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port))
Marcin Wojtas3f518502014-07-10 16:52:13 -030049
50/* RX DMA Top Registers */
51#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
52#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
53#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
54#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
55#define MVPP2_POOL_BUF_SIZE_OFFSET 5
56#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
57#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
58#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
59#define MVPP2_RXQ_POOL_SHORT_OFFS 20
Thomas Petazzoni5eac8922017-03-07 16:53:10 +010060#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
61#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
Marcin Wojtas3f518502014-07-10 16:52:13 -030062#define MVPP2_RXQ_POOL_LONG_OFFS 24
Thomas Petazzoni5eac8922017-03-07 16:53:10 +010063#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
64#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
Marcin Wojtas3f518502014-07-10 16:52:13 -030065#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
66#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
67#define MVPP2_RXQ_DISABLE_MASK BIT(31)
68
Maxime Chevallier56beda32018-02-28 10:14:13 +010069/* Top Registers */
70#define MVPP2_MH_REG(port) (0x5040 + 4 * (port))
71#define MVPP2_DSA_EXTENDED BIT(5)
72
Marcin Wojtas3f518502014-07-10 16:52:13 -030073/* Parser Registers */
74#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
75#define MVPP2_PRS_PORT_LU_MAX 0xf
76#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
77#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
78#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
79#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
80#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
81#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
82#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
83#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
84#define MVPP2_PRS_TCAM_IDX_REG 0x1100
85#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
86#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
87#define MVPP2_PRS_SRAM_IDX_REG 0x1200
88#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
89#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
90#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
91
Antoine Tenart1d7d15d2017-10-30 11:23:30 +010092/* RSS Registers */
93#define MVPP22_RSS_INDEX 0x1500
Antoine Tenart8a7b7412017-12-08 10:24:20 +010094#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
Antoine Tenart1d7d15d2017-10-30 11:23:30 +010095#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
96#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
97#define MVPP22_RSS_TABLE_ENTRY 0x1508
98#define MVPP22_RSS_TABLE 0x1510
99#define MVPP22_RSS_TABLE_POINTER(p) (p)
100#define MVPP22_RSS_WIDTH 0x150c
101
Marcin Wojtas3f518502014-07-10 16:52:13 -0300102/* Classifier Registers */
103#define MVPP2_CLS_MODE_REG 0x1800
104#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
105#define MVPP2_CLS_PORT_WAY_REG 0x1810
106#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
107#define MVPP2_CLS_LKP_INDEX_REG 0x1814
108#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
109#define MVPP2_CLS_LKP_TBL_REG 0x1818
110#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
111#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
112#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
113#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
114#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
115#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
116#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
117#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
118#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
119#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
120#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
121#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
122
123/* Descriptor Manager Top Registers */
124#define MVPP2_RXQ_NUM_REG 0x2040
125#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
Thomas Petazzonib02f31f2017-03-07 16:53:12 +0100126#define MVPP22_DESC_ADDR_OFFS 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300127#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
128#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
129#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
130#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
131#define MVPP2_RXQ_NUM_NEW_OFFSET 16
132#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
133#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
134#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
135#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
136#define MVPP2_RXQ_THRESH_REG 0x204c
137#define MVPP2_OCCUPIED_THRESH_OFFSET 0
138#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
139#define MVPP2_RXQ_INDEX_REG 0x2050
140#define MVPP2_TXQ_NUM_REG 0x2080
141#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
142#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
143#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200144#define MVPP2_TXQ_THRESH_REG 0x2094
145#define MVPP2_TXQ_THRESH_OFFSET 16
146#define MVPP2_TXQ_THRESH_MASK 0x3fff
Marcin Wojtas3f518502014-07-10 16:52:13 -0300147#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
Marcin Wojtas3f518502014-07-10 16:52:13 -0300148#define MVPP2_TXQ_INDEX_REG 0x2098
149#define MVPP2_TXQ_PREF_BUF_REG 0x209c
150#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
151#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
152#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
153#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
154#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
155#define MVPP2_TXQ_PENDING_REG 0x20a0
156#define MVPP2_TXQ_PENDING_MASK 0x3fff
157#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
158#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
159#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
160#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
161#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
162#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
163#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
164#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
165#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
166#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
167#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
Thomas Petazzonib02f31f2017-03-07 16:53:12 +0100168#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300169#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
170#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
171#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
172#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
173#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
174
175/* MBUS bridge registers */
176#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
177#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
178#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
179#define MVPP2_BASE_ADDR_ENABLE 0x4060
180
Thomas Petazzoni6763ce32017-03-07 16:53:15 +0100181/* AXI Bridge Registers */
182#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
183#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
184#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
185#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
186#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
187#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
188#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
189#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
190#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
191#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
192#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
193#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
194
195/* Values for AXI Bridge registers */
196#define MVPP22_AXI_ATTR_CACHE_OFFS 0
197#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
198
199#define MVPP22_AXI_CODE_CACHE_OFFS 0
200#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
201
202#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
203#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
204#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
205
206#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
207#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
208
Marcin Wojtas3f518502014-07-10 16:52:13 -0300209/* Interrupt Cause and Mask registers */
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200210#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
211#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0
212
Marcin Wojtas3f518502014-07-10 16:52:13 -0300213#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
Thomas Petazzoniab426762017-02-21 11:28:04 +0100214#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
Thomas Petazzonieb1e93a2017-08-03 10:41:55 +0200215#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port))
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100216
Antoine Ténart81b66302017-08-22 19:08:21 +0200217#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100218#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
Antoine Ténart81b66302017-08-22 19:08:21 +0200219#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
220#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100221
222#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
Antoine Ténart81b66302017-08-22 19:08:21 +0200223#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100224
Antoine Ténart81b66302017-08-22 19:08:21 +0200225#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
226#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
227#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
228#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100229
Marcin Wojtas3f518502014-07-10 16:52:13 -0300230#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
231#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
232#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
233#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
234#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
235#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200236#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
Marcin Wojtas3f518502014-07-10 16:52:13 -0300237#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
238#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
239#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
240#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
241#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
242#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
243#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
244#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
245#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
246#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
247#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
248#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
249
250/* Buffer Manager registers */
251#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
252#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
253#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
254#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
255#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
256#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
257#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
258#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
259#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
260#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
261#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
Stefan Chulskieffbf5f2018-03-05 15:16:51 +0100262#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300263#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
264#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
265#define MVPP2_BM_START_MASK BIT(0)
266#define MVPP2_BM_STOP_MASK BIT(1)
267#define MVPP2_BM_STATE_MASK BIT(4)
268#define MVPP2_BM_LOW_THRESH_OFFS 8
269#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
270#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
271 MVPP2_BM_LOW_THRESH_OFFS)
272#define MVPP2_BM_HIGH_THRESH_OFFS 16
273#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
274#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
275 MVPP2_BM_HIGH_THRESH_OFFS)
276#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
277#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
278#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
279#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
280#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
281#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
282#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
283#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
284#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
285#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100286#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
287#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
288#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
289#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300290#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
291#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
292#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
293#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
294#define MVPP2_BM_VIRT_RLS_REG 0x64c0
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100295#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
296#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
Antoine Ténart81b66302017-08-22 19:08:21 +0200297#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100298#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300299
300/* TX Scheduler registers */
301#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
302#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
303#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
304#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
305#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
306#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
307#define MVPP2_TXP_SCHED_MTU_REG 0x801c
308#define MVPP2_TXP_MTU_MAX 0x7FFFF
309#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
310#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
311#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
312#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
313#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
314#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
315#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
316#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
317#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
318#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
319#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
320#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
321#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
322#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
323
324/* TX general registers */
325#define MVPP2_TX_SNOOP_REG 0x8800
326#define MVPP2_TX_PORT_FLUSH_REG 0x8810
327#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
328
329/* LMS registers */
330#define MVPP2_SRC_ADDR_MIDDLE 0x24
331#define MVPP2_SRC_ADDR_HIGH 0x28
Marcin Wojtas08a23752014-07-21 13:48:12 -0300332#define MVPP2_PHY_AN_CFG0_REG 0x34
333#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300334#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
Thomas Petazzoni31d76772017-02-21 11:28:10 +0100335#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
Marcin Wojtas3f518502014-07-10 16:52:13 -0300336
337/* Per-port registers */
338#define MVPP2_GMAC_CTRL_0_REG 0x0
Antoine Ténart81b66302017-08-22 19:08:21 +0200339#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
Antoine Ténart39193572017-08-22 19:08:24 +0200340#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1)
Antoine Ténart81b66302017-08-22 19:08:21 +0200341#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
342#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
343#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300344#define MVPP2_GMAC_CTRL_1_REG 0x4
Antoine Ténart81b66302017-08-22 19:08:21 +0200345#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
346#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
347#define MVPP2_GMAC_PCS_LB_EN_BIT 6
348#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
349#define MVPP2_GMAC_SA_LOW_OFFS 7
Marcin Wojtas3f518502014-07-10 16:52:13 -0300350#define MVPP2_GMAC_CTRL_2_REG 0x8
Antoine Ténart81b66302017-08-22 19:08:21 +0200351#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
Antoine Ténart39193572017-08-22 19:08:24 +0200352#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
Antoine Ténart81b66302017-08-22 19:08:21 +0200353#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
Antoine Tenartc7dfc8c2017-09-25 14:59:48 +0200354#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4)
Antoine Ténart39193572017-08-22 19:08:24 +0200355#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
Antoine Ténart81b66302017-08-22 19:08:21 +0200356#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300357#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
Antoine Ténart81b66302017-08-22 19:08:21 +0200358#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
359#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
Antoine Ténart39193572017-08-22 19:08:24 +0200360#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
361#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
Antoine Ténart81b66302017-08-22 19:08:21 +0200362#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
363#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
364#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
365#define MVPP2_GMAC_FC_ADV_EN BIT(9)
Antoine Ténart39193572017-08-22 19:08:24 +0200366#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11)
Antoine Ténart81b66302017-08-22 19:08:21 +0200367#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
368#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
Antoine Tenartfd3651b2017-09-01 11:04:54 +0200369#define MVPP2_GMAC_STATUS0 0x10
370#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300371#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
Antoine Ténart81b66302017-08-22 19:08:21 +0200372#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
373#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
374#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
Marcin Wojtas3f518502014-07-10 16:52:13 -0300375 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
Antoine Tenartfd3651b2017-09-01 11:04:54 +0200376#define MVPP22_GMAC_INT_STAT 0x20
377#define MVPP22_GMAC_INT_STAT_LINK BIT(1)
378#define MVPP22_GMAC_INT_MASK 0x24
379#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1)
Thomas Petazzoni26975822017-03-07 16:53:14 +0100380#define MVPP22_GMAC_CTRL_4_REG 0x90
Antoine Ténart81b66302017-08-22 19:08:21 +0200381#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
382#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
Antoine Ténart1068ec72017-08-22 19:08:22 +0200383#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
Antoine Ténart81b66302017-08-22 19:08:21 +0200384#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
Antoine Tenartfd3651b2017-09-01 11:04:54 +0200385#define MVPP22_GMAC_INT_SUM_MASK 0xa4
386#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
Thomas Petazzoni26975822017-03-07 16:53:14 +0100387
388/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
389 * relative to port->base.
390 */
Antoine Ténart725757a2017-06-12 16:01:39 +0200391#define MVPP22_XLG_CTRL0_REG 0x100
Antoine Ténart81b66302017-08-22 19:08:21 +0200392#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
393#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
Antoine Ténart77321952017-08-22 19:08:25 +0200394#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
Antoine Ténart81b66302017-08-22 19:08:21 +0200395#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
Stefan Chulski76eb1b12017-08-22 19:08:26 +0200396#define MVPP22_XLG_CTRL1_REG 0x104
Antoine Ténartec15ecd2017-08-25 15:24:46 +0200397#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0
Stefan Chulski76eb1b12017-08-22 19:08:26 +0200398#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff
Antoine Tenartfd3651b2017-09-01 11:04:54 +0200399#define MVPP22_XLG_STATUS 0x10c
400#define MVPP22_XLG_STATUS_LINK_UP BIT(0)
401#define MVPP22_XLG_INT_STAT 0x114
402#define MVPP22_XLG_INT_STAT_LINK BIT(1)
403#define MVPP22_XLG_INT_MASK 0x118
404#define MVPP22_XLG_INT_MASK_LINK BIT(1)
Thomas Petazzoni26975822017-03-07 16:53:14 +0100405#define MVPP22_XLG_CTRL3_REG 0x11c
Antoine Ténart81b66302017-08-22 19:08:21 +0200406#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
407#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
408#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
Antoine Tenartfd3651b2017-09-01 11:04:54 +0200409#define MVPP22_XLG_EXT_INT_MASK 0x15c
410#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1)
411#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2)
Antoine Ténart77321952017-08-22 19:08:25 +0200412#define MVPP22_XLG_CTRL4_REG 0x184
413#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
414#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
415#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
416
Thomas Petazzoni26975822017-03-07 16:53:14 +0100417/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
418#define MVPP22_SMI_MISC_CFG_REG 0x1204
Antoine Ténart81b66302017-08-22 19:08:21 +0200419#define MVPP22_SMI_POLLING_EN BIT(10)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300420
Thomas Petazzonia7868412017-03-07 16:53:13 +0100421#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
422
Marcin Wojtas3f518502014-07-10 16:52:13 -0300423#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
424
425/* Descriptor ring Macros */
426#define MVPP2_QUEUE_NEXT_DESC(q, index) \
427 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
428
Antoine Ténartf84bf382017-08-22 19:08:27 +0200429/* XPCS registers. PPv2.2 only */
430#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
431#define MVPP22_MPCS_CTRL 0x14
432#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
433#define MVPP22_MPCS_CLK_RESET 0x14c
434#define MAC_CLK_RESET_SD_TX BIT(0)
435#define MAC_CLK_RESET_SD_RX BIT(1)
436#define MAC_CLK_RESET_MAC BIT(2)
437#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
438#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)
439
440/* XPCS registers. PPv2.2 only */
441#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
442#define MVPP22_XPCS_CFG0 0x0
443#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
444#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)
445
446/* System controller registers. Accessed through a regmap. */
447#define GENCONF_SOFT_RESET1 0x1108
448#define GENCONF_SOFT_RESET1_GOP BIT(6)
449#define GENCONF_PORT_CTRL0 0x1110
450#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1)
451#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29)
452#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31)
453#define GENCONF_PORT_CTRL1 0x1114
454#define GENCONF_PORT_CTRL1_EN(p) BIT(p)
455#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28)
456#define GENCONF_CTRL0 0x1120
457#define GENCONF_CTRL0_PORT0_RGMII BIT(0)
458#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1)
459#define GENCONF_CTRL0_PORT1_RGMII BIT(2)
460
Marcin Wojtas3f518502014-07-10 16:52:13 -0300461/* Various constants */
462
463/* Coalescing */
Antoine Tenart86162282017-12-11 09:13:29 +0100464#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200465#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200466#define MVPP2_TXDONE_COAL_USEC 1000
Marcin Wojtas3f518502014-07-10 16:52:13 -0300467#define MVPP2_RX_COAL_PKTS 32
Antoine Tenart86162282017-12-11 09:13:29 +0100468#define MVPP2_RX_COAL_USEC 64
Marcin Wojtas3f518502014-07-10 16:52:13 -0300469
470/* The two bytes Marvell header. Either contains a special value used
471 * by Marvell switches when a specific hardware mode is enabled (not
472 * supported by this driver) or is filled automatically by zeroes on
473 * the RX side. Those two bytes being at the front of the Ethernet
474 * header, they allow to have the IP header aligned on a 4 bytes
475 * boundary automatically: the hardware skips those two bytes on its
476 * own.
477 */
478#define MVPP2_MH_SIZE 2
479#define MVPP2_ETH_TYPE_LEN 2
480#define MVPP2_PPPOE_HDR_SIZE 8
481#define MVPP2_VLAN_TAG_LEN 4
Maxime Chevallier56beda32018-02-28 10:14:13 +0100482#define MVPP2_VLAN_TAG_EDSA_LEN 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300483
484/* Lbtd 802.3 type */
485#define MVPP2_IP_LBDT_TYPE 0xfffa
486
Marcin Wojtas3f518502014-07-10 16:52:13 -0300487#define MVPP2_TX_CSUM_MAX_SIZE 9800
488
489/* Timeout constants */
490#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
491#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
492
493#define MVPP2_TX_MTU_MAX 0x7ffff
494
495/* Maximum number of T-CONTs of PON port */
496#define MVPP2_MAX_TCONT 16
497
498/* Maximum number of supported ports */
499#define MVPP2_MAX_PORTS 4
500
501/* Maximum number of TXQs used by single port */
502#define MVPP2_MAX_TXQ 8
503
Antoine Tenart1d17db02017-10-30 11:23:31 +0100504/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
505 * skb. As we need a maxium of two descriptors per fragments (1 header, 1 data),
506 * multiply this value by two to count the maximum number of skb descs needed.
507 */
508#define MVPP2_MAX_TSO_SEGS 300
509#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
510
Marcin Wojtas3f518502014-07-10 16:52:13 -0300511/* Dfault number of RXQs in use */
512#define MVPP2_DEFAULT_RXQ 4
513
Marcin Wojtas3f518502014-07-10 16:52:13 -0300514/* Max number of Rx descriptors */
Yan Markman7cf87e42017-12-11 09:13:26 +0100515#define MVPP2_MAX_RXD_MAX 1024
516#define MVPP2_MAX_RXD_DFLT 128
Marcin Wojtas3f518502014-07-10 16:52:13 -0300517
518/* Max number of Tx descriptors */
Yan Markman7cf87e42017-12-11 09:13:26 +0100519#define MVPP2_MAX_TXD_MAX 2048
520#define MVPP2_MAX_TXD_DFLT 1024
Marcin Wojtas3f518502014-07-10 16:52:13 -0300521
522/* Amount of Tx descriptors that can be reserved at once by CPU */
523#define MVPP2_CPU_DESC_CHUNK 64
524
525/* Max number of Tx descriptors in each aggregated queue */
526#define MVPP2_AGGR_TXQ_SIZE 256
527
528/* Descriptor aligned size */
529#define MVPP2_DESC_ALIGNED_SIZE 32
530
531/* Descriptor alignment mask */
532#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
533
534/* RX FIFO constants */
Antoine Tenart2d1d7df2017-10-30 11:23:28 +0100535#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000
536#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000
537#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000
538#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200
539#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80
540#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40
541#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
Marcin Wojtas3f518502014-07-10 16:52:13 -0300542
Antoine Tenart7c10f972017-10-30 11:23:29 +0100543/* TX FIFO constants */
544#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
545#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
Yan Markman93ff1302018-03-05 15:16:52 +0100546#define MVPP2_TX_FIFO_THRESHOLD_MIN 256
547#define MVPP2_TX_FIFO_THRESHOLD_10KB \
548 (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
549#define MVPP2_TX_FIFO_THRESHOLD_3KB \
550 (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
Antoine Tenart7c10f972017-10-30 11:23:29 +0100551
Marcin Wojtas3f518502014-07-10 16:52:13 -0300552/* RX buffer constants */
553#define MVPP2_SKB_SHINFO_SIZE \
554 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
555
556#define MVPP2_RX_PKT_SIZE(mtu) \
557 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
Jisheng Zhang4a0a12d2016-04-01 17:11:05 +0800558 ETH_HLEN + ETH_FCS_LEN, cache_line_size())
Marcin Wojtas3f518502014-07-10 16:52:13 -0300559
560#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
561#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
562#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
563 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
564
565#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
566
567/* IPv6 max L3 address size */
568#define MVPP2_MAX_L3_ADDR_SIZE 16
569
570/* Port flags */
571#define MVPP2_F_LOOPBACK BIT(0)
572
/* Marvell tag types recognized by the PPv2 parser.
 *
 * The values are hardware-defined and must not be renumbered: they are
 * programmed into the parser TCAM/SRAM to classify how a frame is tagged
 * on ingress. MVPP2_TAG_TYPE_LAST is a sentinel marking the number of
 * valid tag types, not a tag type itself.
 */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,	/* untagged frame */
	MVPP2_TAG_TYPE_MH   = 1,	/* two-byte Marvell header */
	MVPP2_TAG_TYPE_DSA  = 2,	/* Marvell DSA switch tag */
	MVPP2_TAG_TYPE_EDSA = 3,	/* extended (8-byte) DSA tag */
	MVPP2_TAG_TYPE_VLAN = 4,	/* 802.1Q VLAN tag */
	MVPP2_TAG_TYPE_LAST = 5		/* sentinel: number of tag types */
};
582
583/* Parser constants */
584#define MVPP2_PRS_TCAM_SRAM_SIZE 256
585#define MVPP2_PRS_TCAM_WORDS 6
586#define MVPP2_PRS_SRAM_WORDS 4
587#define MVPP2_PRS_FLOW_ID_SIZE 64
588#define MVPP2_PRS_FLOW_ID_MASK 0x3f
589#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
590#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
591#define MVPP2_PRS_IPV4_HEAD 0x40
592#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
593#define MVPP2_PRS_IPV4_MC 0xe0
594#define MVPP2_PRS_IPV4_MC_MASK 0xf0
595#define MVPP2_PRS_IPV4_BC_MASK 0xff
596#define MVPP2_PRS_IPV4_IHL 0x5
597#define MVPP2_PRS_IPV4_IHL_MASK 0xf
598#define MVPP2_PRS_IPV6_MC 0xff
599#define MVPP2_PRS_IPV6_MC_MASK 0xff
600#define MVPP2_PRS_IPV6_HOP_MASK 0xff
601#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
602#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
603#define MVPP2_PRS_DBL_VLANS_MAX 100
Maxime Chevallier10fea262018-03-07 15:18:04 +0100604#define MVPP2_PRS_CAST_MASK BIT(0)
605#define MVPP2_PRS_MCAST_VAL BIT(0)
606#define MVPP2_PRS_UCAST_VAL 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300607
/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
/* Map a logical header-data byte offset to its byte index in the TCAM
 * entry (data and enable bytes are interleaved two-by-two in HW).
 */
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
				    (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
/* Byte index of the "enable" (mask) byte matching MVPP2_PRS_TCAM_DATA_BYTE */
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
					 (((offs) * 2) - ((offs) % 2) + 2)
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5

#define MVPP2_PRS_VID_TCAM_BYTE			2

/* TCAM range for unicast and multicast filtering. We have 25 entries per port,
 * with 4 dedicated to UC filtering and the rest to multicast filtering.
 * Additionally we reserve one entry for the broadcast address, and one for
 * each port's own address.
 */
#define MVPP2_PRS_MAC_UC_MC_FILT_MAX	25
#define MVPP2_PRS_MAC_RANGE_SIZE	80

/* Number of entries per port dedicated to UC and MC filtering */
#define MVPP2_PRS_MAC_UC_FILT_MAX	4
#define MVPP2_PRS_MAC_MC_FILT_MAX	(MVPP2_PRS_MAC_UC_MC_FILT_MAX - \
					 MVPP2_PRS_MAC_UC_FILT_MAX)

/* There is a TCAM range reserved for VLAN filtering entries, range size is 33
 * 10 VLAN ID filter entries per port
 * 1 default VLAN filter entry per port
 * It is assumed that there are 3 ports for filter, not including loopback port
 */
#define MVPP2_PRS_VLAN_FILT_MAX		11
#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE	33

#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY	(MVPP2_PRS_VLAN_FILT_MAX - 2)
#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY	(MVPP2_PRS_VLAN_FILT_MAX - 1)
653
/* Tcam entries ID.
 * Low indices and the tail of the table hold fixed-purpose entries; the
 * span FIRST_FREE_TID..LAST_FREE_TID is dynamically allocated at runtime.
 */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1

/* MAC filtering range */
#define MVPP2_PE_MAC_RANGE_END		(MVPP2_PE_VID_FILT_RANGE_START - 1)
#define MVPP2_PE_MAC_RANGE_START	(MVPP2_PE_MAC_RANGE_END - \
					MVPP2_PRS_MAC_RANGE_SIZE + 1)
/* VLAN filtering range */
#define MVPP2_PE_VID_FILT_RANGE_END	(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_VID_FILT_RANGE_START	(MVPP2_PE_VID_FILT_RANGE_END - \
					 MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PE_MAC_RANGE_START - 1)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 22)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 21)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 20)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_VID_FLTR_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
/* (MVPP2_PRS_TCAM_SRAM_SIZE - 4) is reserved */
#define MVPP2_PE_MAC_MC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_UC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* First/last VLAN-ID filter entry of a given port inside the VID range */
#define MVPP2_PRS_VID_PORT_FIRST(port)	(MVPP2_PE_VID_FILT_RANGE_START + \
					 ((port) * MVPP2_PRS_VLAN_FILT_MAX))
#define MVPP2_PRS_VID_PORT_LAST(port)	(MVPP2_PRS_VID_PORT_FIRST(port) \
					 + MVPP2_PRS_VLAN_FILT_MAX_ENTRY)
/* Index of default vid filter for given port */
#define MVPP2_PRS_VID_PORT_DFLT(port)	(MVPP2_PRS_VID_PORT_FIRST(port) \
					 + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY)
701
/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 * Offsets below are bit positions inside the 128-bit SRAM entry.
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_IP_FRAG_TRUE		BIT(17)
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
#define MVPP2_PRS_EDSA_VID_AI_BIT		BIT(0)

/* DSA/EDSA type - boolean selectors passed to the parser helpers */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false
796
/* MAC entries, shadow udf - tags recorded in the PRS shadow table to tell
 * what kind of user-defined entry occupies each TCAM slot.
 */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};
805
/* Lookup ID - identifies each stage of the parser lookup chain
 * (4-bit LU field of a TCAM entry, see MVPP2_PRS_LU_MASK).
 */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_VID,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};
820
/* L2 cast enum */
enum mvpp2_prs_l2_cast {
	MVPP2_PRS_L2_UNI_CAST,
	MVPP2_PRS_L2_MULTI_CAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};
833
/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64
#define MVPP2_CLS_RX_QUEUES		256

/* RSS constants */
#define MVPP22_RSS_TABLE_ENTRIES	32

/* BM constants */
#define MVPP2_BM_JUMBO_BUF_NUM		512
#define MVPP2_BM_LONG_BUF_NUM		1024
#define MVPP2_BM_SHORT_BUF_NUM		2048
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
Marcin Wojtas3f518502014-07-10 16:52:13 -0300849
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

#define MVPP2_BM_SHORT_FRAME_SIZE	512
#define MVPP2_BM_LONG_FRAME_SIZE	2048
#define MVPP2_BM_JUMBO_FRAME_SIZE	10240
/* BM short pool packet size
 * These values assure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
#define MVPP2_BM_LONG_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
#define MVPP2_BM_JUMBO_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE)

/* Distance between per-thread register windows (PPv2.1 has a single one) */
#define MVPP21_ADDR_SPACE_SZ		0
#define MVPP22_ADDR_SPACE_SZ		SZ_64K

#define MVPP2_MAX_THREADS		8
#define MVPP2_MAX_QVECS			MVPP2_MAX_THREADS

/* Logical BM pool identifiers, one per supported frame size class */
enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

/* Per-pool packet size / buffer count, indexed by enum mvpp2_bm_pool_log_num */
static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];
882
/* GMAC MIB Counters register definitions.
 * The per-counter values below are byte offsets inside a port's MIB window.
 */
#define MVPP21_MIB_COUNTERS_OFFSET		0x1000
#define MVPP21_MIB_COUNTERS_PORT_SZ		0x400
#define MVPP22_MIB_COUNTERS_OFFSET		0x0
#define MVPP22_MIB_COUNTERS_PORT_SZ		0x100

#define MVPP2_MIB_GOOD_OCTETS_RCVD		0x0
#define MVPP2_MIB_BAD_OCTETS_RCVD		0x8
#define MVPP2_MIB_CRC_ERRORS_SENT		0xc
#define MVPP2_MIB_UNICAST_FRAMES_RCVD		0x10
#define MVPP2_MIB_BROADCAST_FRAMES_RCVD		0x18
#define MVPP2_MIB_MULTICAST_FRAMES_RCVD		0x1c
#define MVPP2_MIB_FRAMES_64_OCTETS		0x20
#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS	0x24
#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS	0x28
#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS	0x2c
#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS	0x30
#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS	0x34
#define MVPP2_MIB_GOOD_OCTETS_SENT		0x38
#define MVPP2_MIB_UNICAST_FRAMES_SENT		0x40
#define MVPP2_MIB_MULTICAST_FRAMES_SENT		0x48
#define MVPP2_MIB_BROADCAST_FRAMES_SENT		0x4c
#define MVPP2_MIB_FC_SENT			0x54
#define MVPP2_MIB_FC_RCVD			0x58
#define MVPP2_MIB_RX_FIFO_OVERRUN		0x5c
#define MVPP2_MIB_UNDERSIZE_RCVD		0x60
#define MVPP2_MIB_FRAGMENTS_RCVD		0x64
#define MVPP2_MIB_OVERSIZE_RCVD			0x68
#define MVPP2_MIB_JABBER_RCVD			0x6c
#define MVPP2_MIB_MAC_RCV_ERROR			0x70
#define MVPP2_MIB_BAD_CRC_EVENT			0x74
#define MVPP2_MIB_COLLISION			0x78
#define MVPP2_MIB_LATE_COLLISION		0x7c

/* Period of the statistics-gathering delayed work */
#define MVPP2_MIB_COUNTERS_STATS_DELAY		(1 * HZ)

/* PPv2.2 descriptors carry 40-bit DMA addresses */
#define MVPP2_DESC_DMA_MASK	DMA_BIT_MASK(40)
920
Marcin Wojtas3f518502014-07-10 16:52:13 -0300921/* Definitions */
922
/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;	/* NOTE(review): presumably the PPv2.2 GOP window - confirm */

	/* On PPv2.2, each "software thread" can access the base
	 * register through a separate address space, each 64 KB apart
	 * from each other. Typically, such address spaces will be
	 * used per CPU.
	 */
	void __iomem *swth_base[MVPP2_MAX_THREADS];

	/* On PPv2.2, some port control registers are located into the system
	 * controller space. These registers are accessible through a regmap.
	 */
	struct regmap *sysctrl_base;

	/* Common clocks */
	struct clk *pp_clk;
	struct clk *gop_clk;
	struct clk *mg_clk;
	struct clk *axi_clk;

	/* List of pointers to port structures */
	int port_count;
	struct mvpp2_port *port_list[MVPP2_MAX_PORTS];

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	/* Workqueue to gather hardware statistics */
	char queue_name[30];
	struct workqueue_struct *stats_queue;
};
975
/* Per-CPU RX/TX counters, read under the u64_stats sync sequence */
struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};
983
/* Per-CPU port control */
struct mvpp2_port_pcpu {
	/* Timer deferring TX completion processing */
	struct hrtimer tx_done_timer;
	bool timer_scheduled;	/* true while tx_done_timer is pending */
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};
991
/* One interrupt + NAPI context handling a set of RX queues, either shared
 * by all CPUs or private to one software thread.
 */
struct mvpp2_queue_vector {
	int irq;
	struct napi_struct napi;
	enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
	int sw_thread_id;	/* index into mvpp2->swth_base for PRIVATE vectors */
	u16 sw_thread_mask;
	int first_rxq;		/* first RX queue served by this vector */
	int nrxqs;		/* number of RX queues served */
	u32 pending_cause_rx;	/* RX cause bits not yet processed by NAPI */
	struct mvpp2_port *port;
};
1003
/* Per-port state of the driver */
struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int link_irq;	/* NOTE(review): presumably link-status change IRQ - confirm */

	struct mvpp2 *priv;

	/* Firmware node associated to the port */
	struct fwnode_handle *fwnode;

	/* Per-port registers' base address */
	void __iomem *base;
	void __iomem *stats_base;	/* base of this port's MIB counter window */

	struct mvpp2_rx_queue **rxqs;
	unsigned int nrxqs;
	struct mvpp2_tx_queue **txqs;
	unsigned int ntxqs;
	struct net_device *dev;

	int pkt_size;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;
	u64 *ethtool_stats;	/* snapshot buffer exposed through ethtool -S */

	/* Per-port work and its lock to gather hardware statistics */
	struct mutex gather_stats_lock;
	struct delayed_work stats_work;

	phy_interface_t phy_interface;
	struct device_node *phy_node;
	struct phy *comphy;	/* common PHY (SerDes) handle, may be NULL */
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
	unsigned int nqvecs;
	bool has_tx_irqs;	/* false: TX completion runs off hrtimer/tasklet */

	u32 tx_time_coal;
};
1065
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)	/* last descriptor of a frame */
#define MVPP2_TXD_F_DESC		BIT(29)	/* first descriptor of a frame */

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)
1097
/* HW TX descriptor for PPv2.1 - field order and widths are fixed by HW,
 * do not reorder or resize members.
 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};
1109
/* HW RX descriptor for PPv2.1 - field order and widths are fixed by HW,
 * do not reorder or resize members.
 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};
1125
/* HW TX descriptor for PPv2.2. Same size as the PPv2.1 variant, but DMA
 * address/cookie fields are 64-bit wide (address limited to 40 bits by
 * MVPP2_DESC_DMA_MASK; the upper bits carry PTP/misc data).
 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};
1136
/* HW RX descriptor for PPv2.2. Same size as the PPv2.1 variant, but DMA
 * address/cookie fields are 64-bit wide.
 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};
1147
/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors. The active member is selected at runtime via
 * mvpp2->hw_version through the mvpp2_txdesc_*()/mvpp2_rxdesc_*()
 * accessors.
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};
1164
/* Bookkeeping for one transmitted buffer, kept until TX completion so the
 * DMA mapping can be unmapped and the skb freed.
 */
struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};
1175
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Ring occupancy thresholds - NOTE(review): presumably the netif
	 * queue wake/stop watermarks; confirm at the call sites.
	 */
	int wake_threshold;
	int stop_threshold;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* DMA buffer for TSO headers */
	char *tso_headers;
	dma_addr_t tso_headers_dma;
};
1207
/* One physical TX queue and its descriptor ring */
struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};
1238
/* One physical RX queue and its descriptor ring */
struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	/* Interrupt coalescing settings (packet count / time) */
	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};
1267
/* Raw parser TCAM entry: 6 x 32-bit words, also addressable byte-wise */
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

/* Raw parser SRAM entry: 4 x 32-bit words, also addressable byte-wise */
union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

/* A complete parser entry: its table index plus TCAM and SRAM halves */
struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};
1283
/* Software shadow of one parser TCAM entry, used to track allocation and
 * look entries up without reading the hardware back.
 */
struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};
1298
/* One row of the classifier flow table (see MVPP2_CLS_FLOWS_TBL_*) */
struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

/* One row of the classifier lookup table (see MVPP2_CLS_LKP_TBL_SIZE) */
struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
1309
/* One Buffer Manager pool */
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* BPPE size in bytes */
	int size_bytes;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;
	/* Fragment size used when allocating RX buffers for this pool */
	int frag_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool (bitmap of port ids) */
	u32 port_map;
};
1334
/* True iff @addr falls inside @txq_pcpu's pre-allocated TSO header buffer
 * (such addresses must not be DMA-unmapped on TX completion).
 */
#define IS_TSO_HEADER(txq_pcpu, addr) \
	((addr) >= (txq_pcpu)->tso_headers_dma && \
	 (addr) < (txq_pcpu)->tso_headers_dma + \
	 (txq_pcpu)->size * TSO_HEADER_SIZE)

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0	/* all queues handled by one CPU */
#define MVPP2_QDIST_MULTI_MODE	1	/* queues distributed across CPUs */

static int queue_mode = MVPP2_QDIST_SINGLE_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"
1351
1352/* Utility/helper methods */
1353
1354static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1355{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001356 writel(data, priv->swth_base[0] + offset);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001357}
1358
1359static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1360{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001361 return readl(priv->swth_base[0] + offset);
Thomas Petazzonia7868412017-03-07 16:53:13 +01001362}
1363
Yan Markmancdcfeb02018-03-27 16:49:05 +02001364static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
1365{
1366 return readl_relaxed(priv->swth_base[0] + offset);
1367}
Thomas Petazzonia7868412017-03-07 16:53:13 +01001368/* These accessors should be used to access:
1369 *
1370 * - per-CPU registers, where each CPU has its own copy of the
1371 * register.
1372 *
1373 * MVPP2_BM_VIRT_ALLOC_REG
1374 * MVPP2_BM_ADDR_HIGH_ALLOC
1375 * MVPP22_BM_ADDR_HIGH_RLS_REG
1376 * MVPP2_BM_VIRT_RLS_REG
1377 * MVPP2_ISR_RX_TX_CAUSE_REG
1378 * MVPP2_ISR_RX_TX_MASK_REG
1379 * MVPP2_TXQ_NUM_REG
1380 * MVPP2_AGGR_TXQ_UPDATE_REG
1381 * MVPP2_TXQ_RSVD_REQ_REG
1382 * MVPP2_TXQ_RSVD_RSLT_REG
1383 * MVPP2_TXQ_SENT_REG
1384 * MVPP2_RXQ_NUM_REG
1385 *
1386 * - global registers that must be accessed through a specific CPU
1387 * window, because they are related to an access to a per-CPU
1388 * register
1389 *
1390 * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
1391 * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
1392 * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
1393 * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
1394 * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
1395 * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
1396 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1397 * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
1398 * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
1399 * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
1404static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
1405 u32 offset, u32 data)
1406{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001407 writel(data, priv->swth_base[cpu] + offset);
Thomas Petazzonia7868412017-03-07 16:53:13 +01001408}
1409
1410static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
1411 u32 offset)
1412{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001413 return readl(priv->swth_base[cpu] + offset);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001414}
1415
Yan Markmancdcfeb02018-03-27 16:49:05 +02001416static void mvpp2_percpu_write_relaxed(struct mvpp2 *priv, int cpu,
1417 u32 offset, u32 data)
1418{
1419 writel_relaxed(data, priv->swth_base[cpu] + offset);
1420}
1421
1422static u32 mvpp2_percpu_read_relaxed(struct mvpp2 *priv, int cpu,
1423 u32 offset)
1424{
1425 return readl_relaxed(priv->swth_base[cpu] + offset);
1426}
1427
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001428static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1429 struct mvpp2_tx_desc *tx_desc)
1430{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001431 if (port->priv->hw_version == MVPP21)
1432 return tx_desc->pp21.buf_dma_addr;
1433 else
Maxime Chevallierda42bb22018-04-18 11:14:44 +02001434 return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001435}
1436
1437static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1438 struct mvpp2_tx_desc *tx_desc,
1439 dma_addr_t dma_addr)
1440{
Antoine Tenart6eb5d372017-10-30 11:23:33 +01001441 dma_addr_t addr, offset;
1442
1443 addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
1444 offset = dma_addr & MVPP2_TX_DESC_ALIGN;
1445
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001446 if (port->priv->hw_version == MVPP21) {
Antoine Tenart6eb5d372017-10-30 11:23:33 +01001447 tx_desc->pp21.buf_dma_addr = addr;
1448 tx_desc->pp21.packet_offset = offset;
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001449 } else {
Antoine Tenart6eb5d372017-10-30 11:23:33 +01001450 u64 val = (u64)addr;
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001451
Maxime Chevallierda42bb22018-04-18 11:14:44 +02001452 tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK;
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001453 tx_desc->pp22.buf_dma_addr_ptp |= val;
Antoine Tenart6eb5d372017-10-30 11:23:33 +01001454 tx_desc->pp22.packet_offset = offset;
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001455 }
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001456}
1457
1458static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
1459 struct mvpp2_tx_desc *tx_desc)
1460{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001461 if (port->priv->hw_version == MVPP21)
1462 return tx_desc->pp21.data_size;
1463 else
1464 return tx_desc->pp22.data_size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001465}
1466
1467static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1468 struct mvpp2_tx_desc *tx_desc,
1469 size_t size)
1470{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001471 if (port->priv->hw_version == MVPP21)
1472 tx_desc->pp21.data_size = size;
1473 else
1474 tx_desc->pp22.data_size = size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001475}
1476
1477static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1478 struct mvpp2_tx_desc *tx_desc,
1479 unsigned int txq)
1480{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001481 if (port->priv->hw_version == MVPP21)
1482 tx_desc->pp21.phys_txq = txq;
1483 else
1484 tx_desc->pp22.phys_txq = txq;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001485}
1486
1487static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1488 struct mvpp2_tx_desc *tx_desc,
1489 unsigned int command)
1490{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001491 if (port->priv->hw_version == MVPP21)
1492 tx_desc->pp21.command = command;
1493 else
1494 tx_desc->pp22.command = command;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001495}
1496
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001497static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
1498 struct mvpp2_tx_desc *tx_desc)
1499{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001500 if (port->priv->hw_version == MVPP21)
1501 return tx_desc->pp21.packet_offset;
1502 else
1503 return tx_desc->pp22.packet_offset;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001504}
1505
1506static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1507 struct mvpp2_rx_desc *rx_desc)
1508{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001509 if (port->priv->hw_version == MVPP21)
1510 return rx_desc->pp21.buf_dma_addr;
1511 else
Maxime Chevallierda42bb22018-04-18 11:14:44 +02001512 return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001513}
1514
1515static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1516 struct mvpp2_rx_desc *rx_desc)
1517{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001518 if (port->priv->hw_version == MVPP21)
1519 return rx_desc->pp21.buf_cookie;
1520 else
Maxime Chevallierda42bb22018-04-18 11:14:44 +02001521 return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001522}
1523
1524static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1525 struct mvpp2_rx_desc *rx_desc)
1526{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001527 if (port->priv->hw_version == MVPP21)
1528 return rx_desc->pp21.data_size;
1529 else
1530 return rx_desc->pp22.data_size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001531}
1532
1533static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1534 struct mvpp2_rx_desc *rx_desc)
1535{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001536 if (port->priv->hw_version == MVPP21)
1537 return rx_desc->pp21.status;
1538 else
1539 return rx_desc->pp22.status;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001540}
1541
Marcin Wojtas3f518502014-07-10 16:52:13 -03001542static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1543{
1544 txq_pcpu->txq_get_index++;
1545 if (txq_pcpu->txq_get_index == txq_pcpu->size)
1546 txq_pcpu->txq_get_index = 0;
1547}
1548
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001549static void mvpp2_txq_inc_put(struct mvpp2_port *port,
1550 struct mvpp2_txq_pcpu *txq_pcpu,
Marcin Wojtas71ce3912015-08-06 19:00:29 +02001551 struct sk_buff *skb,
1552 struct mvpp2_tx_desc *tx_desc)
Marcin Wojtas3f518502014-07-10 16:52:13 -03001553{
Thomas Petazzoni83544912016-12-21 11:28:49 +01001554 struct mvpp2_txq_pcpu_buf *tx_buf =
1555 txq_pcpu->buffs + txq_pcpu->txq_put_index;
1556 tx_buf->skb = skb;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001557 tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
1558 tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
1559 mvpp2_txdesc_offset_get(port, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001560 txq_pcpu->txq_put_index++;
1561 if (txq_pcpu->txq_put_index == txq_pcpu->size)
1562 txq_pcpu->txq_put_index = 0;
1563}
1564
1565/* Get number of physical egress port */
1566static inline int mvpp2_egress_port(struct mvpp2_port *port)
1567{
1568 return MVPP2_MAX_TCONT + port->id;
1569}
1570
1571/* Get number of physical TXQ */
1572static inline int mvpp2_txq_phys(int port, int txq)
1573{
1574 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1575}
1576
1577/* Parser configuration routines */
1578
1579/* Update parser tcam and sram hw entries */
1580static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1581{
1582 int i;
1583
1584 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1585 return -EINVAL;
1586
1587 /* Clear entry invalidation bit */
1588 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1589
1590 /* Write tcam index - indirect access */
1591 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1592 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1593 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1594
1595 /* Write sram index - indirect access */
1596 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1597 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1598 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1599
1600 return 0;
1601}
1602
Maxime Chevallier47e0e142018-03-26 15:34:22 +02001603/* Initialize tcam entry from hw */
1604static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
1605 struct mvpp2_prs_entry *pe, int tid)
Marcin Wojtas3f518502014-07-10 16:52:13 -03001606{
1607 int i;
1608
Maxime Chevallier3d92f0b2018-04-05 11:55:48 +02001609 if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
Marcin Wojtas3f518502014-07-10 16:52:13 -03001610 return -EINVAL;
1611
Maxime Chevallier47e0e142018-03-26 15:34:22 +02001612 memset(pe, 0, sizeof(*pe));
1613 pe->index = tid;
1614
Marcin Wojtas3f518502014-07-10 16:52:13 -03001615 /* Write tcam index - indirect access */
1616 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1617
1618 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1619 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1620 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1621 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1622
1623 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1624 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1625
1626 /* Write sram index - indirect access */
1627 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1628 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1629 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1630
1631 return 0;
1632}
1633
1634/* Invalidate tcam hw entry */
1635static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1636{
1637 /* Write index - indirect access */
1638 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1639 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1640 MVPP2_PRS_TCAM_INV_MASK);
1641}
1642
1643/* Enable shadow table entry and set its lookup ID */
1644static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1645{
1646 priv->prs_shadow[index].valid = true;
1647 priv->prs_shadow[index].lu = lu;
1648}
1649
1650/* Update ri fields in shadow table entry */
1651static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1652 unsigned int ri, unsigned int ri_mask)
1653{
1654 priv->prs_shadow[index].ri_mask = ri_mask;
1655 priv->prs_shadow[index].ri = ri;
1656}
1657
1658/* Update lookup field in tcam sw entry */
1659static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1660{
1661 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1662
1663 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1664 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1665}
1666
1667/* Update mask for single port in tcam sw entry */
1668static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1669 unsigned int port, bool add)
1670{
1671 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1672
1673 if (add)
1674 pe->tcam.byte[enable_off] &= ~(1 << port);
1675 else
1676 pe->tcam.byte[enable_off] |= 1 << port;
1677}
1678
1679/* Update port map in tcam sw entry */
1680static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1681 unsigned int ports)
1682{
1683 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1684 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1685
1686 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1687 pe->tcam.byte[enable_off] &= ~port_mask;
1688 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1689}
1690
1691/* Obtain port map from tcam sw entry */
1692static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1693{
1694 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1695
1696 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1697}
1698
1699/* Set byte of data and its enable bits in tcam sw entry */
1700static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1701 unsigned int offs, unsigned char byte,
1702 unsigned char enable)
1703{
1704 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1705 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1706}
1707
1708/* Get byte of data and its enable bits from tcam sw entry */
1709static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1710 unsigned int offs, unsigned char *byte,
1711 unsigned char *enable)
1712{
1713 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1714 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1715}
1716
1717/* Compare tcam data bytes with a pattern */
1718static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1719 u16 data)
1720{
1721 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1722 u16 tcam_data;
1723
Antoine Tenartef4816f2017-10-24 11:41:26 +02001724 tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
Marcin Wojtas3f518502014-07-10 16:52:13 -03001725 if (tcam_data != data)
1726 return false;
1727 return true;
1728}
1729
1730/* Update ai bits in tcam sw entry */
1731static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1732 unsigned int bits, unsigned int enable)
1733{
1734 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1735
1736 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1737
1738 if (!(enable & BIT(i)))
1739 continue;
1740
1741 if (bits & BIT(i))
1742 pe->tcam.byte[ai_idx] |= 1 << i;
1743 else
1744 pe->tcam.byte[ai_idx] &= ~(1 << i);
1745 }
1746
1747 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1748}
1749
1750/* Get ai bits from tcam sw entry */
1751static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1752{
1753 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1754}
1755
/* Match a big-endian ethertype at @offset in a TCAM sw entry, with all
 * bits of both bytes enabled.
 */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
1763
/* Match a 12-bit VLAN ID at @offset in a TCAM sw entry; only the VID bits
 * of the first byte are enabled.
 */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
				unsigned short vid)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset, (vid & 0xf00) >> 8, 0xf);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
}
1771
Marcin Wojtas3f518502014-07-10 16:52:13 -03001772/* Set bits in sram sw entry */
1773static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1774 int val)
1775{
1776 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1777}
1778
1779/* Clear bits in sram sw entry */
1780static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1781 int val)
1782{
1783 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1784}
1785
1786/* Update ri bits in sram sw entry */
1787static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1788 unsigned int bits, unsigned int mask)
1789{
1790 unsigned int i;
1791
1792 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1793 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1794
1795 if (!(mask & BIT(i)))
1796 continue;
1797
1798 if (bits & BIT(i))
1799 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1800 else
1801 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1802
1803 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1804 }
1805}
1806
1807/* Obtain ri bits from sram sw entry */
1808static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1809{
1810 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1811}
1812
1813/* Update ai bits in sram sw entry */
1814static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1815 unsigned int bits, unsigned int mask)
1816{
1817 unsigned int i;
1818 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1819
1820 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1821
1822 if (!(mask & BIT(i)))
1823 continue;
1824
1825 if (bits & BIT(i))
1826 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1827 else
1828 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1829
1830 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1831 }
1832}
1833
/* Read ai bits from sram sw entry.
 * The 8 AI bits are not byte-aligned inside the SRAM image: the low part
 * lives in the top of the byte at ai_off and the rest in the bottom of the
 * following byte, so the two halves are shifted and OR-ed back together.
 */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	/* Byte index holding the least-significant AI bits */
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	/* Bit position of the AI field within that byte */
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
1847
1848/* In sram sw entry set lookup ID field of the tcam key to be used in the next
1849 * lookup interation
1850 */
1851static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1852 unsigned int lu)
1853{
1854 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1855
1856 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1857 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1858 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1859}
1860
1861/* In the sram sw entry set sign and value of the next lookup offset
1862 * and the offset value generated to the classifier
1863 */
1864static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1865 unsigned int op)
1866{
1867 /* Set sign */
1868 if (shift < 0) {
1869 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1870 shift = 0 - shift;
1871 } else {
1872 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1873 }
1874
1875 /* Set value */
1876 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1877 (unsigned char)shift;
1878
1879 /* Reset and set operation */
1880 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1881 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1882 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1883
1884 /* Set base offset as current */
1885 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1886}
1887
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier.
 * The UDF value and operation fields straddle byte boundaries in the SRAM
 * image, so each is written in two steps: the low bits through the generic
 * bit helpers, the spill-over bits by masking/OR-ing the following byte
 * directly.
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign: sign bit stored separately, field holds the magnitude */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value: low bits via helper, high bits spill into next byte */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation: same two-step write as the value field */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
1936
1937/* Find parser flow entry */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02001938static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
Marcin Wojtas3f518502014-07-10 16:52:13 -03001939{
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02001940 struct mvpp2_prs_entry pe;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001941 int tid;
1942
Marcin Wojtas3f518502014-07-10 16:52:13 -03001943 /* Go through the all entires with MVPP2_PRS_LU_FLOWS */
1944 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1945 u8 bits;
1946
1947 if (!priv->prs_shadow[tid].valid ||
1948 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1949 continue;
1950
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02001951 mvpp2_prs_init_from_hw(priv, &pe, tid);
1952 bits = mvpp2_prs_sram_ai_get(&pe);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001953
1954 /* Sram store classification lookup ID in AI bits [5:0] */
1955 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02001956 return tid;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001957 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03001958
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02001959 return -ENOENT;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001960}
1961
1962/* Return first free tcam index, seeking from start to end */
1963static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1964 unsigned char end)
1965{
1966 int tid;
1967
1968 if (start > end)
1969 swap(start, end);
1970
1971 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1972 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1973
1974 for (tid = start; tid <= end; tid++) {
1975 if (!priv->prs_shadow[tid].valid)
1976 return tid;
1977 }
1978
1979 return -EINVAL;
1980}
1981
/* Enable/disable dropping all mac da's for @port.
 * A single shared "drop all" entry (MVPP2_PE_DROP_ALL) is lazily created;
 * afterwards only the per-port bit of its port map is toggled via @add.
 */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exist - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		/* Terminate the lookup chain at the flow stage */
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
2015
/* Set port to unicast or multicast promiscuous mode.
 * One shared entry exists per cast type (UC/MC); it is lazily created and
 * then only the per-port bit of its port map is toggled via @add.
 */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
				      enum mvpp2_prs_l2_cast l2_cast, bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char cast_match;
	unsigned int ri;
	int tid;

	/* Pick match pattern, entry index and result-info per cast type */
	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	/* promiscuous mode - Accept unknown unicast or multicast packets */
	if (priv->prs_shadow[tid].valid) {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);

		/* Match UC or MC addresses */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
					     MVPP2_PRS_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
2069
/* Set entry for dsa packets.
 * One shared entry exists per (tagged, extend) combination; it is lazily
 * created and then only the per-port bit of its port map is toggled via
 * @add. DSA tags are 4 bytes, EDSA tags 8 bytes.
 */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Set ai bits for next iteration */
			if (extend)
				mvpp2_prs_sram_ai_update(&pe, 1,
							MVPP2_PRS_SRAM_AI_MASK);
			else
				mvpp2_prs_sram_ai_update(&pe, 0,
							MVPP2_PRS_SRAM_AI_MASK);

			/* If packet is tagged continue check vid filtering */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
		} else {
			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/
			mvpp2_prs_sram_shift_set(&pe, shift,
					MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
2133
/* Set entry for dsa ethertype.
 * Matches EDSA-ethertype-encapsulated packets. One shared entry exists per
 * (tagged, extend) combination; it is lazily created and then only the
 * per-port bit of its port map is toggled via @add.
 */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exist - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag*/
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
2201
2202/* Search for existing single/triple vlan entry */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002203static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
Marcin Wojtas3f518502014-07-10 16:52:13 -03002204{
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002205 struct mvpp2_prs_entry pe;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002206 int tid;
2207
Marcin Wojtas3f518502014-07-10 16:52:13 -03002208 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
2209 for (tid = MVPP2_PE_FIRST_FREE_TID;
2210 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2211 unsigned int ri_bits, ai_bits;
2212 bool match;
2213
2214 if (!priv->prs_shadow[tid].valid ||
2215 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2216 continue;
2217
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002218 mvpp2_prs_init_from_hw(priv, &pe, tid);
2219 match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002220 if (!match)
2221 continue;
2222
2223 /* Get vlan type */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002224 ri_bits = mvpp2_prs_sram_ri_get(&pe);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002225 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2226
2227 /* Get current ai value from tcam */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002228 ai_bits = mvpp2_prs_tcam_ai_get(&pe);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002229 /* Clear double vlan bit */
2230 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
2231
2232 if (ai != ai_bits)
2233 continue;
2234
2235 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2236 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002237 return tid;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002238 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002239
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002240 return -ENOENT;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002241}
2242
2243/* Add/update single/triple vlan entry */
2244static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
2245 unsigned int port_map)
2246{
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002247 struct mvpp2_prs_entry pe;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002248 int tid_aux, tid;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302249 int ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002250
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002251 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002252
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002253 tid = mvpp2_prs_vlan_find(priv, tpid, ai);
2254
2255 if (tid < 0) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03002256 /* Create new tcam entry */
2257 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
2258 MVPP2_PE_FIRST_FREE_TID);
2259 if (tid < 0)
2260 return tid;
2261
Marcin Wojtas3f518502014-07-10 16:52:13 -03002262 /* Get last double vlan tid */
2263 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
2264 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
2265 unsigned int ri_bits;
2266
2267 if (!priv->prs_shadow[tid_aux].valid ||
2268 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2269 continue;
2270
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002271 mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
2272 ri_bits = mvpp2_prs_sram_ri_get(&pe);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002273 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
2274 MVPP2_PRS_RI_VLAN_DOUBLE)
2275 break;
2276 }
2277
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002278 if (tid <= tid_aux)
2279 return -EINVAL;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002280
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002281 memset(&pe, 0, sizeof(pe));
2282 pe.index = tid;
2283 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002284
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002285 mvpp2_prs_match_etype(&pe, 0, tpid);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002286
Maxime Chevallier56beda32018-02-28 10:14:13 +01002287 /* VLAN tag detected, proceed with VID filtering */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002288 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
Maxime Chevallier56beda32018-02-28 10:14:13 +01002289
Marcin Wojtas3f518502014-07-10 16:52:13 -03002290 /* Clear all ai bits for next iteration */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002291 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002292
2293 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002294 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
Marcin Wojtas3f518502014-07-10 16:52:13 -03002295 MVPP2_PRS_RI_VLAN_MASK);
2296 } else {
2297 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002298 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
Marcin Wojtas3f518502014-07-10 16:52:13 -03002299 MVPP2_PRS_RI_VLAN_MASK);
2300 }
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002301 mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002302
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002303 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2304 } else {
2305 mvpp2_prs_init_from_hw(priv, &pe, tid);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002306 }
2307 /* Update ports' mask */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002308 mvpp2_prs_tcam_port_map_set(&pe, port_map);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002309
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002310 mvpp2_prs_hw_write(priv, &pe);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002311
Sudip Mukherjee43737472014-11-01 16:59:34 +05302312 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002313}
2314
2315/* Get first free double vlan ai number */
2316static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2317{
2318 int i;
2319
2320 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2321 if (!priv->prs_double_vlans[i])
2322 return i;
2323 }
2324
2325 return -EINVAL;
2326}
2327
2328/* Search for existing double vlan entry */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002329static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
2330 unsigned short tpid2)
Marcin Wojtas3f518502014-07-10 16:52:13 -03002331{
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002332 struct mvpp2_prs_entry pe;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002333 int tid;
2334
Marcin Wojtas3f518502014-07-10 16:52:13 -03002335 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
2336 for (tid = MVPP2_PE_FIRST_FREE_TID;
2337 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2338 unsigned int ri_mask;
2339 bool match;
2340
2341 if (!priv->prs_shadow[tid].valid ||
2342 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2343 continue;
2344
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002345 mvpp2_prs_init_from_hw(priv, &pe, tid);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002346
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002347 match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
2348 mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002349
2350 if (!match)
2351 continue;
2352
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002353 ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002354 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002355 return tid;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002356 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002357
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002358 return -ENOENT;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002359}
2360
2361/* Add or update double vlan entry */
2362static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2363 unsigned short tpid2,
2364 unsigned int port_map)
2365{
Sudip Mukherjee43737472014-11-01 16:59:34 +05302366 int tid_aux, tid, ai, ret = 0;
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002367 struct mvpp2_prs_entry pe;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002368
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002369 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002370
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002371 tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2372
2373 if (tid < 0) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03002374 /* Create new tcam entry */
2375 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2376 MVPP2_PE_LAST_FREE_TID);
2377 if (tid < 0)
2378 return tid;
2379
Marcin Wojtas3f518502014-07-10 16:52:13 -03002380 /* Set ai value for new double vlan entry */
2381 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002382 if (ai < 0)
2383 return ai;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002384
2385 /* Get first single/triple vlan tid */
2386 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2387 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2388 unsigned int ri_bits;
2389
2390 if (!priv->prs_shadow[tid_aux].valid ||
2391 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2392 continue;
2393
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002394 mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
2395 ri_bits = mvpp2_prs_sram_ri_get(&pe);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002396 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2397 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2398 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2399 break;
2400 }
2401
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002402 if (tid >= tid_aux)
2403 return -ERANGE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002404
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002405 memset(&pe, 0, sizeof(pe));
2406 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2407 pe.index = tid;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002408
2409 priv->prs_double_vlans[ai] = true;
2410
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002411 mvpp2_prs_match_etype(&pe, 0, tpid1);
2412 mvpp2_prs_match_etype(&pe, 4, tpid2);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002413
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002414 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
Maxime Chevallier56beda32018-02-28 10:14:13 +01002415 /* Shift 4 bytes - skip outer vlan tag */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002416 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
Marcin Wojtas3f518502014-07-10 16:52:13 -03002417 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002418 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
Marcin Wojtas3f518502014-07-10 16:52:13 -03002419 MVPP2_PRS_RI_VLAN_MASK);
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002420 mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
Marcin Wojtas3f518502014-07-10 16:52:13 -03002421 MVPP2_PRS_SRAM_AI_MASK);
2422
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002423 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2424 } else {
2425 mvpp2_prs_init_from_hw(priv, &pe, tid);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002426 }
2427
2428 /* Update ports' mask */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02002429 mvpp2_prs_tcam_port_map_set(&pe, port_map);
2430 mvpp2_prs_hw_write(priv, &pe);
2431
Sudip Mukherjee43737472014-11-01 16:59:34 +05302432 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002433}
2434
2435/* IPv4 header parsing for fragmentation and L4 offset */
2436static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2437 unsigned int ri, unsigned int ri_mask)
2438{
2439 struct mvpp2_prs_entry pe;
2440 int tid;
2441
2442 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2443 (proto != IPPROTO_IGMP))
2444 return -EINVAL;
2445
Stefan Chulskiaff3da32017-09-25 14:59:46 +02002446 /* Not fragmented packet */
Marcin Wojtas3f518502014-07-10 16:52:13 -03002447 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2448 MVPP2_PE_LAST_FREE_TID);
2449 if (tid < 0)
2450 return tid;
2451
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002452 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002453 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2454 pe.index = tid;
2455
2456 /* Set next lu to IPv4 */
2457 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2458 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2459 /* Set L4 offset */
2460 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2461 sizeof(struct iphdr) - 4,
2462 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2463 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2464 MVPP2_PRS_IPV4_DIP_AI_BIT);
Stefan Chulskiaff3da32017-09-25 14:59:46 +02002465 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2466
2467 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
2468 MVPP2_PRS_TCAM_PROTO_MASK_L);
2469 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
2470 MVPP2_PRS_TCAM_PROTO_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002471
2472 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2473 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2474 /* Unmask all ports */
2475 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2476
2477 /* Update shadow table and hw entry */
2478 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2479 mvpp2_prs_hw_write(priv, &pe);
2480
Stefan Chulskiaff3da32017-09-25 14:59:46 +02002481 /* Fragmented packet */
Marcin Wojtas3f518502014-07-10 16:52:13 -03002482 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2483 MVPP2_PE_LAST_FREE_TID);
2484 if (tid < 0)
2485 return tid;
2486
2487 pe.index = tid;
2488 /* Clear ri before updating */
2489 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2490 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2491 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2492
Stefan Chulskiaff3da32017-09-25 14:59:46 +02002493 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
2494 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2495
2496 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
2497 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002498
2499 /* Update shadow table and hw entry */
2500 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2501 mvpp2_prs_hw_write(priv, &pe);
2502
2503 return 0;
2504}
2505
2506/* IPv4 L3 multicast or broadcast */
2507static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2508{
2509 struct mvpp2_prs_entry pe;
2510 int mask, tid;
2511
2512 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2513 MVPP2_PE_LAST_FREE_TID);
2514 if (tid < 0)
2515 return tid;
2516
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002517 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002518 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2519 pe.index = tid;
2520
2521 switch (l3_cast) {
2522 case MVPP2_PRS_L3_MULTI_CAST:
2523 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2524 MVPP2_PRS_IPV4_MC_MASK);
2525 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2526 MVPP2_PRS_RI_L3_ADDR_MASK);
2527 break;
2528 case MVPP2_PRS_L3_BROAD_CAST:
2529 mask = MVPP2_PRS_IPV4_BC_MASK;
2530 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2531 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2532 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2533 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2534 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2535 MVPP2_PRS_RI_L3_ADDR_MASK);
2536 break;
2537 default:
2538 return -EINVAL;
2539 }
2540
2541 /* Finished: go to flowid generation */
2542 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2543 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2544
2545 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2546 MVPP2_PRS_IPV4_DIP_AI_BIT);
2547 /* Unmask all ports */
2548 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2549
2550 /* Update shadow table and hw entry */
2551 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2552 mvpp2_prs_hw_write(priv, &pe);
2553
2554 return 0;
2555}
2556
2557/* Set entries for protocols over IPv6 */
2558static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2559 unsigned int ri, unsigned int ri_mask)
2560{
2561 struct mvpp2_prs_entry pe;
2562 int tid;
2563
2564 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2565 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2566 return -EINVAL;
2567
2568 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2569 MVPP2_PE_LAST_FREE_TID);
2570 if (tid < 0)
2571 return tid;
2572
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002573 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002574 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2575 pe.index = tid;
2576
2577 /* Finished: go to flowid generation */
2578 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2579 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2580 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2581 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2582 sizeof(struct ipv6hdr) - 6,
2583 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2584
2585 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2586 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2587 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2588 /* Unmask all ports */
2589 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2590
2591 /* Write HW */
2592 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2593 mvpp2_prs_hw_write(priv, &pe);
2594
2595 return 0;
2596}
2597
2598/* IPv6 L3 multicast entry */
2599static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2600{
2601 struct mvpp2_prs_entry pe;
2602 int tid;
2603
2604 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2605 return -EINVAL;
2606
2607 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2608 MVPP2_PE_LAST_FREE_TID);
2609 if (tid < 0)
2610 return tid;
2611
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002612 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002613 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2614 pe.index = tid;
2615
2616 /* Finished: go to flowid generation */
2617 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2618 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2619 MVPP2_PRS_RI_L3_ADDR_MASK);
2620 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2621 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2622 /* Shift back to IPv6 NH */
2623 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2624
2625 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2626 MVPP2_PRS_IPV6_MC_MASK);
2627 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2628 /* Unmask all ports */
2629 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2630
2631 /* Update shadow table and hw entry */
2632 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2633 mvpp2_prs_hw_write(priv, &pe);
2634
2635 return 0;
2636}
2637
2638/* Parser per-port initialization */
2639static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2640 int lu_max, int offset)
2641{
2642 u32 val;
2643
2644 /* Set lookup ID */
2645 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2646 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2647 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2648 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2649
2650 /* Set maximum number of loops for packet received from port */
2651 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2652 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2653 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2654 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2655
2656 /* Set initial offset for packet header extraction for the first
2657 * searching loop
2658 */
2659 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2660 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2661 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2662 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2663}
2664
2665/* Default flow entries initialization for all ports */
2666static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2667{
2668 struct mvpp2_prs_entry pe;
2669 int port;
2670
2671 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002672 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002673 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2674 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2675
2676 /* Mask all ports */
2677 mvpp2_prs_tcam_port_map_set(&pe, 0);
2678
2679 /* Set flow ID*/
2680 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2681 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2682
2683 /* Update shadow table and hw entry */
2684 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2685 mvpp2_prs_hw_write(priv, &pe);
2686 }
2687}
2688
2689/* Set default entry for Marvell Header field */
2690static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2691{
2692 struct mvpp2_prs_entry pe;
2693
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002694 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002695
2696 pe.index = MVPP2_PE_MH_DEFAULT;
2697 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2698 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2699 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2700 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2701
2702 /* Unmask all ports */
2703 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2704
2705 /* Update shadow table and hw entry */
2706 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2707 mvpp2_prs_hw_write(priv, &pe);
2708}
2709
/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Mark the result info DROP bit and finish the lookup by
	 * generating a flow
	 */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Create dummy entries for drop all and promiscuous modes;
	 * the 'false' argument presumably leaves them disabled until a
	 * port turns them on - confirm against the helpers' definitions
	 */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}
2740
/* Set default entries for various types of dsa packets.
 * Installs placeholder entries for every tagged/untagged x DSA/EDSA
 * combination (the 'false'/'true' argument presumably controls whether
 * the entry starts enabled - confirm against mvpp2_prs_dsa_tag_set),
 * then a default entry used when no DSA or EDSA tag is recognized.
 */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* NOTE(review): shadow records LU_MAC while the tcam lu is LU_DSA -
	 * looks intentional (entry belongs to the MAC stage) but verify
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
2794
Maxime Chevallier56beda32018-02-28 10:14:13 +01002795/* Initialize parser entries for VID filtering */
2796static void mvpp2_prs_vid_init(struct mvpp2 *priv)
2797{
2798 struct mvpp2_prs_entry pe;
2799
2800 memset(&pe, 0, sizeof(pe));
2801
2802 /* Set default vid entry */
2803 pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
2804 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2805
2806 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
2807
2808 /* Skip VLAN header - Set offset to 4 bytes */
2809 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
2810 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2811
2812 /* Clear all ai bits for next iteration */
2813 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2814
2815 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2816
2817 /* Unmask all ports */
2818 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2819
2820 /* Update shadow table and hw entry */
2821 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2822 mvpp2_prs_hw_write(priv, &pe);
2823
2824 /* Set default vid entry for extended DSA*/
2825 memset(&pe, 0, sizeof(pe));
2826
2827 /* Set default vid entry */
2828 pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
2829 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2830
2831 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
2832 MVPP2_PRS_EDSA_VID_AI_BIT);
2833
2834 /* Skip VLAN header - Set offset to 8 bytes */
2835 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
2836 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2837
2838 /* Clear all ai bits for next iteration */
2839 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2840
2841 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2842
2843 /* Unmask all ports */
2844 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2845
2846 /* Update shadow table and hw entry */
2847 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2848 mvpp2_prs_hw_write(priv, &pe);
2849}
2850
/* Match basic ethertypes.
 * Installs L2-stage TCAM entries for PPPoE, ARP, LBTD, IPv4 (with and
 * without options) and IPv6, plus a catch-all entry for unknown
 * ethertypes.  Each entry records its result info in the shadow table
 * as well.  Returns 0 on success or a negative error code from TCAM
 * allocation.
 */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	/* Skip the PPPoE header and continue in the PPPOE lookup stage */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options.
	 * Deliberately reuses the previous entry's pe (no memset): only
	 * the fields that differ are rewritten below.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	/* Match only the IPv4 version nibble, any IHL */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
3071
3072/* Configure vlan entries and detect up to 2 successive VLAN tags.
3073 * Possible options:
3074 * 0x8100, 0x88A8
3075 * 0x8100, 0x8100
3076 * 0x8100
3077 * 0x88A8
3078 */
3079static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
3080{
3081 struct mvpp2_prs_entry pe;
3082 int err;
3083
3084 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
3085 MVPP2_PRS_DBL_VLANS_MAX,
3086 GFP_KERNEL);
3087 if (!priv->prs_double_vlans)
3088 return -ENOMEM;
3089
3090 /* Double VLAN: 0x8100, 0x88A8 */
3091 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
3092 MVPP2_PRS_PORT_MASK);
3093 if (err)
3094 return err;
3095
3096 /* Double VLAN: 0x8100, 0x8100 */
3097 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
3098 MVPP2_PRS_PORT_MASK);
3099 if (err)
3100 return err;
3101
3102 /* Single VLAN: 0x88a8 */
3103 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
3104 MVPP2_PRS_PORT_MASK);
3105 if (err)
3106 return err;
3107
3108 /* Single VLAN: 0x8100 */
3109 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
3110 MVPP2_PRS_PORT_MASK);
3111 if (err)
3112 return err;
3113
3114 /* Set default double vlan entry */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003115 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003116 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
3117 pe.index = MVPP2_PE_VLAN_DBL;
3118
Maxime Chevallier56beda32018-02-28 10:14:13 +01003119 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
3120
Marcin Wojtas3f518502014-07-10 16:52:13 -03003121 /* Clear ai for next iterations */
3122 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
3123 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
3124 MVPP2_PRS_RI_VLAN_MASK);
3125
3126 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
3127 MVPP2_PRS_DBL_VLAN_AI_BIT);
3128 /* Unmask all ports */
3129 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3130
3131 /* Update shadow table and hw entry */
3132 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
3133 mvpp2_prs_hw_write(priv, &pe);
3134
3135 /* Set default vlan none entry */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003136 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003137 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
3138 pe.index = MVPP2_PE_VLAN_NONE;
3139
3140 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
3141 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
3142 MVPP2_PRS_RI_VLAN_MASK);
3143
3144 /* Unmask all ports */
3145 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3146
3147 /* Update shadow table and hw entry */
3148 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
3149 mvpp2_prs_hw_write(priv, &pe);
3150
3151 return 0;
3152}
3153
/* Set entries for PPPoE ethertype.
 *
 * Installs four parser entries in the MVPP2_PRS_LU_PPPOE lookup:
 *   - IPv4 over PPPoE with options
 *   - IPv4 over PPPoE without options (IHL == 5)
 *   - IPv6 over PPPoE
 *   - non-IP over PPPoE (unknown L3)
 *
 * Returns 0 on success, or a negative value if no free TCAM entry is
 * available.
 */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* Deliberately no memset here: this entry reuses the previous one's
	 * configuration and only overrides the index, the IHL match and the
	 * result-info words below.
	 */
	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
3264
/* Initialize entries for IPv4.
 *
 * Installs the per-protocol entries (TCP/UDP/IGMP), the broadcast and
 * multicast cast entries, and two defaults: unknown L4 protocol and
 * unicast DIP.
 *
 * Returns 0 on success, or the first error from a helper.
 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* IGMP frames are flagged as special so they can be steered to
	 * the CPU rather than handled as a regular L4 protocol.
	 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	/* Shift past the remaining 12 bytes so the next pass sees the DIP */
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	/* Only match the first (pre-DIP) pass: ai bit still clear */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	/* Match the second (DIP) pass: ai bit set by the entry above */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
3347
/* Initialize entries for IPv6.
 *
 * Installs the per-protocol entries (TCP/UDP/ICMPv6/DS-Lite), the
 * multicast cast entry, a hop-limit-zero drop entry, and defaults for
 * unknown protocol, unknown extension protocol and unicast address.
 *
 * Returns 0 on success, or a negative value on helper failure / no free
 * TCAM entry.
 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* ICMPv6 is flagged as special traffic for the CPU */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit: drop packets with hop limit == 0 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	/* Match hop limit byte == 0 */
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	/* NOTE(review): shadow lookup is tagged MVPP2_PRS_LU_IP4 although
	 * this is an IPv6 entry — matches long-standing upstream code, but
	 * confirm whether MVPP2_PRS_LU_IP6 was intended.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	/* NOTE(review): LU_IP4 tag on an IPv6 entry — see note above */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	/* NOTE(review): LU_IP4 tag on an IPv6 entry — see note above */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Only match the first pass: no-ext ai bit still clear */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
3482
Maxime Chevallier56beda32018-02-28 10:14:13 +01003483/* Find tcam entry with matched pair <vid,port> */
3484static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
3485 u16 mask)
3486{
3487 unsigned char byte[2], enable[2];
3488 struct mvpp2_prs_entry pe;
3489 u16 rvid, rmask;
3490 int tid;
3491
3492 /* Go through the all entries with MVPP2_PRS_LU_VID */
3493 for (tid = MVPP2_PE_VID_FILT_RANGE_START;
3494 tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
3495 if (!priv->prs_shadow[tid].valid ||
3496 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
3497 continue;
3498
Maxime Chevallier47e0e142018-03-26 15:34:22 +02003499 mvpp2_prs_init_from_hw(priv, &pe, tid);
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02003500
Maxime Chevallier56beda32018-02-28 10:14:13 +01003501 mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
3502 mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
3503
3504 rvid = ((byte[0] & 0xf) << 8) + byte[1];
3505 rmask = ((enable[0] & 0xf) << 8) + enable[1];
3506
3507 if (rvid != vid || rmask != mask)
3508 continue;
3509
3510 return tid;
3511 }
3512
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02003513 return -ENOENT;
Maxime Chevallier56beda32018-02-28 10:14:13 +01003514}
3515
/* Write parser entry for VID filtering.
 *
 * Installs (or updates) a TCAM entry in this port's VID filter range that
 * accepts frames tagged with @vid, skips the VLAN tag (4 or 8 bytes
 * depending on the port's DSA mode) and continues with the L2 lookup.
 *
 * Returns 0 on success, or a negative value if the port's VID filter
 * range is full.
 */
static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
{
	/* Each port owns a dedicated slice of the VID filter range */
	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
	unsigned int mask = 0xfff, reg_val, shift;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <vid,port> already exist */
	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);

	/* Extended DSA tags make the VLAN header 8 bytes instead of 4 */
	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	/* No such entry */
	if (tid < 0) {
		/* Go through all entries from first to last in vlan range */
		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
						vid_start +
						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);

		/* There isn't room for a new VID filter */
		if (tid < 0)
			return tid;

		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		/* Entry exists: load it so the port can be added to it */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Enable the current port */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set match on VID */
	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
3579
3580/* Write parser entry for VID filtering */
3581static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
3582{
3583 struct mvpp2 *priv = port->priv;
3584 int tid;
3585
3586 /* Scan TCAM and see if entry with this <vid,port> already exist */
3587 tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
3588
3589 /* No such entry */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02003590 if (tid < 0)
Maxime Chevallier56beda32018-02-28 10:14:13 +01003591 return;
3592
3593 mvpp2_prs_hw_inv(priv, tid);
3594 priv->prs_shadow[tid].valid = false;
3595}
3596
3597/* Remove all existing VID filters on this port */
3598static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
3599{
3600 struct mvpp2 *priv = port->priv;
3601 int tid;
3602
3603 for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
3604 tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
3605 if (priv->prs_shadow[tid].valid)
3606 mvpp2_prs_vid_entry_remove(port, tid);
3607 }
3608}
3609
3610/* Remove VID filering entry for this port */
3611static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
3612{
3613 unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
3614 struct mvpp2 *priv = port->priv;
3615
3616 /* Invalidate the guard entry */
3617 mvpp2_prs_hw_inv(priv, tid);
3618
3619 priv->prs_shadow[tid].valid = false;
3620}
3621
/* Add guard entry that drops packets when no VID is matched on this port.
 *
 * The guard sits at the port's default slot in the VID range; because it
 * comes after the per-VID entries, it only fires when none of them
 * matched. Idempotent: does nothing if the guard is already installed.
 */
static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;
	unsigned int reg_val, shift;
	struct mvpp2_prs_entry pe;

	/* Already enabled for this port */
	if (priv->prs_shadow[tid].valid)
		return;

	memset(&pe, 0, sizeof(pe));

	pe.index = tid;

	/* Extended DSA tags make the VLAN header 8 bytes instead of 4 */
	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Drop VLAN packets that don't belong to any VIDs on this port */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
3668
/* Parser default initialization.
 *
 * Enables the TCAM, wipes every TCAM/SRAM entry, allocates the software
 * shadow table, then installs the default lookup chains (MH, MAC, DSA,
 * VID, ethertype, VLAN, PPPoE, IPv6, IPv4).
 *
 * Returns 0 on success, -ENOMEM on shadow allocation failure, or the
 * first error from a sub-initializer.
 */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	/* Shadow table mirrors the hardware TCAM state in software */
	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
3736
3737/* Compare MAC DA with tcam entry data */
3738static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3739 const u8 *da, unsigned char *mask)
3740{
3741 unsigned char tcam_byte, tcam_mask;
3742 int index;
3743
3744 for (index = 0; index < ETH_ALEN; index++) {
3745 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3746 if (tcam_mask != mask[index])
3747 return false;
3748
3749 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3750 return false;
3751 }
3752
3753 return true;
3754}
3755
3756/* Find tcam entry with matched pair <MAC DA, port> */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02003757static int
Marcin Wojtas3f518502014-07-10 16:52:13 -03003758mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3759 unsigned char *mask, int udf_type)
3760{
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02003761 struct mvpp2_prs_entry pe;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003762 int tid;
3763
Marcin Wojtas3f518502014-07-10 16:52:13 -03003764 /* Go through the all entires with MVPP2_PRS_LU_MAC */
Maxime Chevallier10fea262018-03-07 15:18:04 +01003765 for (tid = MVPP2_PE_MAC_RANGE_START;
3766 tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03003767 unsigned int entry_pmap;
3768
3769 if (!priv->prs_shadow[tid].valid ||
3770 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3771 (priv->prs_shadow[tid].udf != udf_type))
3772 continue;
3773
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02003774 mvpp2_prs_init_from_hw(priv, &pe, tid);
3775 entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003776
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02003777 if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
Marcin Wojtas3f518502014-07-10 16:52:13 -03003778 entry_pmap == pmap)
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02003779 return tid;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003780 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03003781
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02003782 return -ENOENT;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003783}
3784
/* Update parser's mac da entry.
 *
 * When @add is true, installs (or extends) the TCAM entry matching @da
 * for this port; when false, removes this port from the entry and
 * invalidates it once no ports are left. The result info classifies the
 * address as broadcast/multicast/unicast, with MAC_ME set for the port's
 * own address.
 *
 * Returns 0 on success, a negative value if the MAC range is full or the
 * entry state is inconsistent (adding yielded an empty port map).
 */
static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da,
				   bool add)
{
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mvpp2 *priv = port->priv;
	unsigned int pmap, len, ri;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
					  MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (tid < 0) {
		/* Removing a non-existent entry is a no-op, not an error */
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Go through the all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv,
						MVPP2_PE_MAC_RANGE_START,
						MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		/* Entry exists: load it and adjust its port membership */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		/* An empty map right after adding a port is inconsistent */
		if (add)
			return -EINVAL;

		mvpp2_prs_hw_inv(priv, pe.index);
		priv->prs_shadow[pe.index].valid = false;
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_BCAST;
	} else if (is_multicast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_MCAST;
	} else {
		ri = MVPP2_PRS_RI_L2_UCAST;

		/* Flag the port's own unicast address */
		if (ether_addr_equal(da, port->dev->dev_addr))
			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
	}

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
3874
3875static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3876{
3877 struct mvpp2_port *port = netdev_priv(dev);
3878 int err;
3879
3880 /* Remove old parser entry */
Maxime Chevallierce2a27c2018-03-07 15:18:03 +01003881 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003882 if (err)
3883 return err;
3884
3885 /* Add new parser entry */
Maxime Chevallierce2a27c2018-03-07 15:18:03 +01003886 err = mvpp2_prs_mac_da_accept(port, da, true);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003887 if (err)
3888 return err;
3889
3890 /* Set addr in the device */
3891 ether_addr_copy(dev->dev_addr, da);
3892
3893 return 0;
3894}
3895
/* Remove this port from every MAC DA parser entry, except the broadcast
 * address and the port's own address, which must keep working.
 */
static void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	unsigned long pmap;
	int index, tid;

	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		/* Only consider valid default-UDF MAC entries */
		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* We only want entries active on this port */
		if (!test_bit(port->id, &pmap))
			continue;

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		/* Special cases : Don't remove broadcast and port's own
		 * address
		 */
		if (is_broadcast_ether_addr(da) ||
		    ether_addr_equal(da, port->dev->dev_addr))
			continue;

		/* Remove entry from TCAM */
		mvpp2_prs_mac_da_accept(port, da, false);
	}
}
3936
3937static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3938{
3939 switch (type) {
3940 case MVPP2_TAG_TYPE_EDSA:
3941 /* Add port to EDSA entries */
3942 mvpp2_prs_dsa_tag_set(priv, port, true,
3943 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3944 mvpp2_prs_dsa_tag_set(priv, port, true,
3945 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3946 /* Remove port from DSA entries */
3947 mvpp2_prs_dsa_tag_set(priv, port, false,
3948 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3949 mvpp2_prs_dsa_tag_set(priv, port, false,
3950 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3951 break;
3952
3953 case MVPP2_TAG_TYPE_DSA:
3954 /* Add port to DSA entries */
3955 mvpp2_prs_dsa_tag_set(priv, port, true,
3956 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3957 mvpp2_prs_dsa_tag_set(priv, port, true,
3958 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3959 /* Remove port from EDSA entries */
3960 mvpp2_prs_dsa_tag_set(priv, port, false,
3961 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3962 mvpp2_prs_dsa_tag_set(priv, port, false,
3963 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3964 break;
3965
3966 case MVPP2_TAG_TYPE_MH:
3967 case MVPP2_TAG_TYPE_NONE:
3968 /* Remove port form EDSA and DSA entries */
3969 mvpp2_prs_dsa_tag_set(priv, port, false,
3970 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3971 mvpp2_prs_dsa_tag_set(priv, port, false,
3972 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3973 mvpp2_prs_dsa_tag_set(priv, port, false,
3974 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3975 mvpp2_prs_dsa_tag_set(priv, port, false,
3976 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3977 break;
3978
3979 default:
3980 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3981 return -EINVAL;
3982 }
3983
3984 return 0;
3985}
3986
3987/* Set prs flow for the port */
3988static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3989{
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02003990 struct mvpp2_prs_entry pe;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003991 int tid;
3992
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02003993 memset(&pe, 0, sizeof(pe));
3994
3995 tid = mvpp2_prs_flow_find(port->priv, port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003996
3997 /* Such entry not exist */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02003998 if (tid < 0) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03003999 /* Go through the all entires from last to first */
4000 tid = mvpp2_prs_tcam_first_free(port->priv,
4001 MVPP2_PE_LAST_FREE_TID,
4002 MVPP2_PE_FIRST_FREE_TID);
4003 if (tid < 0)
4004 return tid;
4005
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02004006 pe.index = tid;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004007
4008 /* Set flow ID*/
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02004009 mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
4010 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004011
4012 /* Update shadow table */
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02004013 mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
4014 } else {
4015 mvpp2_prs_init_from_hw(port->priv, &pe, tid);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004016 }
4017
Maxime Chevallier0c6d9b42018-03-26 15:34:23 +02004018 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
4019 mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
4020 mvpp2_prs_hw_write(port->priv, &pe);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004021
4022 return 0;
4023}
4024
4025/* Classifier configuration routines */
4026
4027/* Update classification flow table registers */
4028static void mvpp2_cls_flow_write(struct mvpp2 *priv,
4029 struct mvpp2_cls_flow_entry *fe)
4030{
4031 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
4032 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
4033 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
4034 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
4035}
4036
4037/* Update classification lookup table register */
4038static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
4039 struct mvpp2_cls_lookup_entry *le)
4040{
4041 u32 val;
4042
4043 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
4044 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
4045 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
4046}
4047
4048/* Classifier default initialization */
4049static void mvpp2_cls_init(struct mvpp2 *priv)
4050{
4051 struct mvpp2_cls_lookup_entry le;
4052 struct mvpp2_cls_flow_entry fe;
4053 int index;
4054
4055 /* Enable classifier */
4056 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4057
4058 /* Clear classifier flow table */
Arnd Bergmanne8f967c2016-11-24 17:28:12 +01004059 memset(&fe.data, 0, sizeof(fe.data));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004060 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4061 fe.index = index;
4062 mvpp2_cls_flow_write(priv, &fe);
4063 }
4064
4065 /* Clear classifier lookup table */
4066 le.data = 0;
4067 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4068 le.lkpid = index;
4069 le.way = 0;
4070 mvpp2_cls_lookup_write(priv, &le);
4071
4072 le.way = 1;
4073 mvpp2_cls_lookup_write(priv, &le);
4074 }
4075}
4076
4077static void mvpp2_cls_port_config(struct mvpp2_port *port)
4078{
4079 struct mvpp2_cls_lookup_entry le;
4080 u32 val;
4081
4082 /* Set way for the port */
4083 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
4084 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
4085 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
4086
4087 /* Pick the entry to be accessed in lookup ID decoding table
4088 * according to the way and lkpid.
4089 */
4090 le.lkpid = port->id;
4091 le.way = 0;
4092 le.data = 0;
4093
4094 /* Set initial CPU queue for receiving packets */
4095 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
4096 le.data |= port->first_rxq;
4097
4098 /* Disable classification engines */
4099 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
4100
4101 /* Update lookup ID table entry */
4102 mvpp2_cls_lookup_write(port->priv, &le);
4103}
4104
4105/* Set CPU queue number for oversize packets */
4106static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
4107{
4108 u32 val;
4109
4110 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
4111 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
4112
4113 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
4114 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
4115
4116 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
4117 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
4118 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
4119}
4120
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004121static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
4122{
4123 if (likely(pool->frag_size <= PAGE_SIZE))
4124 return netdev_alloc_frag(pool->frag_size);
4125 else
4126 return kmalloc(pool->frag_size, GFP_ATOMIC);
4127}
4128
4129static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
4130{
4131 if (likely(pool->frag_size <= PAGE_SIZE))
4132 skb_free_frag(data);
4133 else
4134 kfree(data);
4135}
4136
Marcin Wojtas3f518502014-07-10 16:52:13 -03004137/* Buffer Manager configuration routines */
4138
4139/* Create pool */
4140static int mvpp2_bm_pool_create(struct platform_device *pdev,
4141 struct mvpp2 *priv,
4142 struct mvpp2_bm_pool *bm_pool, int size)
4143{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004144 u32 val;
4145
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004146 /* Number of buffer pointers must be a multiple of 16, as per
4147 * hardware constraints
4148 */
4149 if (!IS_ALIGNED(size, 16))
4150 return -EINVAL;
4151
4152 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
4153 * bytes per buffer pointer
4154 */
4155 if (priv->hw_version == MVPP21)
4156 bm_pool->size_bytes = 2 * sizeof(u32) * size;
4157 else
4158 bm_pool->size_bytes = 2 * sizeof(u64) * size;
4159
4160 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004161 &bm_pool->dma_addr,
Marcin Wojtas3f518502014-07-10 16:52:13 -03004162 GFP_KERNEL);
4163 if (!bm_pool->virt_addr)
4164 return -ENOMEM;
4165
Thomas Petazzonid3158802017-02-21 11:28:13 +01004166 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
4167 MVPP2_BM_POOL_PTR_ALIGN)) {
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004168 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
4169 bm_pool->virt_addr, bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004170 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
4171 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
4172 return -ENOMEM;
4173 }
4174
4175 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004176 lower_32_bits(bm_pool->dma_addr));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004177 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
4178
4179 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
4180 val |= MVPP2_BM_START_MASK;
4181 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
4182
Marcin Wojtas3f518502014-07-10 16:52:13 -03004183 bm_pool->size = size;
4184 bm_pool->pkt_size = 0;
4185 bm_pool->buf_num = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004186
4187 return 0;
4188}
4189
4190/* Set pool buffer size */
4191static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
4192 struct mvpp2_bm_pool *bm_pool,
4193 int buf_size)
4194{
4195 u32 val;
4196
4197 bm_pool->buf_size = buf_size;
4198
4199 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
4200 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
4201}
4202
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004203static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
4204 struct mvpp2_bm_pool *bm_pool,
4205 dma_addr_t *dma_addr,
4206 phys_addr_t *phys_addr)
4207{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004208 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01004209
4210 *dma_addr = mvpp2_percpu_read(priv, cpu,
4211 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
4212 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004213
4214 if (priv->hw_version == MVPP22) {
4215 u32 val;
4216 u32 dma_addr_highbits, phys_addr_highbits;
4217
Thomas Petazzonia7868412017-03-07 16:53:13 +01004218 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004219 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
4220 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
4221 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
4222
4223 if (sizeof(dma_addr_t) == 8)
4224 *dma_addr |= (u64)dma_addr_highbits << 32;
4225
4226 if (sizeof(phys_addr_t) == 8)
4227 *phys_addr |= (u64)phys_addr_highbits << 32;
4228 }
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004229
4230 put_cpu();
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004231}
4232
Ezequiel Garcia7861f122014-07-21 13:48:14 -03004233/* Free all buffers from the pool */
Marcin Wojtas4229d502015-12-03 15:20:50 +01004234static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
Stefan Chulskieffbf5f2018-03-05 15:16:51 +01004235 struct mvpp2_bm_pool *bm_pool, int buf_num)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004236{
4237 int i;
4238
Stefan Chulskieffbf5f2018-03-05 15:16:51 +01004239 if (buf_num > bm_pool->buf_num) {
4240 WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
4241 bm_pool->id, buf_num);
4242 buf_num = bm_pool->buf_num;
4243 }
4244
4245 for (i = 0; i < buf_num; i++) {
Thomas Petazzoni20396132017-03-07 16:53:00 +01004246 dma_addr_t buf_dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004247 phys_addr_t buf_phys_addr;
4248 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004249
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004250 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
4251 &buf_dma_addr, &buf_phys_addr);
Marcin Wojtas4229d502015-12-03 15:20:50 +01004252
Thomas Petazzoni20396132017-03-07 16:53:00 +01004253 dma_unmap_single(dev, buf_dma_addr,
Marcin Wojtas4229d502015-12-03 15:20:50 +01004254 bm_pool->buf_size, DMA_FROM_DEVICE);
4255
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004256 data = (void *)phys_to_virt(buf_phys_addr);
4257 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004258 break;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004259
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004260 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004261 }
4262
4263 /* Update BM driver with number of buffers removed from pool */
4264 bm_pool->buf_num -= i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004265}
4266
Stefan Chulskieffbf5f2018-03-05 15:16:51 +01004267/* Check number of buffers in BM pool */
kbuild test robot6e61e102018-03-06 13:05:06 +08004268static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
Stefan Chulskieffbf5f2018-03-05 15:16:51 +01004269{
4270 int buf_num = 0;
4271
4272 buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
4273 MVPP22_BM_POOL_PTRS_NUM_MASK;
4274 buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
4275 MVPP2_BM_BPPI_PTR_NUM_MASK;
4276
4277 /* HW has one buffer ready which is not reflected in the counters */
4278 if (buf_num)
4279 buf_num += 1;
4280
4281 return buf_num;
4282}
4283
Marcin Wojtas3f518502014-07-10 16:52:13 -03004284/* Cleanup pool */
4285static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
4286 struct mvpp2 *priv,
4287 struct mvpp2_bm_pool *bm_pool)
4288{
Stefan Chulskieffbf5f2018-03-05 15:16:51 +01004289 int buf_num;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004290 u32 val;
4291
Stefan Chulskieffbf5f2018-03-05 15:16:51 +01004292 buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
4293 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
4294
4295 /* Check buffer counters after free */
4296 buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
4297 if (buf_num) {
4298 WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
4299 bm_pool->id, bm_pool->buf_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004300 return 0;
4301 }
4302
4303 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
4304 val |= MVPP2_BM_STOP_MASK;
4305 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
4306
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004307 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
Marcin Wojtas3f518502014-07-10 16:52:13 -03004308 bm_pool->virt_addr,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004309 bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004310 return 0;
4311}
4312
4313static int mvpp2_bm_pools_init(struct platform_device *pdev,
4314 struct mvpp2 *priv)
4315{
4316 int i, err, size;
4317 struct mvpp2_bm_pool *bm_pool;
4318
4319 /* Create all pools with maximum size */
4320 size = MVPP2_BM_POOL_SIZE_MAX;
4321 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4322 bm_pool = &priv->bm_pools[i];
4323 bm_pool->id = i;
4324 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
4325 if (err)
4326 goto err_unroll_pools;
4327 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
4328 }
4329 return 0;
4330
4331err_unroll_pools:
4332 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
4333 for (i = i - 1; i >= 0; i--)
4334 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
4335 return err;
4336}
4337
4338static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
4339{
4340 int i, err;
4341
4342 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4343 /* Mask BM all interrupts */
4344 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
4345 /* Clear BM cause register */
4346 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
4347 }
4348
4349 /* Allocate and initialize BM pools */
4350 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
Markus Elfring81f915e2017-04-17 09:06:33 +02004351 sizeof(*priv->bm_pools), GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004352 if (!priv->bm_pools)
4353 return -ENOMEM;
4354
4355 err = mvpp2_bm_pools_init(pdev, priv);
4356 if (err < 0)
4357 return err;
4358 return 0;
4359}
4360
Stefan Chulski01d04932018-03-05 15:16:50 +01004361static void mvpp2_setup_bm_pool(void)
4362{
4363 /* Short pool */
4364 mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
4365 mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
4366
4367 /* Long pool */
4368 mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
4369 mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
Stefan Chulski576193f2018-03-05 15:16:54 +01004370
4371 /* Jumbo pool */
4372 mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
4373 mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
Stefan Chulski01d04932018-03-05 15:16:50 +01004374}
4375
Marcin Wojtas3f518502014-07-10 16:52:13 -03004376/* Attach long pool to rxq */
4377static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
4378 int lrxq, int long_pool)
4379{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004380 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004381 int prxq;
4382
4383 /* Get queue physical ID */
4384 prxq = port->rxqs[lrxq]->id;
4385
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004386 if (port->priv->hw_version == MVPP21)
4387 mask = MVPP21_RXQ_POOL_LONG_MASK;
4388 else
4389 mask = MVPP22_RXQ_POOL_LONG_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004390
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004391 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4392 val &= ~mask;
4393 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004394 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4395}
4396
4397/* Attach short pool to rxq */
4398static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
4399 int lrxq, int short_pool)
4400{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004401 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004402 int prxq;
4403
4404 /* Get queue physical ID */
4405 prxq = port->rxqs[lrxq]->id;
4406
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004407 if (port->priv->hw_version == MVPP21)
4408 mask = MVPP21_RXQ_POOL_SHORT_MASK;
4409 else
4410 mask = MVPP22_RXQ_POOL_SHORT_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004411
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004412 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4413 val &= ~mask;
4414 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004415 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4416}
4417
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004418static void *mvpp2_buf_alloc(struct mvpp2_port *port,
4419 struct mvpp2_bm_pool *bm_pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004420 dma_addr_t *buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004421 phys_addr_t *buf_phys_addr,
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004422 gfp_t gfp_mask)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004423{
Thomas Petazzoni20396132017-03-07 16:53:00 +01004424 dma_addr_t dma_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004425 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004426
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004427 data = mvpp2_frag_alloc(bm_pool);
4428 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004429 return NULL;
4430
Thomas Petazzoni20396132017-03-07 16:53:00 +01004431 dma_addr = dma_map_single(port->dev->dev.parent, data,
4432 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
4433 DMA_FROM_DEVICE);
4434 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004435 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004436 return NULL;
4437 }
Thomas Petazzoni20396132017-03-07 16:53:00 +01004438 *buf_dma_addr = dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004439 *buf_phys_addr = virt_to_phys(data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004440
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004441 return data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004442}
4443
Marcin Wojtas3f518502014-07-10 16:52:13 -03004444/* Release buffer to BM */
4445static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004446 dma_addr_t buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004447 phys_addr_t buf_phys_addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004448{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004449 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01004450
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004451 if (port->priv->hw_version == MVPP22) {
4452 u32 val = 0;
4453
4454 if (sizeof(dma_addr_t) == 8)
4455 val |= upper_32_bits(buf_dma_addr) &
4456 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
4457
4458 if (sizeof(phys_addr_t) == 8)
4459 val |= (upper_32_bits(buf_phys_addr)
4460 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
4461 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
4462
Yan Markmancdcfeb02018-03-27 16:49:05 +02004463 mvpp2_percpu_write_relaxed(port->priv, cpu,
4464 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004465 }
4466
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004467 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
4468 * returned in the "cookie" field of the RX
4469 * descriptor. Instead of storing the virtual address, we
4470 * store the physical address
4471 */
Yan Markmancdcfeb02018-03-27 16:49:05 +02004472 mvpp2_percpu_write_relaxed(port->priv, cpu,
4473 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
4474 mvpp2_percpu_write_relaxed(port->priv, cpu,
4475 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004476
4477 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03004478}
4479
Marcin Wojtas3f518502014-07-10 16:52:13 -03004480/* Allocate buffers for the pool */
4481static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
4482 struct mvpp2_bm_pool *bm_pool, int buf_num)
4483{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004484 int i, buf_size, total_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01004485 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004486 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004487 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004488
4489 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
4490 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
4491
4492 if (buf_num < 0 ||
4493 (buf_num + bm_pool->buf_num > bm_pool->size)) {
4494 netdev_err(port->dev,
4495 "cannot allocate %d buffers for pool %d\n",
4496 buf_num, bm_pool->id);
4497 return 0;
4498 }
4499
Marcin Wojtas3f518502014-07-10 16:52:13 -03004500 for (i = 0; i < buf_num; i++) {
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004501 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4502 &phys_addr, GFP_KERNEL);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004503 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004504 break;
4505
Thomas Petazzoni20396132017-03-07 16:53:00 +01004506 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004507 phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004508 }
4509
4510 /* Update BM driver with number of buffers added to pool */
4511 bm_pool->buf_num += i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004512
4513 netdev_dbg(port->dev,
Stefan Chulski01d04932018-03-05 15:16:50 +01004514 "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
Marcin Wojtas3f518502014-07-10 16:52:13 -03004515 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4516
4517 netdev_dbg(port->dev,
Stefan Chulski01d04932018-03-05 15:16:50 +01004518 "pool %d: %d of %d buffers added\n",
Marcin Wojtas3f518502014-07-10 16:52:13 -03004519 bm_pool->id, i, buf_num);
4520 return i;
4521}
4522
4523/* Notify the driver that BM pool is being used as specific type and return the
4524 * pool pointer on success
4525 */
4526static struct mvpp2_bm_pool *
Stefan Chulski01d04932018-03-05 15:16:50 +01004527mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004528{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004529 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4530 int num;
4531
Stefan Chulski01d04932018-03-05 15:16:50 +01004532 if (pool >= MVPP2_BM_POOLS_NUM) {
4533 netdev_err(port->dev, "Invalid pool %d\n", pool);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004534 return NULL;
4535 }
4536
Marcin Wojtas3f518502014-07-10 16:52:13 -03004537 /* Allocate buffers in case BM pool is used as long pool, but packet
4538 * size doesn't match MTU or BM pool hasn't being used yet
4539 */
Stefan Chulski01d04932018-03-05 15:16:50 +01004540 if (new_pool->pkt_size == 0) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004541 int pkts_num;
4542
4543 /* Set default buffer number or free all the buffers in case
4544 * the pool is not empty
4545 */
4546 pkts_num = new_pool->buf_num;
4547 if (pkts_num == 0)
Stefan Chulski01d04932018-03-05 15:16:50 +01004548 pkts_num = mvpp2_pools[pool].buf_num;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004549 else
Marcin Wojtas4229d502015-12-03 15:20:50 +01004550 mvpp2_bm_bufs_free(port->dev->dev.parent,
Stefan Chulskieffbf5f2018-03-05 15:16:51 +01004551 port->priv, new_pool, pkts_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004552
4553 new_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004554 new_pool->frag_size =
4555 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4556 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004557
4558 /* Allocate buffers for this pool */
4559 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4560 if (num != pkts_num) {
4561 WARN(1, "pool %d: %d of %d allocated\n",
4562 new_pool->id, num, pkts_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004563 return NULL;
4564 }
4565 }
4566
4567 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4568 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4569
Marcin Wojtas3f518502014-07-10 16:52:13 -03004570 return new_pool;
4571}
4572
4573/* Initialize pools for swf */
4574static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4575{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004576 int rxq;
Stefan Chulski576193f2018-03-05 15:16:54 +01004577 enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
4578
4579 /* If port pkt_size is higher than 1518B:
4580 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
4581 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
4582 */
4583 if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
4584 long_log_pool = MVPP2_BM_JUMBO;
4585 short_log_pool = MVPP2_BM_LONG;
4586 } else {
4587 long_log_pool = MVPP2_BM_LONG;
4588 short_log_pool = MVPP2_BM_SHORT;
4589 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004590
4591 if (!port->pool_long) {
4592 port->pool_long =
Stefan Chulski576193f2018-03-05 15:16:54 +01004593 mvpp2_bm_pool_use(port, long_log_pool,
4594 mvpp2_pools[long_log_pool].pkt_size);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004595 if (!port->pool_long)
4596 return -ENOMEM;
4597
Stefan Chulski576193f2018-03-05 15:16:54 +01004598 port->pool_long->port_map |= BIT(port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004599
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004600 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004601 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4602 }
4603
4604 if (!port->pool_short) {
4605 port->pool_short =
Stefan Chulski576193f2018-03-05 15:16:54 +01004606 mvpp2_bm_pool_use(port, short_log_pool,
Colin Ian Kinge2e03162018-03-21 17:31:15 +00004607 mvpp2_pools[short_log_pool].pkt_size);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004608 if (!port->pool_short)
4609 return -ENOMEM;
4610
Stefan Chulski576193f2018-03-05 15:16:54 +01004611 port->pool_short->port_map |= BIT(port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004612
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004613 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004614 mvpp2_rxq_short_pool_set(port, rxq,
4615 port->pool_short->id);
4616 }
4617
4618 return 0;
4619}
4620
4621static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4622{
4623 struct mvpp2_port *port = netdev_priv(dev);
Stefan Chulski576193f2018-03-05 15:16:54 +01004624 enum mvpp2_bm_pool_log_num new_long_pool;
4625 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004626
Stefan Chulski576193f2018-03-05 15:16:54 +01004627 /* If port MTU is higher than 1518B:
4628 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
4629 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
4630 */
4631 if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
4632 new_long_pool = MVPP2_BM_JUMBO;
4633 else
4634 new_long_pool = MVPP2_BM_LONG;
4635
4636 if (new_long_pool != port->pool_long->id) {
4637 /* Remove port from old short & long pool */
4638 port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
4639 port->pool_long->pkt_size);
4640 port->pool_long->port_map &= ~BIT(port->id);
4641 port->pool_long = NULL;
4642
4643 port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
4644 port->pool_short->pkt_size);
4645 port->pool_short->port_map &= ~BIT(port->id);
4646 port->pool_short = NULL;
4647
4648 port->pkt_size = pkt_size;
4649
4650 /* Add port to new short & long pool */
4651 mvpp2_swf_bm_pool_init(port);
4652
4653 /* Update L4 checksum when jumbo enable/disable on port */
4654 if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
4655 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
4656 dev->hw_features &= ~(NETIF_F_IP_CSUM |
4657 NETIF_F_IPV6_CSUM);
4658 } else {
4659 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4660 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4661 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004662 }
4663
Marcin Wojtas3f518502014-07-10 16:52:13 -03004664 dev->mtu = mtu;
Stefan Chulski576193f2018-03-05 15:16:54 +01004665 dev->wanted_features = dev->features;
4666
Marcin Wojtas3f518502014-07-10 16:52:13 -03004667 netdev_update_features(dev);
4668 return 0;
4669}
4670
4671static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4672{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004673 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004674
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004675 for (i = 0; i < port->nqvecs; i++)
4676 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4677
Marcin Wojtas3f518502014-07-10 16:52:13 -03004678 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004679 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004680}
4681
4682static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4683{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004684 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004685
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004686 for (i = 0; i < port->nqvecs; i++)
4687 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4688
Marcin Wojtas3f518502014-07-10 16:52:13 -03004689 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004690 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
4691}
4692
4693static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
4694{
4695 struct mvpp2_port *port = qvec->port;
4696
4697 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4698 MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
4699}
4700
4701static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
4702{
4703 struct mvpp2_port *port = qvec->port;
4704
4705 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4706 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004707}
4708
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004709/* Mask the current CPU's Rx/Tx interrupts
4710 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4711 * using smp_processor_id() is OK.
4712 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004713static void mvpp2_interrupts_mask(void *arg)
4714{
4715 struct mvpp2_port *port = arg;
4716
Thomas Petazzonia7868412017-03-07 16:53:13 +01004717 mvpp2_percpu_write(port->priv, smp_processor_id(),
4718 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004719}
4720
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004721/* Unmask the current CPU's Rx/Tx interrupts.
4722 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4723 * using smp_processor_id() is OK.
4724 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004725static void mvpp2_interrupts_unmask(void *arg)
4726{
4727 struct mvpp2_port *port = arg;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004728 u32 val;
4729
4730 val = MVPP2_CAUSE_MISC_SUM_MASK |
4731 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4732 if (port->has_tx_irqs)
4733 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004734
Thomas Petazzonia7868412017-03-07 16:53:13 +01004735 mvpp2_percpu_write(port->priv, smp_processor_id(),
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004736 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4737}
4738
4739static void
4740mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
4741{
4742 u32 val;
4743 int i;
4744
4745 if (port->priv->hw_version != MVPP22)
4746 return;
4747
4748 if (mask)
4749 val = 0;
4750 else
4751 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4752
4753 for (i = 0; i < port->nqvecs; i++) {
4754 struct mvpp2_queue_vector *v = port->qvecs + i;
4755
4756 if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
4757 continue;
4758
4759 mvpp2_percpu_write(port->priv, v->sw_thread_id,
4760 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4761 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004762}
4763
4764/* Port configuration routines */
4765
Antoine Ténartf84bf382017-08-22 19:08:27 +02004766static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
4767{
4768 struct mvpp2 *priv = port->priv;
4769 u32 val;
4770
4771 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4772 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
4773 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4774
4775 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4776 if (port->gop_id == 2)
4777 val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
4778 else if (port->gop_id == 3)
4779 val |= GENCONF_CTRL0_PORT1_RGMII_MII;
4780 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4781}
4782
/* Configure the system controller for an SGMII port: select the bus width
 * and Rx data sampling, and clear the RGMII selection bits that GOP ports
 * 2 and 3 share with the RGMII mode.
 */
static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	/* Only GOP ports 2 and 3 have RGMII selection bits to undo */
	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}
4802
/* Initialize the XPCS and MPCS blocks for a 10GBASE-KR port */
static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	/* XPCS: clear the PCS mode and select two active lanes */
	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	/* MPCS: disable forwarding of error connections */
	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	/* Program the clock divide ratio while MAC and serdes Rx/Tx are
	 * held in reset.
	 */
	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
		 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	/* Second write deliberately reuses 'val' from the previous write:
	 * drop DIV_SET and take the MAC and serdes Rx/Tx out of reset.
	 */
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
	val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}
4832
/* Configure the GOP (group of ports) glue logic for this port's PHY
 * interface mode via the system controller.
 *
 * Returns 0 on success (including the no-sysctrl and unsupported-mode
 * cases, which are silently skipped), -EINVAL for a mode that is invalid
 * on this GOP port.
 */
static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	/* Nothing to do when no system controller was provided */
	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		/* GOP port 0 cannot do RGMII */
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GKR:
		/* Only GOP port 0 supports 10GBASE-KR */
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	/* Common part: reset/enable the port and clear the clock divider
	 * phase, then soft-reset the whole GOP.
	 */
	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}
4882
Antoine Tenartfd3651b2017-09-01 11:04:54 +02004883static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
4884{
4885 u32 val;
4886
4887 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4888 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4889 /* Enable the GMAC link status irq for this port */
4890 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4891 val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4892 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4893 }
4894
4895 if (port->gop_id == 0) {
4896 /* Enable the XLG/GIG irqs for this port */
4897 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4898 if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4899 val |= MVPP22_XLG_EXT_INT_MASK_XLG;
4900 else
4901 val |= MVPP22_XLG_EXT_INT_MASK_GIG;
4902 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4903 }
4904}
4905
/* Mask the GOP-level (link status) interrupts of this port; the inverse
 * of mvpp22_gop_unmask_irq().
 */
static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has the XLG MAC and its external irq mask */
	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Mask the GMAC link status irq */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}
4924
/* One-time setup of the per-MAC link interrupt masks, then unmask the
 * GOP-level summary interrupts for this port.
 */
static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link-status change interrupt */
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}
4944
Antoine Tenart542897d2017-08-30 10:29:15 +02004945static int mvpp22_comphy_init(struct mvpp2_port *port)
4946{
4947 enum phy_mode mode;
4948 int ret;
4949
4950 if (!port->comphy)
4951 return 0;
4952
4953 switch (port->phy_interface) {
4954 case PHY_INTERFACE_MODE_SGMII:
4955 mode = PHY_MODE_SGMII;
4956 break;
4957 case PHY_INTERFACE_MODE_10GKR:
4958 mode = PHY_MODE_10GKR;
4959 break;
4960 default:
4961 return -EINVAL;
4962 }
4963
4964 ret = phy_set_mode(port->comphy, mode);
4965 if (ret)
4966 return ret;
4967
4968 return phy_power_on(port->comphy);
4969}
4970
Antoine Ténart39193572017-08-22 19:08:24 +02004971static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
4972{
4973 u32 val;
4974
4975 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4976 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4977 val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
4978 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4979 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4980 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
Antoine Tenart1df22702017-09-01 11:04:52 +02004981 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
Antoine Ténart39193572017-08-22 19:08:24 +02004982 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4983 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
4984 MVPP22_CTRL4_SYNC_BYPASS_DIS |
4985 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4986 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4987 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
Antoine Ténart39193572017-08-22 19:08:24 +02004988 }
4989
4990 /* The port is connected to a copper PHY */
4991 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4992 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
4993 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4994
4995 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4996 val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
4997 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
4998 MVPP2_GMAC_AN_DUPLEX_EN;
4999 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5000 val |= MVPP2_GMAC_IN_BAND_AUTONEG;
5001 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5002}
5003
/* Full GMAC MII reconfiguration: force the link down, hold the MAC in
 * reset while the PCS/in-band-AN and mode bits are reprogrammed, then
 * release the reset and the forced link-down. The ordering of the register
 * writes is part of the hardware contract; do not reorder.
 */
static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
{
	u32 val;

	/* Force link down */
	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	val |= MVPP2_GMAC_FORCE_LINK_DOWN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	/* Set the GMAC in a reset state */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	val |= MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	/* Configure the PCS and in-band AN: PCS on for SGMII, off for RGMII */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
	} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	mvpp2_port_mii_gmac_configure_mode(port);

	/* Unset the GMAC reset state */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	/* Stop forcing link down */
	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
5040
Antoine Ténart77321952017-08-22 19:08:25 +02005041static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
5042{
5043 u32 val;
5044
5045 if (port->gop_id != 0)
5046 return;
5047
5048 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5049 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
5050 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5051
5052 val = readl(port->base + MVPP22_XLG_CTRL4_REG);
5053 val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
5054 val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
5055 writel(val, port->base + MVPP22_XLG_CTRL4_REG);
5056}
5057
Thomas Petazzoni26975822017-03-07 16:53:14 +01005058static void mvpp22_port_mii_set(struct mvpp2_port *port)
5059{
5060 u32 val;
5061
Thomas Petazzoni26975822017-03-07 16:53:14 +01005062 /* Only GOP port 0 has an XLG MAC */
5063 if (port->gop_id == 0) {
5064 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
5065 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
Antoine Ténart725757a2017-06-12 16:01:39 +02005066
5067 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5068 port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5069 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
5070 else
5071 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
5072
Thomas Petazzoni26975822017-03-07 16:53:14 +01005073 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
5074 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01005075}
5076
Marcin Wojtas3f518502014-07-10 16:52:13 -03005077static void mvpp2_port_mii_set(struct mvpp2_port *port)
5078{
Thomas Petazzoni26975822017-03-07 16:53:14 +01005079 if (port->priv->hw_version == MVPP22)
5080 mvpp22_port_mii_set(port);
5081
Antoine Tenart1df22702017-09-01 11:04:52 +02005082 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
Antoine Ténart39193572017-08-22 19:08:24 +02005083 port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5084 mvpp2_port_mii_gmac_configure(port);
Antoine Ténart77321952017-08-22 19:08:25 +02005085 else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5086 mvpp2_port_mii_xlg_configure(port);
Marcin Wojtas08a23752014-07-21 13:48:12 -03005087}
5088
5089static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
5090{
5091 u32 val;
5092
5093 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5094 val |= MVPP2_GMAC_FC_ADV_EN;
5095 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005096}
5097
/* Enable the port's MAC and its MIB counters, picking the XLG MAC for a
 * 10G interface on GOP port 0 and the GMAC otherwise.
 */
static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN |
		       MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		/* Clearing this bit re-enables the MIB counters */
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}
5118
/* Disable the port's MAC; the counterpart of mvpp2_port_enable(). For the
 * XLG MAC this also re-asserts the MAC reset (clears MAC_RESET_DIS).
 */
static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
			 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val &= ~(MVPP2_GMAC_PORT_EN_MASK);
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}
5137
5138/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
5139static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
5140{
5141 u32 val;
5142
5143 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
5144 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
5145 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5146}
5147
5148/* Configure loopback port */
5149static void mvpp2_port_loopback_set(struct mvpp2_port *port)
5150{
5151 u32 val;
5152
5153 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5154
5155 if (port->speed == 1000)
5156 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
5157 else
5158 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
5159
5160 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5161 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
5162 else
5163 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
5164
5165 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5166}
5167
Miquel Raynal118d6292017-11-06 22:56:53 +01005168struct mvpp2_ethtool_counter {
5169 unsigned int offset;
5170 const char string[ETH_GSTRING_LEN];
5171 bool reg_is_64b;
5172};
5173
5174static u64 mvpp2_read_count(struct mvpp2_port *port,
5175 const struct mvpp2_ethtool_counter *counter)
5176{
5177 u64 val;
5178
5179 val = readl(port->stats_base + counter->offset);
5180 if (counter->reg_is_64b)
5181 val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
5182
5183 return val;
5184}
5185
/* Due to the fact that software statistics and hardware statistics are, by
 * design, incremented at different moments in the chain of packet processing,
 * it is very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets), and in the oppposite way, during transmission, FCS bytes
 * are added in between as well as TSO skb will be split and header bytes added.
 * Hence, statistics gathered from userspace with ifconfig (software) and
 * ethtool (hardware) cannot be compared.
 */
/* Entries with a trailing 'true' are 64-bit counters (see reg_is_64b) */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};
5224
5225static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
5226 u8 *data)
5227{
5228 if (sset == ETH_SS_STATS) {
5229 int i;
5230
5231 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5232 memcpy(data + i * ETH_GSTRING_LEN,
5233 &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
5234 }
5235}
5236
/* Periodic worker that accumulates the (clear-on-read) hardware MIB
 * counters into port->ethtool_stats. Also called synchronously from
 * mvpp2_ethtool_get_stats(); gather_stats_lock serializes both paths.
 */
static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);
	u64 *pstats;
	int i;

	mutex_lock(&port->gather_stats_lock);

	/* Counters are clear-on-read, so accumulate rather than assign */
	pstats = port->ethtool_stats;
	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);

	/* No need to read again the counters right after this function if it
	 * was called asynchronously by the user (ie. use of ethtool).
	 */
	cancel_delayed_work(&port->stats_work);
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	mutex_unlock(&port->gather_stats_lock);
}
5260
/* ethtool .get_ethtool_stats: refresh the accumulated HW counters, then
 * copy them out under the lock.
 */
static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Update statistics for the given port, then take the lock to avoid
	 * concurrent accesses on the ethtool_stats structure during its copy.
	 */
	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats,
	       sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
	mutex_unlock(&port->gather_stats_lock);
}
5276
5277static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
5278{
5279 if (sset == ETH_SS_STATS)
5280 return ARRAY_SIZE(mvpp2_ethtool_regs);
5281
5282 return -EOPNOTSUPP;
5283}
5284
Marcin Wojtas3f518502014-07-10 16:52:13 -03005285static void mvpp2_port_reset(struct mvpp2_port *port)
5286{
5287 u32 val;
Miquel Raynal118d6292017-11-06 22:56:53 +01005288 unsigned int i;
5289
5290 /* Read the GOP statistics to reset the hardware counters */
5291 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5292 mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005293
5294 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5295 ~MVPP2_GMAC_PORT_RESET_MASK;
5296 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5297
5298 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5299 MVPP2_GMAC_PORT_RESET_MASK)
5300 continue;
5301}
5302
5303/* Change maximum receive size of the port */
5304static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
5305{
5306 u32 val;
5307
5308 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5309 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
5310 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
5311 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
5312 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5313}
5314
Stefan Chulski76eb1b12017-08-22 19:08:26 +02005315/* Change maximum receive size of the port */
5316static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
5317{
5318 u32 val;
5319
5320 val = readl(port->base + MVPP22_XLG_CTRL1_REG);
5321 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
5322 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
Antoine Ténartec15ecd2017-08-25 15:24:46 +02005323 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
Stefan Chulski76eb1b12017-08-22 19:08:26 +02005324 writel(val, port->base + MVPP22_XLG_CTRL1_REG);
5325}
5326
Marcin Wojtas3f518502014-07-10 16:52:13 -03005327/* Set defaults to the MVPP2 port */
5328static void mvpp2_defaults_set(struct mvpp2_port *port)
5329{
5330 int tx_port_num, val, queue, ptxq, lrxq;
5331
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01005332 if (port->priv->hw_version == MVPP21) {
5333 /* Configure port to loopback if needed */
5334 if (port->flags & MVPP2_F_LOOPBACK)
5335 mvpp2_port_loopback_set(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005336
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01005337 /* Update TX FIFO MIN Threshold */
5338 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5339 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
5340 /* Min. TX threshold must be less than minimal packet length */
5341 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
5342 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5343 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005344
5345 /* Disable Legacy WRR, Disable EJP, Release from reset */
5346 tx_port_num = mvpp2_egress_port(port);
5347 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
5348 tx_port_num);
5349 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
5350
5351 /* Close bandwidth for all queues */
5352 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
5353 ptxq = mvpp2_txq_phys(port->id, queue);
5354 mvpp2_write(port->priv,
5355 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
5356 }
5357
5358 /* Set refill period to 1 usec, refill tokens
5359 * and bucket size to maximum
5360 */
5361 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
5362 port->priv->tclk / USEC_PER_SEC);
5363 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
5364 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
5365 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
5366 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
5367 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
5368 val = MVPP2_TXP_TOKEN_SIZE_MAX;
5369 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5370
5371 /* Set MaximumLowLatencyPacketSize value to 256 */
5372 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
5373 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
5374 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
5375
5376 /* Enable Rx cache snoop */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005377 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005378 queue = port->rxqs[lrxq]->id;
5379 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5380 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
5381 MVPP2_SNOOP_BUF_HDR_MASK;
5382 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5383 }
5384
5385 /* At default, mask all interrupts to all present cpus */
5386 mvpp2_interrupts_disable(port);
5387}
5388
5389/* Enable/disable receiving packets */
5390static void mvpp2_ingress_enable(struct mvpp2_port *port)
5391{
5392 u32 val;
5393 int lrxq, queue;
5394
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005395 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005396 queue = port->rxqs[lrxq]->id;
5397 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5398 val &= ~MVPP2_RXQ_DISABLE_MASK;
5399 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5400 }
5401}
5402
5403static void mvpp2_ingress_disable(struct mvpp2_port *port)
5404{
5405 u32 val;
5406 int lrxq, queue;
5407
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005408 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005409 queue = port->rxqs[lrxq]->id;
5410 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5411 val |= MVPP2_RXQ_DISABLE_MASK;
5412 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5413 }
5414}
5415
5416/* Enable transmit via physical egress queue
5417 * - HW starts take descriptors from DRAM
5418 */
5419static void mvpp2_egress_enable(struct mvpp2_port *port)
5420{
5421 u32 qmap;
5422 int queue;
5423 int tx_port_num = mvpp2_egress_port(port);
5424
5425 /* Enable all initialized TXs. */
5426 qmap = 0;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005427 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005428 struct mvpp2_tx_queue *txq = port->txqs[queue];
5429
Markus Elfringdbbb2f02017-04-17 14:07:52 +02005430 if (txq->descs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005431 qmap |= (1 << queue);
5432 }
5433
5434 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5435 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
5436}
5437
/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 *
 * Issues a stop for every active queue, then polls (1 ms steps, bounded
 * by MVPP2_TX_DISABLE_TIMEOUT_MSEC) until all Tx activity has ceased.
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			/* Give up after the timeout; warn but continue */
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}
5473
5474/* Rx descriptors helper methods */
5475
5476/* Get number of Rx descriptors occupied by received packets */
5477static inline int
5478mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
5479{
5480 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
5481
5482 return val & MVPP2_RXQ_OCCUPIED_MASK;
5483}
5484
5485/* Update Rx queue status with the number of occupied and available
5486 * Rx descriptor slots.
5487 */
5488static inline void
5489mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
5490 int used_count, int free_count)
5491{
5492 /* Decrement the number of used descriptors and increment count
5493 * increment the number of free descriptors.
5494 */
5495 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
5496
5497 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
5498}
5499
5500/* Get pointer to next RX descriptor to be processed by SW */
5501static inline struct mvpp2_rx_desc *
5502mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
5503{
5504 int rx_desc = rxq->next_desc_to_proc;
5505
5506 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
5507 prefetch(rxq->descs + rxq->next_desc_to_proc);
5508 return rxq->descs + rx_desc;
5509}
5510
5511/* Set rx queue offset */
5512static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
5513 int prxq, int offset)
5514{
5515 u32 val;
5516
5517 /* Convert offset from bytes to units of 32 bytes */
5518 offset = offset >> 5;
5519
5520 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
5521 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
5522
5523 /* Offset is in */
5524 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
5525 MVPP2_RXQ_PACKET_OFFSET_MASK);
5526
5527 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
5528}
5529
Marcin Wojtas3f518502014-07-10 16:52:13 -03005530/* Tx descriptors helper methods */
5531
Marcin Wojtas3f518502014-07-10 16:52:13 -03005532/* Get pointer to next Tx descriptor to be processed (send) by HW */
5533static struct mvpp2_tx_desc *
5534mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
5535{
5536 int tx_desc = txq->next_desc_to_proc;
5537
5538 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
5539 return txq->descs + tx_desc;
5540}
5541
/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* Aggregated access - the relevant TXQ number is already written in
	 * each TX descriptor, so only the pending count is programmed here.
	 */
	mvpp2_percpu_write(port->priv, smp_processor_id(),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
5553
5554
/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 *
 * Returns 0 when @num descriptors fit, -ENOMEM otherwise.
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
		/* Update number of occupied aggregated Tx descriptors from
		 * HW: it may have drained some since the cached count was
		 * last refreshed.
		 */
		int cpu = smp_processor_id();
		u32 val = mvpp2_read_relaxed(priv,
					     MVPP2_AGGR_TXQ_STATUS_REG(cpu));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
	}

	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
		return -ENOMEM;

	return 0;
}
5578
/* Reserved Tx descriptors allocation request
 *
 * Asks the HW to reserve @num descriptors of @txq for this CPU and
 * returns the number actually granted (may be less than requested).
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
					 struct mvpp2_tx_queue *txq, int num)
{
	u32 val;
	int cpu = smp_processor_id();

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_percpu_write_relaxed(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);

	/* Read back how many descriptors the HW actually reserved */
	val = mvpp2_percpu_read_relaxed(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}
5598
/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 *
 * Returns 0 when at least @num descriptors are reserved for this CPU,
 * -ENOMEM otherwise.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, cpu, desc_count;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors across all CPUs, including
	 * their already-reserved chunks.
	 */
	for_each_present_cpu(cpu) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	/* Request at least a full chunk to amortize the HW round-trip */
	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	/* Leave headroom of one chunk per CPU so other CPUs can still
	 * reserve descriptors on this queue.
	 */
	if (desc_count >
	   (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);

	/* OK, the descriptor count has been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}
5640
5641/* Release the last allocated Tx descriptor. Useful to handle DMA
5642 * mapping failures in the Tx path.
5643 */
5644static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
5645{
5646 if (txq->next_desc_to_proc == 0)
5647 txq->next_desc_to_proc = txq->last_desc - 1;
5648 else
5649 txq->next_desc_to_proc--;
5650}
5651
5652/* Set Tx descriptors fields relevant for CSUM calculation */
5653static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
5654 int ip_hdr_len, int l4_proto)
5655{
5656 u32 command;
5657
5658 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
5659 * G_L4_chk, L4_type required only for checksum calculation
5660 */
5661 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
5662 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
5663 command |= MVPP2_TXD_IP_CSUM_DISABLE;
5664
5665 if (l3_proto == swab16(ETH_P_IP)) {
5666 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
5667 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
5668 } else {
5669 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
5670 }
5671
5672 if (l4_proto == IPPROTO_TCP) {
5673 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
5674 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5675 } else if (l4_proto == IPPROTO_UDP) {
5676 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
5677 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5678 } else {
5679 command |= MVPP2_TXD_L4_CSUM_NOT;
5680 }
5681
5682 return command;
5683}
5684
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 *
 * Note that MVPP2_TXQ_SENT_REG has clear-on-read semantics, so a single
 * read both fetches and resets the HW counter.
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_percpu_read_relaxed(port->priv, smp_processor_id(),
					MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}
5705
/* Clear the "sent descriptors" HW counter of every TX queue of a port,
 * relying on the clear-on-read behavior of MVPP2_TXQ_SENT_REG.
 *
 * Called through on_each_cpu(), so runs on all CPUs, with migration
 * disabled, therefore using smp_processor_id() is OK.
 */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < port->ntxqs; queue++) {
		int id = port->txqs[queue]->id;

		/* The read value is intentionally discarded: the read
		 * itself resets the counter.
		 */
		mvpp2_percpu_read(port->priv, smp_processor_id(),
				  MVPP2_TXQ_SENT_REG(id));
	}
}
5721
/* Set max sizes for Tx queues: program the egress scheduler MTU and make
 * sure the port and per-queue token sizes are at least as large.
 */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	/* pkt_size scaled by 8 — presumably the scheduler MTU register is
	 * expressed in bits; TODO confirm against the PPv2 datasheet.
	 */
	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers: select the egress port first */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	/* Same constraint for each individual TX queue */
	for (txq = 0; txq < port->ntxqs; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
5770
/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	/* get_cpu() disables preemption so the indirect RXQ_NUM/THRESH
	 * register pair is programmed atomically on one CPU.
	 */
	int cpu = get_cpu();

	/* Clamp to the maximum the HW threshold field can hold */
	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);

	put_cpu();
}
5788
/* Set the number of transmitted packets before a Tx-done interrupt.
 * For some reason in the LSP this is done on each CPU. Why ?
 */
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_tx_queue *txq)
{
	/* get_cpu() disables preemption around the indirect register pair */
	int cpu = get_cpu();
	u32 val;

	/* Clamp to the maximum the HW threshold field can hold */
	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;

	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);

	put_cpu();
}
5805
Thomas Petazzoniab426762017-02-21 11:28:04 +01005806static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
5807{
5808 u64 tmp = (u64)clk_hz * usec;
5809
5810 do_div(tmp, USEC_PER_SEC);
5811
5812 return tmp > U32_MAX ? U32_MAX : tmp;
5813}
5814
5815static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
5816{
5817 u64 tmp = (u64)cycles * USEC_PER_SEC;
5818
5819 do_div(tmp, clk_hz);
5820
5821 return tmp > U32_MAX ? U32_MAX : tmp;
5822}
5823
/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	/* If the requested delay does not fit in the HW register, round
	 * time_coal down to the largest representable delay.
	 */
	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}
5841
/* Set the time delay in usec before a Tx-done interrupt */
static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);

	/* If the requested delay does not fit in the HW register, round
	 * tx_time_coal down to the largest representable delay.
	 */
	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
		port->tx_time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
}
5857
/* Free Tx queue skbuffs
 *
 * Unmaps and releases the buffers of the @num oldest in-flight TX
 * descriptors, advancing the per-cpu get index as it goes.
 */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		/* TSO headers live in the per-cpu coherent buffer and must
		 * not be unmapped; everything else was streaming-mapped.
		 */
		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
					 tx_buf->size, DMA_TO_DEVICE);
		/* Only entries that carry a non-NULL skb own one to free */
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}
5878
5879static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
5880 u32 cause)
5881{
5882 int queue = fls(cause) - 1;
5883
5884 return port->rxqs[queue];
5885}
5886
5887static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
5888 u32 cause)
5889{
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005890 int queue = fls(cause) - 1;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005891
5892 return port->txqs[queue];
5893}
5894
/* Handle end of transmission: reclaim completed descriptors, release
 * their buffers and wake the netdev TX queue if it was flow-controlled
 * and enough room has been freed.
 */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	/* Sanity check: the per-cpu state must belong to the running CPU */
	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	/* Reading the sent counter also clears it in HW */
	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
			netif_tx_wake_queue(nq);
}
5916
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005917static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
5918 int cpu)
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005919{
5920 struct mvpp2_tx_queue *txq;
5921 struct mvpp2_txq_pcpu *txq_pcpu;
5922 unsigned int tx_todo = 0;
5923
5924 while (cause) {
5925 txq = mvpp2_get_tx_queue(port, cause);
5926 if (!txq)
5927 break;
5928
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005929 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005930
5931 if (txq_pcpu->count) {
5932 mvpp2_txq_done(port, txq, txq_pcpu);
5933 tx_todo += txq_pcpu->count;
5934 }
5935
5936 cause &= ~(1 << txq->log_id);
5937 }
5938 return tx_todo;
5939}
5940
Marcin Wojtas3f518502014-07-10 16:52:13 -03005941/* Rx/Tx queue initialization/cleanup methods */
5942
/* Allocate and initialize descriptors for aggr TXQ
 *
 * Returns 0 on success, -ENOMEM if the coherent descriptor ring cannot
 * be allocated.
 */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;

	/* Aggr TXQ no reset WA: the HW index is not reset, so start the SW
	 * index from the current HW value instead of assuming zero.
	 */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access; on PPv2.2 the register holds the DMA address shifted
	 * right by MVPP22_AGGR_TXQ_DESC_ADDR_OFFS.
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
		    MVPP2_AGGR_TXQ_SIZE);

	return 0;
}
5978
/* Create a specified Rx queue: allocate its descriptor ring, program the
 * HW (address, size, offset, coalescing) and mark all descriptors as
 * available to the HW.
 *
 * Returns 0 on success, -ENOMEM on descriptor allocation failure.
 */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)

{
	u32 rxq_dma;
	int cpu;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access.
	 * get_cpu() disables preemption across the indirect sequence.
	 */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}
6025
/* Push packets received by the RXQ to BM pool: return every occupied
 * buffer to its originating buffer-manager pool and release the
 * corresponding descriptors back to the HW.
 */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		/* The descriptor records which BM pool the buffer came from */
		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_bm_pool_put(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
6050
/* Cleanup Rx queue: drop pending packets, free the descriptor ring and
 * clear the queue's HW registers.
 */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	int cpu;

	/* Return in-flight buffers to the BM pools before freeing */
	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_dma);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
6080
/* Create and initialize a Tx queue: allocate the descriptor ring,
 * program the HW (address, size, prefetch buffer, WRR/EJP scheduling)
 * and set up the per-cpu shadow state including TSO header buffers.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure; partially
 * allocated resources are released by the caller via mvpp2_txq_deinit()
 * (see the err_cleanup path in mvpp2_setup_txqs()).
 */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
				txq->size * MVPP2_DESC_ALIGNED_SIZE,
				&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access.
	 * get_cpu() disables preemption across the indirect sequence.
	 */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	/* Per-cpu shadow ring state and TSO header buffers */
	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			return -ENOMEM;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
		txq_pcpu->tso_headers = NULL;

		/* Stop the netdev queue when this close to full; wake it
		 * again once half of that headroom has been reclaimed.
		 */
		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;

		txq_pcpu->tso_headers =
			dma_alloc_coherent(port->dev->dev.parent,
					   txq_pcpu->size * TSO_HEADER_SIZE,
					   &txq_pcpu->tso_headers_dma,
					   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
			return -ENOMEM;
	}

	return 0;
}
6171
/* Free allocated TXQ resources: per-cpu buffers and TSO headers, the
 * descriptor ring, and clear the queue's HW registers. Safe to call on
 * a partially initialized queue (NULL checks throughout).
 */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);

		if (txq_pcpu->tso_headers)
			dma_free_coherent(port->dev->dev.parent,
					  txq_pcpu->size * TSO_HEADER_SIZE,
					  txq_pcpu->tso_headers,
					  txq_pcpu->tso_headers_dma);

		/* Guard against double free on a later deinit */
		txq_pcpu->tso_headers = NULL;
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
6212
/* Cleanup Tx ports: drain the queue in HW, wait (bounded) for pending
 * descriptors to be transmitted, then release all per-cpu buffers and
 * reset the shadow state.
 */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	/* Enable drain mode so the HW flushes the queue */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted, polling at 1 ms granularity with a
	 * MVPP2_TX_PENDING_TIMEOUT_MSEC upper bound.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_percpu_read(port->priv, cpu,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	/* Leave drain mode */
	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
	put_cpu();

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
6261
/* Cleanup all Tx queues: flush the port in HW, clean and deinit every
 * queue, then clear the sent counters and the flush bit.
 */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	/* Reset the clear-on-read sent counters on every CPU */
	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}
6286
6287/* Cleanup all Rx queues */
6288static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
6289{
6290 int queue;
6291
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006292 for (queue = 0; queue < port->nrxqs; queue++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006293 mvpp2_rxq_deinit(port, port->rxqs[queue]);
6294}
6295
6296/* Init all Rx queues for port */
6297static int mvpp2_setup_rxqs(struct mvpp2_port *port)
6298{
6299 int queue, err;
6300
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006301 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006302 err = mvpp2_rxq_init(port, port->rxqs[queue]);
6303 if (err)
6304 goto err_cleanup;
6305 }
6306 return 0;
6307
6308err_cleanup:
6309 mvpp2_cleanup_rxqs(port);
6310 return err;
6311}
6312
/* Init all tx queues for port; configures TX-done coalescing only when
 * the port signals completions via dedicated TX IRQs. On failure all
 * queues are torn down via mvpp2_cleanup_txqs().
 */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	if (port->has_tx_irqs) {
		mvpp2_tx_time_coal_set(port);
		for (queue = 0; queue < port->ntxqs; queue++) {
			txq = port->txqs[queue];
			mvpp2_tx_pkts_coal_set(port, txq);
		}
	}

	/* Reset the clear-on-read sent counters on every CPU */
	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
6341
6342/* The callback for per-port interrupt */
6343static irqreturn_t mvpp2_isr(int irq, void *dev_id)
6344{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006345 struct mvpp2_queue_vector *qv = dev_id;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006346
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006347 mvpp2_qvec_interrupt_disable(qv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006348
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006349 napi_schedule(&qv->napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006350
6351 return IRQ_HANDLED;
6352}
6353
/* Per-port interrupt for link status changes */
static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
	struct net_device *dev = port->dev;
	bool event = false, link = false;
	u32 val;

	/* Mask the GoP link interrupts while we service this one; they are
	 * unmasked again at the end regardless of the outcome.
	 */
	mvpp22_gop_mask_irq(port);

	/* Read-and-latch the link state from the MAC that matches the
	 * current interface mode: XLG for 10GKR on GoP 0, GMAC for
	 * RGMII/SGMII. Reading the *_INT_STAT register also tells us
	 * whether a link-change event is actually pending.
	 */
	if (port->gop_id == 0 &&
	    port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP22_XLG_INT_STAT);
		if (val & MVPP22_XLG_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP22_XLG_STATUS);
			if (val & MVPP22_XLG_STATUS_LINK_UP)
				link = true;
		}
	} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
		   port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_STAT);
		if (val & MVPP22_GMAC_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP2_GMAC_STATUS0);
			if (val & MVPP2_GMAC_STATUS0_LINK_UP)
				link = true;
		}
	}

	/* Nothing to do if the interface is down or no link event fired */
	if (!netif_running(dev) || !event)
		goto handled;

	if (link) {
		/* Link came up: enable datapath, then tell the stack */
		mvpp2_interrupts_enable(port);

		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		/* Link went down: quiesce the stack first, then the HW */
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		mvpp2_ingress_disable(port);
		mvpp2_egress_disable(port);

		mvpp2_interrupts_disable(port);
	}

handled:
	mvpp22_gop_unmask_irq(port);
	return IRQ_HANDLED;
}
6407
Antoine Tenart65a2c092017-08-30 10:29:18 +02006408static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
6409 struct phy_device *phydev)
6410{
6411 u32 val;
6412
6413 if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
6414 port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
6415 port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
6416 port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
6417 port->phy_interface != PHY_INTERFACE_MODE_SGMII)
6418 return;
6419
6420 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6421 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
6422 MVPP2_GMAC_CONFIG_GMII_SPEED |
6423 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
6424 MVPP2_GMAC_AN_SPEED_EN |
6425 MVPP2_GMAC_AN_DUPLEX_EN);
6426
6427 if (phydev->duplex)
6428 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6429
6430 if (phydev->speed == SPEED_1000)
6431 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
6432 else if (phydev->speed == SPEED_100)
6433 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
6434
6435 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
Antoine Tenart65a2c092017-08-30 10:29:18 +02006436}
6437
/* Adjust link: phylib adjust_link callback. Called by the PHY state
 * machine whenever link/speed/duplex (or the interface mode) changes.
 */
static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool link_reconfigured = false;
	u32 val;

	if (phydev->link) {
		/* The PHY now reports a different interface mode (e.g. after
		 * renegotiation): the comphy serdes lane must be retrained,
		 * which requires fully quiescing the port first. Only done
		 * when a comphy is actually attached.
		 */
		if (port->phy_interface != phydev->interface && port->comphy) {
			/* disable current port for reconfiguration */
			mvpp2_interrupts_disable(port);
			netif_carrier_off(port->dev);
			mvpp2_port_disable(port);
			phy_power_off(port->comphy);

			/* comphy reconfiguration */
			port->phy_interface = phydev->interface;
			mvpp22_comphy_init(port);

			/* gop/mac reconfiguration */
			mvpp22_gop_init(port);
			mvpp2_port_mii_set(port);

			link_reconfigured = true;
		}

		/* Propagate a speed/duplex change into the GMAC config and
		 * cache the new values for the next comparison.
		 */
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			mvpp2_gmac_set_autoneg(port, phydev);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	/* Act on an actual up/down transition, or force a refresh after a
	 * comphy/GoP reconfiguration (the MAC was reset in that path).
	 */
	if (phydev->link != port->link || link_reconfigured) {
		port->link = phydev->link;

		if (phydev->link) {
			if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
			    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
				/* NOTE(review): both FORCE_LINK_PASS and
				 * FORCE_LINK_DOWN bits are set here — verify
				 * against the GMAC spec that this is the
				 * intended "force link" programming.
				 */
				val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
				val |= (MVPP2_GMAC_FORCE_LINK_PASS |
					MVPP2_GMAC_FORCE_LINK_DOWN);
				writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			}

			/* Bring the datapath up, then notify the stack */
			mvpp2_interrupts_enable(port);
			mvpp2_port_enable(port);

			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
			netif_carrier_on(dev);
			netif_tx_wake_all_queues(dev);
		} else {
			/* Link lost: invalidate cached speed/duplex so the
			 * next link-up re-programs the GMAC.
			 */
			port->duplex = -1;
			port->speed = 0;

			/* Quiesce the stack first, then the hardware */
			netif_tx_stop_all_queues(dev);
			netif_carrier_off(dev);
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);

			mvpp2_port_disable(port);
			mvpp2_interrupts_disable(port);
		}

		phy_print_status(phydev);
	}
}
6512
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006513static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
6514{
6515 ktime_t interval;
6516
6517 if (!port_pcpu->timer_scheduled) {
6518 port_pcpu->timer_scheduled = true;
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01006519 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006520 hrtimer_start(&port_pcpu->tx_done_timer, interval,
6521 HRTIMER_MODE_REL_PINNED);
6522 }
6523}
6524
/* Tasklet deferred from the Tx-done hrtimer: reclaim completed Tx
 * descriptors for every Tx queue of the port on this CPU, and re-arm
 * the timer if work remains. 'data' is the net_device pointer.
 */
static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	/* Allow mvpp2_timer_set() to re-arm from now on */
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}
6544
6545static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
6546{
6547 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
6548 struct mvpp2_port_pcpu,
6549 tx_done_timer);
6550
6551 tasklet_schedule(&port_pcpu->tx_done_tasklet);
6552
6553 return HRTIMER_NORESTART;
6554}
6555
Marcin Wojtas3f518502014-07-10 16:52:13 -03006556/* Main RX/TX processing routines */
6557
/* Display more error info: decode the error code field of an errored
 * Rx descriptor and log it. Unknown error codes are silently ignored.
 */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}
6580
6581/* Handle RX checksum offload */
6582static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
6583 struct sk_buff *skb)
6584{
6585 if (((status & MVPP2_RXD_L3_IP4) &&
6586 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
6587 (status & MVPP2_RXD_L3_IP6))
6588 if (((status & MVPP2_RXD_L4_UDP) ||
6589 (status & MVPP2_RXD_L4_TCP)) &&
6590 (status & MVPP2_RXD_L4_CSUM_OK)) {
6591 skb->csum = 0;
6592 skb->ip_summed = CHECKSUM_UNNECESSARY;
6593 return;
6594 }
6595
6596 skb->ip_summed = CHECKSUM_NONE;
6597}
6598
6599/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
6600static int mvpp2_rx_refill(struct mvpp2_port *port,
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02006601 struct mvpp2_bm_pool *bm_pool, int pool)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006602{
Thomas Petazzoni20396132017-03-07 16:53:00 +01006603 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01006604 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01006605 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006606
Marcin Wojtas3f518502014-07-10 16:52:13 -03006607 /* No recycle or too many buffers are in use, so allocate a new skb */
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01006608 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
6609 GFP_ATOMIC);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01006610 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006611 return -ENOMEM;
6612
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02006613 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Thomas Petazzoni7ef7e1d2017-02-21 11:28:07 +01006614
Marcin Wojtas3f518502014-07-10 16:52:13 -03006615 return 0;
6616}
6617
6618/* Handle tx checksum */
6619static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
6620{
6621 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6622 int ip_hdr_len = 0;
6623 u8 l4_proto;
6624
6625 if (skb->protocol == htons(ETH_P_IP)) {
6626 struct iphdr *ip4h = ip_hdr(skb);
6627
6628 /* Calculate IPv4 checksum and L4 checksum */
6629 ip_hdr_len = ip4h->ihl;
6630 l4_proto = ip4h->protocol;
6631 } else if (skb->protocol == htons(ETH_P_IPV6)) {
6632 struct ipv6hdr *ip6h = ipv6_hdr(skb);
6633
6634 /* Read l4_protocol from one of IPv6 extra headers */
6635 if (skb_network_header_len(skb) > 0)
6636 ip_hdr_len = (skb_network_header_len(skb) >> 2);
6637 l4_proto = ip6h->nexthdr;
6638 } else {
6639 return MVPP2_TXD_L4_CSUM_NOT;
6640 }
6641
6642 return mvpp2_txq_desc_csum(skb_network_offset(skb),
6643 skb->protocol, ip_hdr_len, l4_proto);
6644 }
6645
6646 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
6647}
6648
/* Main rx processing: NAPI receive loop for one Rx queue. Processes up
 * to rx_todo descriptors (clamped to what the HW actually received),
 * builds skbs around the BM pool buffers and hands them to GRO.
 * Returns the clamped rx_todo budget that was attempted.
 */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		/* Pull status, length (minus Marvell header), DMA address
		 * and the buffer cookie (its physical address) from the
		 * descriptor.
		 */
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		/* The descriptor tells us which BM pool the buffer came
		 * from; errored/failed buffers must go back to that pool.
		 */
		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * comprised by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
			continue;
		}

		/* frag_size == 0 tells build_skb() the buffer was kmalloc'ed
		 * rather than page-fragment allocated.
		 */
		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		/* Give the pool a replacement buffer before we consume
		 * this one; on failure drop the frame and recycle it.
		 */
		err = mvpp2_rx_refill(port, bm_pool, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Skip the Marvell header and the headroom left by alloc */
		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
	}

	/* Fold the counters into the per-CPU 64-bit stats once per poll */
	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_todo;
}
6747
6748static inline void
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006749tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006750 struct mvpp2_tx_desc *desc)
6751{
Antoine Tenart20920262017-10-23 15:24:30 +02006752 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
6753
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006754 dma_addr_t buf_dma_addr =
6755 mvpp2_txdesc_dma_addr_get(port, desc);
6756 size_t buf_sz =
6757 mvpp2_txdesc_size_get(port, desc);
Antoine Tenart20920262017-10-23 15:24:30 +02006758 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
6759 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
6760 buf_sz, DMA_TO_DEVICE);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006761 mvpp2_txq_desc_put(txq);
6762}
6763
/* Handle tx fragmentation processing: map every page fragment of the
 * skb and fill one Tx descriptor per fragment. The last fragment's
 * descriptor carries the L_DESC flag and owns the skb reference for
 * later Tx-done release. Returns 0 on success, -ENOMEM if any DMA
 * mapping fails (all work done so far is undone).
 */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		/* Descriptors are taken from the per-CPU aggregated queue,
		 * but accounted against the per-port txq.
		 */
		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, frag->size);

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      frag->size,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;
cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	/* NOTE(review): the descriptors above were obtained from aggr_txq,
	 * yet the cleanup indexes txq->descs. Verify this actually hits the
	 * descriptors that were just filled — it looks like it may walk the
	 * wrong ring (same pattern exists in mvpp2_tx_tso()'s release path).
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
6816
/* Fill one Tx descriptor for a TSO segment header. The header bytes
 * themselves were written by tso_build_hdr() into the per-CPU
 * pre-mapped tso_headers buffer at slot txq_put_index, so only the
 * pre-computed DMA address is programmed here (no mapping needed).
 */
static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	/* Each put-index slot owns one TSO_HEADER_SIZE chunk of the
	 * pre-allocated DMA region.
	 */
	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);

	/* Header descriptor is always a First descriptor; padding is
	 * disabled since the payload follows in further descriptors.
	 */
	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
					    MVPP2_TXD_F_DESC |
					    MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}
6840
/* Fill one Tx descriptor for a chunk of TSO payload data ('sz' bytes at
 * tso->data). 'left' tells whether more payload follows in the current
 * segment; 'last' whether this is the final chunk of the whole skb (only
 * then is the skb attached for Tx-done release). Returns 0 on success,
 * -ENOMEM on DMA mapping failure.
 */
static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	if (!left) {
		/* Last chunk of this segment: mark Last descriptor */
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			/* Final chunk of the skb: attach it for release */
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
			return 0;
		}
	} else {
		/* Middle descriptor: neither First nor Last */
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
	return 0;
}
6877
/* Transmit a GSO skb by software TSO: split the payload into
 * gso_size-bounded segments, emitting one header descriptor (built via
 * the net/tso helpers) plus data descriptors per segment. Returns the
 * number of descriptors consumed, or 0 if there was no room or a
 * mapping failed (caller then drops the skb).
 */
static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct tso_t tso;
	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i, len, descs = 0;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
				      tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	tso_start(skb, &tso);
	len = skb->len - hdr_sz;
	while (len > 0) {
		/* One segment carries at most gso_size payload bytes */
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		/* Emit the segment payload in tso.size-bounded chunks */
		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	/* NOTE(review): descriptors were taken from aggr_txq but this
	 * unwind indexes txq->descs — verify it targets the descriptors
	 * just filled (same concern as mvpp2_tx_frag_process()'s cleanup).
	 */
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}
6929
/* Main tx processing: ndo_start_xmit handler. Maps the skb (linear part
 * plus any page fragments, or via software TSO for GSO skbs) onto Tx
 * descriptors taken from this CPU's aggregated queue, kicks the HW, and
 * updates stats. Always returns NETDEV_TX_OK; on any failure the skb is
 * dropped (never requeued).
 */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	/* GSO skbs take the software-TSO path entirely */
	if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	/* One descriptor for the linear part plus one per fragment */
	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		/* frags == 0 signals "dropped" to the out: path */
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		/* skb is attached to the last descriptor for release */
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
		}
	}

out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq_pcpu->reserved_num -= frags;
		txq_pcpu->count += frags;
		aggr_txq->count += frags;

		/* Enable transmit: make descriptor writes visible to the
		 * HW before ringing the doorbell.
		 */
		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);

		/* Stop the queue when the ring is nearly full; it is woken
		 * again from the Tx-done path.
		 */
		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing: without Tx-done irqs, reclaim inline
	 * once enough packets are pending.
	 */
	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
	    txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}
7035
/* Log every error condition flagged in the misc cause bits (more than
 * one may be set at once, hence independent ifs rather than else-if).
 */
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}
7045
/* NAPI poll handler for one queue vector: reads the per-CPU Rx/Tx cause
 * register, reports misc errors, reclaims completed Tx work, then
 * processes Rx queues round-robin within the budget. Re-enables the
 * vector's interrupts only when the budget was not exhausted.
 */
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	int cpu = smp_processor_id();

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_percpu_read_relaxed(port->priv, qv->sw_thread_id,
						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_percpu_write(port->priv, cpu,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	/* Reclaim Tx descriptors for the queues flagged in bits 16-23 */
	cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	if (cause_tx) {
		cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
		mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
	}

	/* Process RX packets */
	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
	/* Shift into this vector's global rxq numbering and merge any
	 * work left over from a previous budget-limited poll.
	 */
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	/* Budget not exhausted: all Rx work done, leave NAPI and unmask
	 * this vector's interrupts. Otherwise stay scheduled and remember
	 * the pending queues in pending_cause_rx.
	 */
	if (budget > 0) {
		cause_rx = 0;
		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}
7119
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;
	int i;

	/* Only GOP port 0 has the XLG MAC used for the 10G interface
	 * modes; every other configuration goes through the GMAC.
	 */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	/* PPv2.2 (re)configures the comphy and GOP on every start */
	if (port->priv->hw_version == MVPP22) {
		mvpp22_comphy_init(port);
		mvpp22_gop_init(port);
	}

	mvpp2_port_mii_set(port);
	mvpp2_port_enable(port);
	/* Ports without an attached PHY (e.g. fixed links) skip phylib */
	if (ndev->phydev)
		phy_start(ndev->phydev);
	netif_tx_start_all_queues(port->dev);
}
7152
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;
	int i;

	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	/* Give in-flight traffic a chance to drain before tearing the
	 * datapath down.
	 */
	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	if (ndev->phydev)
		phy_stop(ndev->phydev);
	/* phy_power_off() tolerates a NULL comphy (PPv2.1 has none) */
	phy_power_off(port->comphy);
}
7179
Marcin Wojtas3f518502014-07-10 16:52:13 -03007180static int mvpp2_check_ringparam_valid(struct net_device *dev,
7181 struct ethtool_ringparam *ring)
7182{
7183 u16 new_rx_pending = ring->rx_pending;
7184 u16 new_tx_pending = ring->tx_pending;
7185
7186 if (ring->rx_pending == 0 || ring->tx_pending == 0)
7187 return -EINVAL;
7188
Yan Markman7cf87e42017-12-11 09:13:26 +01007189 if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
7190 new_rx_pending = MVPP2_MAX_RXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007191 else if (!IS_ALIGNED(ring->rx_pending, 16))
7192 new_rx_pending = ALIGN(ring->rx_pending, 16);
7193
Yan Markman7cf87e42017-12-11 09:13:26 +01007194 if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
7195 new_tx_pending = MVPP2_MAX_TXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007196 else if (!IS_ALIGNED(ring->tx_pending, 32))
7197 new_tx_pending = ALIGN(ring->tx_pending, 32);
7198
Antoine Tenart76e583c2017-11-28 14:19:51 +01007199 /* The Tx ring size cannot be smaller than the minimum number of
7200 * descriptors needed for TSO.
7201 */
7202 if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
7203 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
7204
Marcin Wojtas3f518502014-07-10 16:52:13 -03007205 if (ring->rx_pending != new_rx_pending) {
7206 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
7207 ring->rx_pending, new_rx_pending);
7208 ring->rx_pending = new_rx_pending;
7209 }
7210
7211 if (ring->tx_pending != new_tx_pending) {
7212 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
7213 ring->tx_pending, new_tx_pending);
7214 ring->tx_pending = new_tx_pending;
7215 }
7216
7217 return 0;
7218}
7219
Thomas Petazzoni26975822017-03-07 16:53:14 +01007220static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007221{
7222 u32 mac_addr_l, mac_addr_m, mac_addr_h;
7223
7224 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
7225 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
7226 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
7227 addr[0] = (mac_addr_h >> 24) & 0xFF;
7228 addr[1] = (mac_addr_h >> 16) & 0xFF;
7229 addr[2] = (mac_addr_h >> 8) & 0xFF;
7230 addr[3] = mac_addr_h & 0xFF;
7231 addr[4] = mac_addr_m & 0xFF;
7232 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
7233}
7234
7235static int mvpp2_phy_connect(struct mvpp2_port *port)
7236{
7237 struct phy_device *phy_dev;
7238
Antoine Tenart5997c862017-09-01 11:04:53 +02007239 /* No PHY is attached */
7240 if (!port->phy_node)
7241 return 0;
7242
Marcin Wojtas3f518502014-07-10 16:52:13 -03007243 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
7244 port->phy_interface);
7245 if (!phy_dev) {
7246 netdev_err(port->dev, "cannot connect to phy\n");
7247 return -ENODEV;
7248 }
7249 phy_dev->supported &= PHY_GBIT_FEATURES;
7250 phy_dev->advertising = phy_dev->supported;
7251
Marcin Wojtas3f518502014-07-10 16:52:13 -03007252 port->link = 0;
7253 port->duplex = 0;
7254 port->speed = 0;
7255
7256 return 0;
7257}
7258
7259static void mvpp2_phy_disconnect(struct mvpp2_port *port)
7260{
Philippe Reynes8e072692016-06-28 00:08:11 +02007261 struct net_device *ndev = port->dev;
7262
Antoine Tenart5997c862017-09-01 11:04:53 +02007263 if (!ndev->phydev)
7264 return;
7265
Philippe Reynes8e072692016-06-28 00:08:11 +02007266 phy_disconnect(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007267}
7268
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007269static int mvpp2_irqs_init(struct mvpp2_port *port)
7270{
7271 int err, i;
7272
7273 for (i = 0; i < port->nqvecs; i++) {
7274 struct mvpp2_queue_vector *qv = port->qvecs + i;
7275
Marc Zyngier13c249a2017-11-04 12:33:47 +00007276 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
7277 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
7278
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007279 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
7280 if (err)
7281 goto err;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007282
7283 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
7284 irq_set_affinity_hint(qv->irq,
7285 cpumask_of(qv->sw_thread_id));
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007286 }
7287
7288 return 0;
7289err:
7290 for (i = 0; i < port->nqvecs; i++) {
7291 struct mvpp2_queue_vector *qv = port->qvecs + i;
7292
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007293 irq_set_affinity_hint(qv->irq, NULL);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007294 free_irq(qv->irq, qv);
7295 }
7296
7297 return err;
7298}
7299
7300static void mvpp2_irqs_deinit(struct mvpp2_port *port)
7301{
7302 int i;
7303
7304 for (i = 0; i < port->nqvecs; i++) {
7305 struct mvpp2_queue_vector *qv = port->qvecs + i;
7306
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007307 irq_set_affinity_hint(qv->irq, NULL);
Marc Zyngier13c249a2017-11-04 12:33:47 +00007308 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007309 free_irq(qv->irq, qv);
7310 }
7311}
7312
/* Program the PPv2.2 RSS unit: all classifier Rx queues are pointed at
 * RSS table 0, whose entries spread the hash space evenly over the
 * port's real Rx queues.  Table accesses are indirect: writing
 * MVPP22_RSS_INDEX selects the cell the following data write targets,
 * so the INDEX/data write pairs below must stay together and in order.
 */
static void mvpp22_init_rss(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int i;

	/* Set the table width: replace the whole classifier Rx queue number
	 * with the ones configured in RSS table entries.
	 */
	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
	mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);

	/* Loop through the classifier Rx Queues and map them to a RSS table.
	 * Map them all to the first table (0) by default.
	 */
	for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
		mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
		mvpp2_write(priv, MVPP22_RSS_TABLE,
			    MVPP22_RSS_TABLE_POINTER(0));
	}

	/* Configure the first table to evenly distribute the packets across
	 * real Rx Queues. The table entries map a hash to an port Rx Queue.
	 */
	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
		u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
			  MVPP22_RSS_INDEX_TABLE_ENTRY(i);
		mvpp2_write(priv, MVPP22_RSS_INDEX, sel);

		mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
	}

}
7345
/* ndo_open: bring the port up — program the parser, allocate queues,
 * request interrupts, connect the PHY and start the datapath.  Each
 * failure unwinds exactly the steps that already succeeded.
 */
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	/* Let the parser accept broadcast and the port's own address */
	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	/* PHY-less PPv2.2 ports rely on the GOP link interrupt for
	 * link-status change notification.
	 */
	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) {
		err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev, "cannot request link IRQ %d\n",
				   port->link_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);
	}

	/* In default link is down */
	netif_carrier_off(port->dev);

	err = mvpp2_phy_connect(port);
	if (err < 0)
		goto err_free_link_irq;

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	if (priv->hw_version == MVPP22)
		mvpp22_init_rss(port);

	/* Start hardware statistics gathering */
	queue_delayed_work(priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	return 0;

err_free_link_irq:
	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
		free_irq(port->link_irq, port);
err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
7439
/* ndo_stop: tear the port down in reverse order of mvpp2_open() */
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	struct mvpp2 *priv = port->priv;
	int cpu;

	mvpp2_stop_dev(port);
	mvpp2_phy_disconnect(port);

	/* Mask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	/* Release the link IRQ if mvpp2_open() requested one */
	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
		free_irq(port->link_irq, port);

	mvpp2_irqs_deinit(port);
	/* Without per-CPU Tx IRQs, Tx-done processing runs from a per-CPU
	 * hrtimer + tasklet; make sure none is left pending.
	 */
	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
			tasklet_kill(&port_pcpu->tx_done_tasklet);
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	/* Stop the periodic hardware statistics gathering */
	cancel_delayed_work_sync(&port->stats_work);

	return 0;
}
7474
Maxime Chevallier10fea262018-03-07 15:18:04 +01007475static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
7476 struct netdev_hw_addr_list *list)
7477{
7478 struct netdev_hw_addr *ha;
7479 int ret;
7480
7481 netdev_hw_addr_list_for_each(ha, list) {
7482 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
7483 if (ret)
7484 return ret;
7485 }
7486
7487 return 0;
7488}
7489
7490static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
7491{
7492 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
7493 mvpp2_prs_vid_enable_filtering(port);
7494 else
7495 mvpp2_prs_vid_disable_filtering(port);
7496
7497 mvpp2_prs_mac_promisc_set(port->priv, port->id,
7498 MVPP2_PRS_L2_UNI_CAST, enable);
7499
7500 mvpp2_prs_mac_promisc_set(port->priv, port->id,
7501 MVPP2_PRS_L2_MULTI_CAST, enable);
7502}
7503
/* ndo_set_rx_mode: rebuild the parser MAC filters from scratch each
 * time, falling back to (uni/multi)cast promiscuous mode whenever a
 * list overflows the parser filter capacity or an entry cannot be
 * added.
 */
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Clear the whole UC and MC list */
	mvpp2_prs_mac_del_all(port);

	if (dev->flags & IFF_PROMISC) {
		mvpp2_set_rx_promisc(port, true);
		return;
	}

	mvpp2_set_rx_promisc(port, false);

	/* Unicast: exact filtering if the list fits, else UC promisc */
	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_UNI_CAST, true);

	if (dev->flags & IFF_ALLMULTI) {
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
		return;
	}

	/* Multicast: same fallback policy as unicast */
	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
}
7534
/* ndo_set_mac_address: update the parser entry for the new address,
 * restoring the old one (and logging) on failure.  A running port is
 * stopped around the update and restarted afterwards.
 */
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	struct mvpp2_port *port = netdev_priv(dev);
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data)) {
		err = -EADDRNOTAVAIL;
		goto log_error;
	}

	if (!netif_running(dev)) {
		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
		if (!err)
			return 0;
		/* Reconfigure parser to accept the original MAC address */
		err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		if (err)
			goto log_error;
		/* NOTE(review): when the restore succeeds, control falls
		 * through and restarts a port that was not running —
		 * confirm this is intended.
		 */
	}

	mvpp2_stop_dev(port);

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (!err)
		goto out_start;

	/* Reconfigure parser accept the original MAC address */
	err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
	if (err)
		goto log_error;
out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	return 0;
log_error:
	netdev_err(dev, "failed to change MAC address\n");
	return err;
}
7575
/* ndo_change_mtu: resize the buffer-manager pools for the new MTU,
 * restoring the previous MTU on failure.  A running port is stopped
 * around the update and restarted afterwards.
 */
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* The BM works on 8-byte aligned packet sizes.
	 * NOTE(review): the rounded value assigned to mtu here is the
	 * aligned *packet size*, not a re-derived MTU — confirm intended.
	 */
	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto log_error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto log_error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;
log_error:
	netdev_err(dev, "failed to change MTU\n");
	return err;
}
7623
stephen hemmingerbc1f4472017-01-06 19:12:52 -08007624static void
Marcin Wojtas3f518502014-07-10 16:52:13 -03007625mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7626{
7627 struct mvpp2_port *port = netdev_priv(dev);
7628 unsigned int start;
7629 int cpu;
7630
7631 for_each_possible_cpu(cpu) {
7632 struct mvpp2_pcpu_stats *cpu_stats;
7633 u64 rx_packets;
7634 u64 rx_bytes;
7635 u64 tx_packets;
7636 u64 tx_bytes;
7637
7638 cpu_stats = per_cpu_ptr(port->stats, cpu);
7639 do {
7640 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
7641 rx_packets = cpu_stats->rx_packets;
7642 rx_bytes = cpu_stats->rx_bytes;
7643 tx_packets = cpu_stats->tx_packets;
7644 tx_bytes = cpu_stats->tx_bytes;
7645 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
7646
7647 stats->rx_packets += rx_packets;
7648 stats->rx_bytes += rx_bytes;
7649 stats->tx_packets += tx_packets;
7650 stats->tx_bytes += tx_bytes;
7651 }
7652
7653 stats->rx_errors = dev->stats.rx_errors;
7654 stats->rx_dropped = dev->stats.rx_dropped;
7655 stats->tx_dropped = dev->stats.tx_dropped;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007656}
7657
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007658static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7659{
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007660 int ret;
7661
Philippe Reynes8e072692016-06-28 00:08:11 +02007662 if (!dev->phydev)
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007663 return -ENOTSUPP;
7664
Philippe Reynes8e072692016-06-28 00:08:11 +02007665 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007666 if (!ret)
7667 mvpp2_link_event(dev);
7668
7669 return ret;
7670}
7671
Maxime Chevallier56beda32018-02-28 10:14:13 +01007672static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
7673{
7674 struct mvpp2_port *port = netdev_priv(dev);
7675 int ret;
7676
7677 ret = mvpp2_prs_vid_entry_add(port, vid);
7678 if (ret)
7679 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
7680 MVPP2_PRS_VLAN_FILT_MAX - 1);
7681 return ret;
7682}
7683
7684static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
7685{
7686 struct mvpp2_port *port = netdev_priv(dev);
7687
7688 mvpp2_prs_vid_entry_remove(port, vid);
7689 return 0;
7690}
7691
7692static int mvpp2_set_features(struct net_device *dev,
7693 netdev_features_t features)
7694{
7695 netdev_features_t changed = dev->features ^ features;
7696 struct mvpp2_port *port = netdev_priv(dev);
7697
7698 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
7699 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
7700 mvpp2_prs_vid_enable_filtering(port);
7701 } else {
7702 /* Invalidate all registered VID filters for this
7703 * port
7704 */
7705 mvpp2_prs_vid_remove_all(port);
7706
7707 mvpp2_prs_vid_disable_filtering(port);
7708 }
7709 }
7710
7711 return 0;
7712}
7713
Marcin Wojtas3f518502014-07-10 16:52:13 -03007714/* Ethtool methods */
7715
/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	/* Apply the same Rx settings to every Rx queue of the port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	/* Tx coalescing only reaches the hardware when Tx-done is
	 * interrupt driven; otherwise a software timer handles it.
	 */
	if (port->has_tx_irqs) {
		port->tx_time_coal = c->tx_coalesce_usecs;
		mvpp2_tx_time_coal_set(port);
	}

	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;

		if (port->has_tx_irqs)
			mvpp2_tx_pkts_coal_set(port, txq);
	}

	return 0;
}
7748
7749/* get coalescing for ethtools */
7750static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
7751 struct ethtool_coalesce *c)
7752{
7753 struct mvpp2_port *port = netdev_priv(dev);
7754
Antoine Tenart385c2842017-12-11 09:13:27 +01007755 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
7756 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
7757 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
Antoine Tenart24b28cc2017-12-11 09:13:28 +01007758 c->tx_coalesce_usecs = port->tx_time_coal;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007759 return 0;
7760}
7761
7762static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
7763 struct ethtool_drvinfo *drvinfo)
7764{
7765 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
7766 sizeof(drvinfo->driver));
7767 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
7768 sizeof(drvinfo->version));
7769 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
7770 sizeof(drvinfo->bus_info));
7771}
7772
7773static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
7774 struct ethtool_ringparam *ring)
7775{
7776 struct mvpp2_port *port = netdev_priv(dev);
7777
Yan Markman7cf87e42017-12-11 09:13:26 +01007778 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
7779 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007780 ring->rx_pending = port->rx_ring_size;
7781 ring->tx_pending = port->tx_ring_size;
7782}
7783
/* Change ring sizes; a running port is fully re-allocated, falling back
 * to the previous sizes (and reporting them in @ring) on failure.
 */
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	/* Clamps/rounds the requested sizes and may rewrite *ring */
	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	/* Interface down: just record the sizes for the next open */
	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}
7843
7844/* Device ops */
7845
/* Netdev callbacks; the Tx path (mvpp2_tx) is defined earlier in the file */
static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open = mvpp2_open,
	.ndo_stop = mvpp2_stop,
	.ndo_start_xmit = mvpp2_tx,
	.ndo_set_rx_mode = mvpp2_set_rx_mode,
	.ndo_set_mac_address = mvpp2_set_mac_address,
	.ndo_change_mtu = mvpp2_change_mtu,
	.ndo_get_stats64 = mvpp2_get_stats64,
	.ndo_do_ioctl = mvpp2_ioctl,
	.ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
	.ndo_set_features = mvpp2_set_features,
};
7859
/* Ethtool callbacks; link settings/nway-reset are delegated to phylib */
static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.set_coalesce = mvpp2_ethtool_set_coalesce,
	.get_coalesce = mvpp2_ethtool_get_coalesce,
	.get_drvinfo = mvpp2_ethtool_get_drvinfo,
	.get_ringparam = mvpp2_ethtool_get_ringparam,
	.set_ringparam = mvpp2_ethtool_set_ringparam,
	.get_strings = mvpp2_ethtool_get_strings,
	.get_ethtool_stats = mvpp2_ethtool_get_stats,
	.get_sset_count = mvpp2_ethtool_get_sset_count,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
7874
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007875/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
7876 * had a single IRQ defined per-port.
7877 */
7878static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
7879 struct device_node *port_node)
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007880{
7881 struct mvpp2_queue_vector *v = &port->qvecs[0];
7882
7883 v->first_rxq = 0;
7884 v->nrxqs = port->nrxqs;
7885 v->type = MVPP2_QUEUE_VECTOR_SHARED;
7886 v->sw_thread_id = 0;
7887 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
7888 v->port = port;
7889 v->irq = irq_of_parse_and_map(port_node, 0);
7890 if (v->irq <= 0)
7891 return -EINVAL;
7892 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7893 NAPI_POLL_WEIGHT);
7894
7895 port->nqvecs = 1;
7896
7897 return 0;
7898}
7899
/* Set up one private (per-CPU, Tx-done) queue vector per possible CPU,
 * plus — in single-queue-distribution mode — one extra shared Rx vector
 * as the last entry.
 */
static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2_queue_vector *v;
	int i, ret;

	port->nqvecs = num_possible_cpus();
	if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
		port->nqvecs += 1;

	for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
		v->sw_thread_id = i;
		v->sw_thread_mask = BIT(i);

		snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);

		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			/* Each CPU owns its own slice of Rx queues */
			v->first_rxq = i * MVPP2_DEFAULT_RXQ;
			v->nrxqs = MVPP2_DEFAULT_RXQ;
		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
			   i == (port->nqvecs - 1)) {
			/* Last vector becomes the shared Rx vector */
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
			v->type = MVPP2_QUEUE_VECTOR_SHARED;
			strncpy(irqname, "rx-shared", sizeof(irqname));
		}

		/* DT probing uses named IRQs; ACPI uses plain indices */
		if (port_node)
			v->irq = of_irq_get_byname(port_node, irqname);
		else
			v->irq = fwnode_irq_get(port->fwnode, i);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
			       NAPI_POLL_WEIGHT);
	}

	return 0;

err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}
7953
7954static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
7955 struct device_node *port_node)
7956{
7957 if (port->has_tx_irqs)
7958 return mvpp2_multi_queue_vectors_init(port, port_node);
7959 else
7960 return mvpp2_simple_queue_vectors_init(port, port_node);
7961}
7962
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007963static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
7964{
7965 int i;
7966
7967 for (i = 0; i < port->nqvecs; i++)
7968 irq_dispose_mapping(port->qvecs[i].irq);
7969}
7970
/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;

	if (priv->hw_version == MVPP21) {
		/* PPv2.1: a single register holds the port's RX queue count */
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}

	/* Handle the more complicated PPv2.2 case */
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		/* Skip vectors that own no RX queues (TX-only vectors) */
		if (!qv->nrxqs)
			continue;

		/* Indirect access: the index write appears to select the
		 * (sw thread, port) group entry that the following config
		 * write targets, so the write order matters.
		 */
		val = qv->sw_thread_id;
		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		/* Describe the vector's RX queue range: first queue + count */
		val = qv->first_rxq;
		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}
8000
/* Initialize port HW: sanity-check the queue counts against hardware
 * limits, allocate the TX/RX queue arrays (devm-managed) and the per-CPU
 * TX bookkeeping, program the RX interrupt groups and default/classifier
 * configuration, and set up the software buffer-manager pools.
 * Returns 0 or a negative errno; on failure the per-CPU TX state already
 * allocated is released (devm takes care of the rest).
 */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	/* Checks for hardware constraints */
	if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	/* RX queue count must be a multiple of 4 and within per-port limits */
	if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
	    (port->ntxqs > MVPP2_MAX_TXQ))
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		/* Per-CPU TX state is the only non-devm allocation here,
		 * hence the explicit free_percpu() unwinding below.
		 */
		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	mvpp2_rx_irqs_setup(port);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	/* Entries never reached are NULL (kcalloc-zeroed), so skip them */
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
8122
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008123/* Checks if the port DT description has the TX interrupts
8124 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
8125 * there are available, but we need to keep support for old DTs.
8126 */
8127static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
8128 struct device_node *port_node)
8129{
8130 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
8131 "tx-cpu2", "tx-cpu3" };
8132 int ret, i;
8133
8134 if (priv->hw_version == MVPP21)
8135 return false;
8136
8137 for (i = 0; i < 5; i++) {
8138 ret = of_property_match_string(port_node, "interrupt-names",
8139 irqs[i]);
8140 if (ret < 0)
8141 return false;
8142 }
8143
8144 return true;
8145}
8146
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008147static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
Marcin Wojtas24812222018-01-18 13:31:43 +01008148 struct fwnode_handle *fwnode,
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008149 char **mac_from)
8150{
8151 struct mvpp2_port *port = netdev_priv(dev);
8152 char hw_mac_addr[ETH_ALEN] = {0};
Marcin Wojtas24812222018-01-18 13:31:43 +01008153 char fw_mac_addr[ETH_ALEN];
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008154
Marcin Wojtas24812222018-01-18 13:31:43 +01008155 if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
8156 *mac_from = "firmware node";
8157 ether_addr_copy(dev->dev_addr, fw_mac_addr);
Antoine Tenart688cbaf2017-09-02 11:06:49 +02008158 return;
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008159 }
Antoine Tenart688cbaf2017-09-02 11:06:49 +02008160
8161 if (priv->hw_version == MVPP21) {
8162 mvpp21_get_mac_address(port, hw_mac_addr);
8163 if (is_valid_ether_addr(hw_mac_addr)) {
8164 *mac_from = "hardware";
8165 ether_addr_copy(dev->dev_addr, hw_mac_addr);
8166 return;
8167 }
8168 }
8169
8170 *mac_from = "random";
8171 eth_hw_addr_random(dev);
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008172}
8173
/* Ports initialization: probe one port described by @port_fwnode (DT or
 * ACPI), allocate and configure its net_device, queue vectors, IRQs,
 * MMIO/stats bases, MAC address and features, then register it with the
 * networking core and record it in priv->port_list.
 * Returns 0 or a negative errno; failures unwind via the goto chain.
 */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct fwnode_handle *port_fwnode,
			    struct mvpp2 *priv)
{
	struct device_node *phy_node;
	struct phy *comphy = NULL;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct device_node *port_node = to_of_node(port_fwnode);
	struct net_device *dev;
	struct resource *res;
	char *mac_from = "";
	unsigned int ntxqs, nrxqs;
	bool has_tx_irqs;
	u32 id;
	int features;
	int phy_mode;
	int err, i, cpu;

	/* ACPI (no port_node) always provides TX IRQs and forces
	 * multi-queue distribution; DT may lack them on old bindings.
	 */
	if (port_node) {
		has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
	} else {
		has_tx_irqs = true;
		queue_mode = MVPP2_QDIST_MULTI_MODE;
	}

	if (!has_tx_irqs)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	ntxqs = MVPP2_MAX_TXQ;
	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
		nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
	else
		nrxqs = MVPP2_DEFAULT_RXQ;

	/* netdev_priv() area (struct mvpp2_port) is zero-initialized here */
	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	if (port_node)
		phy_node = of_parse_phandle(port_node, "phy", 0);
	else
		phy_node = NULL;

	phy_mode = fwnode_get_phy_mode(port_fwnode);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	/* The comphy (serdes lane) is optional: only -EPROBE_DEFER is
	 * propagated, any other lookup error falls back to no comphy.
	 */
	if (port_node) {
		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
		if (IS_ERR(comphy)) {
			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
				err = -EPROBE_DEFER;
				goto err_free_netdev;
			}
			comphy = NULL;
		}
	}

	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->fwnode = port_fwnode;
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	/* ACPI: the link IRQ sits right after the nqvecs queue IRQs */
	if (port_node)
		port->link_irq = of_irq_get_byname(port_node, "link");
	else
		port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
	if (port->link_irq == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_deinit_qvecs;
	}
	if (port->link_irq <= 0)
		/* the link irq is optional */
		port->link_irq = 0;

	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->phy_node = phy_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}

		/* NOTE(review): port->gop_id is used here but is only read
		 * from firmware in the MVPP22 branch below; on PPv2.1 it is
		 * still zero from the netdev allocation — confirm intended.
		 */
		port->stats_base = port->priv->lms_base +
				   MVPP21_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
	} else {
		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
					     &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
		port->stats_base = port->priv->iface_base +
				   MVPP22_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
	}

	/* Alloc per-cpu and ethtool stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	port->ethtool_stats = devm_kcalloc(&pdev->dev,
					   ARRAY_SIZE(mvpp2_ethtool_regs),
					   sizeof(u64), GFP_KERNEL);
	if (!port->ethtool_stats) {
		err = -ENOMEM;
		goto err_free_stats;
	}

	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);

	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}

	/* Without per-CPU TX IRQs, TX completion is driven by a per-CPU
	 * hrtimer + tasklet pair instead.
	 */
	if (!port->has_tx_irqs) {
		for_each_present_cpu(cpu) {
			port_pcpu = per_cpu_ptr(port->pcpu, cpu);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;

			tasklet_init(&port_pcpu->tx_done_tasklet,
				     mvpp2_tx_proc_cb,
				     (unsigned long)dev);
		}
	}

	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		   NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
			    NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Checksum offload is dropped for ports sharing the jumbo pool,
	 * except port 0.
	 */
	if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
	}

	dev->vlan_features |= features;
	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9704 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9704 == 9728 - 20 and rounding to 8 */
	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_free_port_pcpu;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[priv->port_count++] = port;

	return 0;

err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	of_node_put(phy_node);
	free_netdev(dev);
	return err;
}
8416
8417/* Ports removal routine */
8418static void mvpp2_port_remove(struct mvpp2_port *port)
8419{
8420 int i;
8421
8422 unregister_netdev(port->dev);
Peter Chenccb80392016-08-01 15:02:37 +08008423 of_node_put(port->phy_node);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008424 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008425 free_percpu(port->stats);
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008426 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008427 free_percpu(port->txqs[i]->pcpu);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008428 mvpp2_queue_vectors_deinit(port);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008429 if (port->link_irq)
8430 irq_dispose_mapping(port->link_irq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008431 free_netdev(port->dev);
8432}
8433
8434/* Initialize decoding windows */
8435static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
8436 struct mvpp2 *priv)
8437{
8438 u32 win_enable;
8439 int i;
8440
8441 for (i = 0; i < 6; i++) {
8442 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
8443 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
8444
8445 if (i < 4)
8446 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
8447 }
8448
8449 win_enable = 0;
8450
8451 for (i = 0; i < dram->num_cs; i++) {
8452 const struct mbus_dram_window *cs = dram->cs + i;
8453
8454 mvpp2_write(priv, MVPP2_WIN_BASE(i),
8455 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
8456 dram->mbus_dram_target_id);
8457
8458 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
8459 (cs->size - 1) & 0xffff0000);
8460
8461 win_enable |= (1 << i);
8462 }
8463
8464 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
8465}
8466
8467/* Initialize Rx FIFO's */
8468static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
8469{
8470 int port;
8471
8472 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
8473 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
Antoine Tenart2d1d7df2017-10-30 11:23:28 +01008474 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008475 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
Antoine Tenart2d1d7df2017-10-30 11:23:28 +01008476 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
8477 }
8478
8479 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
8480 MVPP2_RX_FIFO_PORT_MIN_PKT);
8481 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
8482}
8483
8484static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
8485{
8486 int port;
8487
8488 /* The FIFO size parameters are set depending on the maximum speed a
8489 * given port can handle:
8490 * - Port 0: 10Gbps
8491 * - Port 1: 2.5Gbps
8492 * - Ports 2 and 3: 1Gbps
8493 */
8494
8495 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
8496 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
8497 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
8498 MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
8499
8500 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
8501 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
8502 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
8503 MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
8504
8505 for (port = 2; port < MVPP2_MAX_PORTS; port++) {
8506 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
8507 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
8508 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
8509 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008510 }
8511
8512 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
8513 MVPP2_RX_FIFO_PORT_MIN_PKT);
8514 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
8515}
8516
Yan Markman93ff1302018-03-05 15:16:52 +01008517/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
8518 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
8519 * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB.
8520 */
Antoine Tenart7c10f972017-10-30 11:23:29 +01008521static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
8522{
Yan Markman93ff1302018-03-05 15:16:52 +01008523 int port, size, thrs;
Antoine Tenart7c10f972017-10-30 11:23:29 +01008524
Yan Markman93ff1302018-03-05 15:16:52 +01008525 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
8526 if (port == 0) {
8527 size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
8528 thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
8529 } else {
8530 size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
8531 thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
8532 }
8533 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
8534 mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
8535 }
Antoine Tenart7c10f972017-10-30 11:23:29 +01008536}
8537
/* Program the PPv2.2 AXI bridge attributes: per the register constants
 * used below, descriptor and buffer traffic is configured as cacheable
 * in the outer domain, while the "normal" access codes are non-cacheable
 * system-domain and the "snoop" codes are cacheable outer-domain.
 */
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	/* Shared read/write attribute values: cacheable, outer domain */
	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	/* Normal access code: non-cacheable, system domain */
	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	/* Snooped read code: cacheable, outer domain */
	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	/* Snooped write code: cacheable, outer domain */
	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
8591
/* Initialize network controller common part HW: MBUS windows (when the
 * platform provides them), AXI bridge (PPv2.2), PHY polling disable,
 * aggregated (per-CPU) TX queues, RX/TX FIFOs, buffer manager, parser
 * and classifier. Returns 0 or a negative errno; allocations are
 * devm-managed so early returns do not leak.
 */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling; the register and bit differ per version */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs, one per present CPU */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Fifo Init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmiting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
8663
8664static int mvpp2_probe(struct platform_device *pdev)
8665{
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008666 const struct acpi_device_id *acpi_id;
Marcin Wojtas24812222018-01-18 13:31:43 +01008667 struct fwnode_handle *fwnode = pdev->dev.fwnode;
8668 struct fwnode_handle *port_fwnode;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008669 struct mvpp2 *priv;
8670 struct resource *res;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008671 void __iomem *base;
Miquel Raynal118d6292017-11-06 22:56:53 +01008672 int i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008673 int err;
8674
Markus Elfring0b92e592017-04-17 08:38:32 +02008675 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008676 if (!priv)
8677 return -ENOMEM;
8678
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008679 if (has_acpi_companion(&pdev->dev)) {
8680 acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
8681 &pdev->dev);
8682 priv->hw_version = (unsigned long)acpi_id->driver_data;
8683 } else {
8684 priv->hw_version =
8685 (unsigned long)of_device_get_match_data(&pdev->dev);
8686 }
Thomas Petazzonifaca9242017-03-07 16:53:06 +01008687
Marcin Wojtas3f518502014-07-10 16:52:13 -03008688 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01008689 base = devm_ioremap_resource(&pdev->dev, res);
8690 if (IS_ERR(base))
8691 return PTR_ERR(base);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008692
Thomas Petazzonia7868412017-03-07 16:53:13 +01008693 if (priv->hw_version == MVPP21) {
8694 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
8695 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
8696 if (IS_ERR(priv->lms_base))
8697 return PTR_ERR(priv->lms_base);
8698 } else {
8699 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008700 if (has_acpi_companion(&pdev->dev)) {
8701 /* In case the MDIO memory region is declared in
8702 * the ACPI, it can already appear as 'in-use'
8703 * in the OS. Because it is overlapped by second
8704 * region of the network controller, make
8705 * sure it is released, before requesting it again.
8706 * The care is taken by mvpp2 driver to avoid
8707 * concurrent access to this memory region.
8708 */
8709 release_resource(res);
8710 }
Thomas Petazzonia7868412017-03-07 16:53:13 +01008711 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
8712 if (IS_ERR(priv->iface_base))
8713 return PTR_ERR(priv->iface_base);
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008714 }
Antoine Ténartf84bf382017-08-22 19:08:27 +02008715
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008716 if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
Antoine Ténartf84bf382017-08-22 19:08:27 +02008717 priv->sysctrl_base =
8718 syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
8719 "marvell,system-controller");
8720 if (IS_ERR(priv->sysctrl_base))
8721 /* The system controller regmap is optional for dt
8722 * compatibility reasons. When not provided, the
8723 * configuration of the GoP relies on the
8724 * firmware/bootloader.
8725 */
8726 priv->sysctrl_base = NULL;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008727 }
8728
Stefan Chulski01d04932018-03-05 15:16:50 +01008729 mvpp2_setup_bm_pool();
8730
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02008731 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
Thomas Petazzonia7868412017-03-07 16:53:13 +01008732 u32 addr_space_sz;
8733
8734 addr_space_sz = (priv->hw_version == MVPP21 ?
8735 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02008736 priv->swth_base[i] = base + i * addr_space_sz;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008737 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008738
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01008739 if (priv->hw_version == MVPP21)
8740 priv->max_port_rxqs = 8;
8741 else
8742 priv->max_port_rxqs = 32;
8743
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008744 if (dev_of_node(&pdev->dev)) {
8745 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
8746 if (IS_ERR(priv->pp_clk))
8747 return PTR_ERR(priv->pp_clk);
8748 err = clk_prepare_enable(priv->pp_clk);
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008749 if (err < 0)
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008750 return err;
8751
8752 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
8753 if (IS_ERR(priv->gop_clk)) {
8754 err = PTR_ERR(priv->gop_clk);
8755 goto err_pp_clk;
8756 }
8757 err = clk_prepare_enable(priv->gop_clk);
8758 if (err < 0)
8759 goto err_pp_clk;
8760
8761 if (priv->hw_version == MVPP22) {
8762 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
8763 if (IS_ERR(priv->mg_clk)) {
8764 err = PTR_ERR(priv->mg_clk);
8765 goto err_gop_clk;
8766 }
8767
8768 err = clk_prepare_enable(priv->mg_clk);
8769 if (err < 0)
8770 goto err_gop_clk;
8771 }
Gregory CLEMENT4792ea02017-09-29 14:27:39 +02008772
8773 priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
8774 if (IS_ERR(priv->axi_clk)) {
8775 err = PTR_ERR(priv->axi_clk);
8776 if (err == -EPROBE_DEFER)
8777 goto err_gop_clk;
8778 priv->axi_clk = NULL;
8779 } else {
8780 err = clk_prepare_enable(priv->axi_clk);
8781 if (err < 0)
8782 goto err_gop_clk;
8783 }
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008784
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008785 /* Get system's tclk rate */
8786 priv->tclk = clk_get_rate(priv->pp_clk);
8787 } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
8788 &priv->tclk)) {
8789 dev_err(&pdev->dev, "missing clock-frequency value\n");
8790 return -EINVAL;
8791 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008792
Thomas Petazzoni2067e0a2017-03-07 16:53:19 +01008793 if (priv->hw_version == MVPP22) {
Maxime Chevallierda42bb22018-04-18 11:14:44 +02008794 err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
Thomas Petazzoni2067e0a2017-03-07 16:53:19 +01008795 if (err)
8796 goto err_mg_clk;
8797 /* Sadly, the BM pools all share the same register to
8798 * store the high 32 bits of their address. So they
8799 * must all have the same high 32 bits, which forces
8800 * us to restrict coherent memory to DMA_BIT_MASK(32).
8801 */
8802 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
8803 if (err)
8804 goto err_mg_clk;
8805 }
8806
Marcin Wojtas3f518502014-07-10 16:52:13 -03008807 /* Initialize network controller */
8808 err = mvpp2_init(pdev, priv);
8809 if (err < 0) {
8810 dev_err(&pdev->dev, "failed to initialize controller\n");
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008811 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008812 }
8813
Marcin Wojtasbf147152018-01-18 13:31:42 +01008814 /* Initialize ports */
Marcin Wojtas24812222018-01-18 13:31:43 +01008815 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
8816 err = mvpp2_port_probe(pdev, port_fwnode, priv);
Marcin Wojtasbf147152018-01-18 13:31:42 +01008817 if (err < 0)
8818 goto err_port_probe;
8819 }
8820
Miquel Raynal118d6292017-11-06 22:56:53 +01008821 if (priv->port_count == 0) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008822 dev_err(&pdev->dev, "no ports enabled\n");
Wei Yongjun575a1932014-07-20 22:02:43 +08008823 err = -ENODEV;
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008824 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008825 }
8826
Miquel Raynal118d6292017-11-06 22:56:53 +01008827 /* Statistics must be gathered regularly because some of them (like
8828 * packets counters) are 32-bit registers and could overflow quite
8829 * quickly. For instance, a 10Gb link used at full bandwidth with the
8830 * smallest packets (64B) will overflow a 32-bit counter in less than
8831 * 30 seconds. Then, use a workqueue to fill 64-bit counters.
8832 */
Miquel Raynal118d6292017-11-06 22:56:53 +01008833 snprintf(priv->queue_name, sizeof(priv->queue_name),
8834 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
8835 priv->port_count > 1 ? "+" : "");
8836 priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
8837 if (!priv->stats_queue) {
8838 err = -ENOMEM;
Antoine Tenart26146b02017-11-28 14:19:49 +01008839 goto err_port_probe;
Miquel Raynal118d6292017-11-06 22:56:53 +01008840 }
8841
Marcin Wojtas3f518502014-07-10 16:52:13 -03008842 platform_set_drvdata(pdev, priv);
8843 return 0;
8844
Antoine Tenart26146b02017-11-28 14:19:49 +01008845err_port_probe:
8846 i = 0;
Marcin Wojtas24812222018-01-18 13:31:43 +01008847 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
Antoine Tenart26146b02017-11-28 14:19:49 +01008848 if (priv->port_list[i])
8849 mvpp2_port_remove(priv->port_list[i]);
8850 i++;
8851 }
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008852err_mg_clk:
Gregory CLEMENT4792ea02017-09-29 14:27:39 +02008853 clk_disable_unprepare(priv->axi_clk);
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008854 if (priv->hw_version == MVPP22)
8855 clk_disable_unprepare(priv->mg_clk);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008856err_gop_clk:
8857 clk_disable_unprepare(priv->gop_clk);
8858err_pp_clk:
8859 clk_disable_unprepare(priv->pp_clk);
8860 return err;
8861}
8862
8863static int mvpp2_remove(struct platform_device *pdev)
8864{
8865 struct mvpp2 *priv = platform_get_drvdata(pdev);
Marcin Wojtas24812222018-01-18 13:31:43 +01008866 struct fwnode_handle *fwnode = pdev->dev.fwnode;
8867 struct fwnode_handle *port_fwnode;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008868 int i = 0;
8869
Miquel Raynale5c500e2017-11-08 08:59:40 +01008870 flush_workqueue(priv->stats_queue);
Miquel Raynal118d6292017-11-06 22:56:53 +01008871 destroy_workqueue(priv->stats_queue);
Miquel Raynal118d6292017-11-06 22:56:53 +01008872
Marcin Wojtas24812222018-01-18 13:31:43 +01008873 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
Miquel Raynale5c500e2017-11-08 08:59:40 +01008874 if (priv->port_list[i]) {
8875 mutex_destroy(&priv->port_list[i]->gather_stats_lock);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008876 mvpp2_port_remove(priv->port_list[i]);
Miquel Raynale5c500e2017-11-08 08:59:40 +01008877 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008878 i++;
8879 }
8880
8881 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
8882 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
8883
8884 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
8885 }
8886
8887 for_each_present_cpu(i) {
8888 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
8889
8890 dma_free_coherent(&pdev->dev,
8891 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
8892 aggr_txq->descs,
Thomas Petazzoni20396132017-03-07 16:53:00 +01008893 aggr_txq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008894 }
8895
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008896 if (is_acpi_node(port_fwnode))
8897 return 0;
8898
Gregory CLEMENT4792ea02017-09-29 14:27:39 +02008899 clk_disable_unprepare(priv->axi_clk);
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008900 clk_disable_unprepare(priv->mg_clk);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008901 clk_disable_unprepare(priv->pp_clk);
8902 clk_disable_unprepare(priv->gop_clk);
8903
8904 return 0;
8905}
8906
/* Device-tree match table.  The .data field carries the controller
 * generation (MVPP21 on Armada 375, MVPP22 on Armada 7k/8k), which
 * mvpp2_probe() retrieves via of_device_get_match_data() and stores
 * in priv->hw_version.
 */
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ } /* sentinel */
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
8919
/* ACPI match table.  Only the MVPP22 generation is supported through
 * ACPI; the driver_data value is read in mvpp2_probe() via
 * acpi_match_device() to set priv->hw_version.
 */
static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ }, /* sentinel */
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
8925
/* Platform driver glue: binds against either the DT table or, when the
 * firmware interface is ACPI, the ACPI table (ACPI_PTR() compiles the
 * latter out when CONFIG_ACPI is disabled).
 */
static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

/* Registers the driver at module init and unregisters it at exit. */
module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");