/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

/* Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64
#define MVPP22_TX_FIFO_THRESH_REG(port)		(0x8840 + 4 * (port))
#define MVPP22_TX_FIFO_SIZE_REG(port)		(0x8860 + 4 * (port))

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)

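/* Illustrative sketch, not part of the original driver: how the RXQ
 * config fields above pack into one MVPP2_RXQ_CONFIG_REG value on
 * PPv2.2. The helper name is ours; the real driver updates these
 * fields with separate read-modify-write sequences in its rxq and
 * pool setup paths.
 */
static inline u32 mvpp22_rxq_config_example(u32 pkt_offset, u32 short_pool,
					    u32 long_pool)
{
	u32 val = 0;

	val |= (pkt_offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
	       MVPP2_RXQ_PACKET_OFFSET_MASK;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
	       MVPP22_RXQ_POOL_SHORT_MASK;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
	       MVPP22_RXQ_POOL_LONG_MASK;

	return val;
}
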
/* Top Registers */
#define MVPP2_MH_REG(port)			(0x5040 + 4 * (port))
#define MVPP2_DSA_EXTENDED			BIT(5)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* RSS Registers */
#define MVPP22_RSS_INDEX			0x1500
#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx)	(idx)
#define MVPP22_RSS_INDEX_TABLE(idx)		((idx) << 8)
#define MVPP22_RSS_INDEX_QUEUE(idx)		((idx) << 16)
#define MVPP22_RSS_TABLE_ENTRY			0x1508
#define MVPP22_RSS_TABLE			0x1510
#define MVPP22_RSS_TABLE_POINTER(p)		(p)
#define MVPP22_RSS_WIDTH			0x150c

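/* Illustrative sketch, not part of the original driver: accessing an
 * RSS table entry is indirect; an index built from the macros above is
 * written to MVPP22_RSS_INDEX before the entry itself is read or
 * written through MVPP22_RSS_TABLE_ENTRY. The helper name is ours.
 */
static inline u32 mvpp22_rss_index_example(u32 table, u32 entry)
{
	return MVPP22_RSS_INDEX_TABLE(table) |
	       MVPP22_RSS_INDEX_TABLE_ENTRY(entry);
}
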
/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TXQ_THRESH_OFFSET			16
#define MVPP2_TXQ_THRESH_MASK			0x3fff
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

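/* Illustrative sketch, not part of the original driver: an AXI code is
 * a cache-attribute field plus a domain field, e.g.
 * mvpp22_axi_code_example(MVPP22_AXI_CODE_CACHE_RD_CACHE,
 * MVPP22_AXI_CODE_DOMAIN_OUTER_DOM) for cacheable reads. The helper
 * name is ours.
 */
static inline u32 mvpp22_axi_code_example(u32 cache, u32 domain)
{
	return (cache << MVPP22_AXI_CODE_CACHE_OFFS) |
	       (domain << MVPP22_AXI_CODE_DOMAIN_OFFS);
}
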
/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_TX_THRESHOLD_REG(port)	(0x5140 + 4 * (port))
#define MVPP2_MAX_ISR_TX_THRESHOLD		0xfffff0

#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD		0xfffff0
#define MVPP21_ISR_RXQ_GROUP_REG(port)		(0x5400 + 4 * (port))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7

#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

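/* Illustrative sketch, not part of the original driver: the value
 * written to MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG packs a starting
 * queue number together with the subgroup size. The helper name is
 * ours.
 */
static inline u32 mvpp22_rxq_subgroup_example(u32 start_queue, u32 size)
{
	return (start_queue & MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK) |
	       ((size << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET) &
		MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK);
}
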
#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET	16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP22_BM_POOL_PTRS_NUM_MASK		0xfff8
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP22_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8

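/* Illustrative sketch, not part of the original driver: on PPv2.2,
 * buffer addresses can exceed 32 bits, so the high bits of the DMA
 * address and of the buffer cookie are written together to
 * MVPP22_BM_ADDR_HIGH_RLS_REG when releasing a buffer. A minimal
 * composition helper (name ours), using upper_32_bits() from
 * <linux/kernel.h>:
 */
static inline u32 mvpp22_bm_addr_high_rls_example(dma_addr_t buf_dma_addr,
						  phys_addr_t buf_phys_addr)
{
	return (upper_32_bits(buf_dma_addr) &
		MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK) |
	       ((upper_32_bits(buf_phys_addr) <<
		 MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
		MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK);
}
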
/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_FLOW_CTRL_MASK		GENMASK(2, 1)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_INTERNAL_CLK_MASK		BIT(4)
#define MVPP2_GMAC_DISABLE_PADDING		BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_IN_BAND_AUTONEG		BIT(2)
#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS	BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_FLOW_CTRL_AUTONEG		BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_STATUS0			0x10
#define MVPP2_GMAC_STATUS0_LINK_UP		BIT(0)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_INT_STAT			0x20
#define MVPP22_GMAC_INT_STAT_LINK		BIT(1)
#define MVPP22_GMAC_INT_MASK			0x24
#define MVPP22_GMAC_INT_MASK_LINK_STAT		BIT(1)
#define MVPP22_GMAC_CTRL_4_REG			0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL		BIT(0)
#define MVPP22_CTRL4_DP_CLK_SEL			BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS_DIS		BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE	BIT(7)
#define MVPP22_GMAC_INT_SUM_MASK		0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT	BIT(1)

/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define MVPP22_XLG_CTRL0_PORT_EN		BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS		BIT(1)
#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN	BIT(7)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS		BIT(14)
#define MVPP22_XLG_CTRL1_REG			0x104
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS	0
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK	0x1fff
#define MVPP22_XLG_STATUS			0x10c
#define MVPP22_XLG_STATUS_LINK_UP		BIT(0)
#define MVPP22_XLG_INT_STAT			0x114
#define MVPP22_XLG_INT_STAT_LINK		BIT(1)
#define MVPP22_XLG_INT_MASK			0x118
#define MVPP22_XLG_INT_MASK_LINK		BIT(1)
#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G	(1 << 13)
#define MVPP22_XLG_EXT_INT_MASK			0x15c
#define MVPP22_XLG_EXT_INT_MASK_XLG		BIT(1)
#define MVPP22_XLG_EXT_INT_MASK_GIG		BIT(2)
#define MVPP22_XLG_CTRL4_REG			0x184
#define MVPP22_XLG_CTRL4_FWD_FC			BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC		BIT(6)
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC	BIT(12)

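/* Illustrative sketch, not part of the original driver: switching the
 * XLG MAC between GMAC and 10G operation is a read-modify-write of the
 * MACMODESELECT field of MVPP22_XLG_CTRL3_REG. The helper name is
 * ours; it computes the new register value from the old one.
 */
static inline u32 mvpp22_xlg_ctrl3_10g_example(u32 ctrl3)
{
	ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
	return ctrl3;
}
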
/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG			0x1204
#define MVPP22_SMI_POLLING_EN			BIT(10)

#define MVPP22_GMAC_BASE(port)		(0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

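/* Illustrative sketch, not part of the original driver: an open-coded
 * equivalent of MVPP2_QUEUE_NEXT_DESC(), showing that the ring index
 * simply wraps back to 0 once it reaches last_desc. The helper name is
 * ours.
 */
static inline int mvpp2_queue_next_desc_example(int index, int last_desc)
{
	return (index < last_desc) ? (index + 1) : 0;
}
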
/* MPCS registers. PPv2.2 only */
#define MVPP22_MPCS_BASE(port)			(0x7000 + (port) * 0x1000)
#define MVPP22_MPCS_CTRL			0x14
#define MVPP22_MPCS_CTRL_FWD_ERR_CONN		BIT(10)
#define MVPP22_MPCS_CLK_RESET			0x14c
#define MAC_CLK_RESET_SD_TX			BIT(0)
#define MAC_CLK_RESET_SD_RX			BIT(1)
#define MAC_CLK_RESET_MAC			BIT(2)
#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n)	((n) << 4)
#define MVPP22_MPCS_CLK_RESET_DIV_SET		BIT(11)

/* XPCS registers. PPv2.2 only */
#define MVPP22_XPCS_BASE(port)			(0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0			0x0
#define MVPP22_XPCS_CFG0_PCS_MODE(n)		((n) << 3)
#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n)		((n) << 5)

/* System controller registers. Accessed through a regmap. */
#define GENCONF_SOFT_RESET1			0x1108
#define GENCONF_SOFT_RESET1_GOP			BIT(6)
#define GENCONF_PORT_CTRL0			0x1110
#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT	BIT(1)
#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE	BIT(29)
#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR	BIT(31)
#define GENCONF_PORT_CTRL1			0x1114
#define GENCONF_PORT_CTRL1_EN(p)		BIT(p)
#define GENCONF_PORT_CTRL1_RESET(p)		(BIT(p) << 28)
#define GENCONF_CTRL0				0x1120
#define GENCONF_CTRL0_PORT0_RGMII		BIT(0)
#define GENCONF_CTRL0_PORT1_RGMII_MII		BIT(1)
#define GENCONF_CTRL0_PORT1_RGMII		BIT(2)

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	64
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_TXDONE_COAL_USEC		1000
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		64

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled with zeroes automatically on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, the IP header automatically ends up aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4
#define MVPP2_VLAN_TAG_EDSA_LEN		8

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
 * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data),
 * multiply this value by two to count the maximum number of skb descs needed.
 */
#define MVPP2_MAX_TSO_SEGS		300
#define MVPP2_MAX_SKB_DESCS		(MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

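/* Illustrative sketch, not part of the original driver: a TX queue has
 * to stop accepting packets while fewer than MVPP2_MAX_SKB_DESCS
 * descriptors remain free, since a single GSO skb may consume that
 * many. The helper name is ours; the real driver caches the result as
 * a precomputed stop threshold.
 */
static inline bool mvpp2_txq_should_stop_example(int count, int ring_size)
{
	return count >= ring_size - MVPP2_MAX_SKB_DESCS;
}
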
/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		4

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD_MAX		1024
#define MVPP2_MAX_RXD_DFLT		128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD_MAX		2048
#define MVPP2_MAX_TXD_DFLT		1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB	0x8000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB	0x2000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB	0x1000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB	0x200
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB	0x80
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB	0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80

/* TX FIFO constants */
#define MVPP22_TX_FIFO_DATA_SIZE_10KB		0xa
#define MVPP22_TX_FIFO_DATA_SIZE_3KB		0x3
#define MVPP2_TX_FIFO_THRESHOLD_MIN		256
#define MVPP2_TX_FIFO_THRESHOLD_10KB	\
	(MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
#define MVPP2_TX_FIFO_THRESHOLD_3KB	\
	(MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)

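/* Illustrative sketch, not part of the original driver: how the RX
 * sizing macros above compose, going from an MTU to the total
 * allocation that also covers NET_SKB_PAD headroom and the
 * skb_shared_info tail. The helper name is ours.
 */
static inline int mvpp2_rx_total_size_example(int mtu)
{
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);	/* + MH, VLAN, L2, FCS */
	int buf_size = MVPP2_RX_BUF_SIZE(pkt_size);	/* + NET_SKB_PAD */

	return MVPP2_RX_TOTAL_SIZE(buf_size);	/* + shared info */
}
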
#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH   = 1,
	MVPP2_TAG_TYPE_DSA  = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100
#define MVPP2_PRS_CAST_MASK		BIT(0)
#define MVPP2_PRS_MCAST_VAL		BIT(0)
#define MVPP2_PRS_UCAST_VAL		0x0

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
				(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
				(((offs) * 2) - ((offs) % 2) + 2)
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5

#define MVPP2_PRS_VID_TCAM_BYTE			2

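/* Illustrative note, not part of the original driver: data and enable
 * bytes are interleaved in pairs within a TCAM entry, so logical
 * header-data offsets 0, 1, 2, 3, ... map to entry bytes 0, 1, 4, 5,
 * ... while the matching enable bytes occupy 2, 3, 6, 7, ... A trivial
 * wrapper (name ours) makes the mapping visible:
 */
static inline int mvpp2_prs_tcam_data_byte_example(int offs)
{
	return MVPP2_PRS_TCAM_DATA_BYTE(offs);	/* 0->0, 1->1, 2->4, 3->5 */
}
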
/* TCAM range for unicast and multicast filtering. We have 25 entries per port,
 * with 4 dedicated to UC filtering and the rest to multicast filtering.
 * Additionally we reserve one entry for the broadcast address, and one for
 * each port's own address.
 */
#define MVPP2_PRS_MAC_UC_MC_FILT_MAX	25
#define MVPP2_PRS_MAC_RANGE_SIZE	80

/* Number of entries per port dedicated to UC and MC filtering */
#define MVPP2_PRS_MAC_UC_FILT_MAX	4
#define MVPP2_PRS_MAC_MC_FILT_MAX	(MVPP2_PRS_MAC_UC_MC_FILT_MAX - \
					 MVPP2_PRS_MAC_UC_FILT_MAX)

/* There is a TCAM range reserved for VLAN filtering entries; its size is 33:
 * - 10 VLAN ID filter entries per port
 * - 1 default VLAN filter entry per port
 * It is assumed that there are 3 ports to filter, not counting the loopback
 * port.
 */
#define MVPP2_PRS_VLAN_FILT_MAX		11
#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE	33

#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY	(MVPP2_PRS_VLAN_FILT_MAX - 2)
#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY	(MVPP2_PRS_VLAN_FILT_MAX - 1)

/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1

/* MAC filtering range */
#define MVPP2_PE_MAC_RANGE_END		(MVPP2_PE_VID_FILT_RANGE_START - 1)
#define MVPP2_PE_MAC_RANGE_START	(MVPP2_PE_MAC_RANGE_END - \
					 MVPP2_PRS_MAC_RANGE_SIZE + 1)
/* VLAN filtering range */
#define MVPP2_PE_VID_FILT_RANGE_END	(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_VID_FILT_RANGE_START	(MVPP2_PE_VID_FILT_RANGE_END - \
					 MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PE_VID_FILT_RANGE_START - 1)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 22)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 21)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 20)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_VID_FLTR_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
/* reserved */
#define MVPP2_PE_MAC_MC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_UC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

#define MVPP2_PRS_VID_PORT_FIRST(port)	(MVPP2_PE_VID_FILT_RANGE_START + \
					 ((port) * MVPP2_PRS_VLAN_FILT_MAX))
#define MVPP2_PRS_VID_PORT_LAST(port)	(MVPP2_PRS_VID_PORT_FIRST(port) \
					 + MVPP2_PRS_VLAN_FILT_MAX_ENTRY)
/* Index of default vid filter for given port */
#define MVPP2_PRS_VID_PORT_DFLT(port)	(MVPP2_PRS_VID_PORT_FIRST(port) \
					 + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY)

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_IP_FRAG_TRUE		BIT(17)
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
#define MVPP2_PRS_EDSA_VID_AI_BIT		BIT(0)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_VID,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L2 cast enum */
enum mvpp2_prs_l2_cast {
	MVPP2_PRS_L2_UNI_CAST,
	MVPP2_PRS_L2_MULTI_CAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64
#define MVPP2_CLS_RX_QUEUES		256

/* RSS constants */
#define MVPP22_RSS_TABLE_ENTRIES	32

/* BM constants */
#define MVPP2_BM_JUMBO_BUF_NUM		512
#define MVPP2_BM_LONG_BUF_NUM		1024
#define MVPP2_BM_SHORT_BUF_NUM		2048
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

#define MVPP2_BM_SHORT_FRAME_SIZE		512
#define MVPP2_BM_LONG_FRAME_SIZE		2048
#define MVPP2_BM_JUMBO_FRAME_SIZE		10240
/* BM short pool packet size
 * These values ensure that for SWF (software forwarding) the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
#define MVPP2_BM_LONG_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
#define MVPP2_BM_JUMBO_PKT_SIZE	MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE)

#define MVPP21_ADDR_SPACE_SZ		0
#define MVPP22_ADDR_SPACE_SZ		SZ_64K

#define MVPP2_MAX_THREADS		8
#define MVPP2_MAX_QVECS			MVPP2_MAX_THREADS

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

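/* Illustrative sketch, not part of the original driver: the table
 * above is filled at probe time with the default packet sizes and
 * buffer counts defined earlier, along these lines (helper name ours):
 */
static inline void mvpp2_setup_pools_example(void)
{
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
}
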
/* GMAC MIB Counters register definitions */
#define MVPP21_MIB_COUNTERS_OFFSET		0x1000
#define MVPP21_MIB_COUNTERS_PORT_SZ		0x400
#define MVPP22_MIB_COUNTERS_OFFSET		0x0
#define MVPP22_MIB_COUNTERS_PORT_SZ		0x100

#define MVPP2_MIB_GOOD_OCTETS_RCVD		0x0
#define MVPP2_MIB_BAD_OCTETS_RCVD		0x8
#define MVPP2_MIB_CRC_ERRORS_SENT		0xc
#define MVPP2_MIB_UNICAST_FRAMES_RCVD		0x10
#define MVPP2_MIB_BROADCAST_FRAMES_RCVD		0x18
#define MVPP2_MIB_MULTICAST_FRAMES_RCVD		0x1c
#define MVPP2_MIB_FRAMES_64_OCTETS		0x20
#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS	0x24
#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS	0x28
#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS	0x2c
#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS	0x30
#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS	0x34
#define MVPP2_MIB_GOOD_OCTETS_SENT		0x38
#define MVPP2_MIB_UNICAST_FRAMES_SENT		0x40
#define MVPP2_MIB_MULTICAST_FRAMES_SENT		0x48
#define MVPP2_MIB_BROADCAST_FRAMES_SENT		0x4c
#define MVPP2_MIB_FC_SENT			0x54
#define MVPP2_MIB_FC_RCVD			0x58
#define MVPP2_MIB_RX_FIFO_OVERRUN		0x5c
#define MVPP2_MIB_UNDERSIZE_RCVD		0x60
#define MVPP2_MIB_FRAGMENTS_RCVD		0x64
#define MVPP2_MIB_OVERSIZE_RCVD			0x68
#define MVPP2_MIB_JABBER_RCVD			0x6c
#define MVPP2_MIB_MAC_RCV_ERROR			0x70
#define MVPP2_MIB_BAD_CRC_EVENT			0x74
#define MVPP2_MIB_COLLISION			0x78
#define MVPP2_MIB_LATE_COLLISION		0x7c

#define MVPP2_MIB_COUNTERS_STATS_DELAY		(1 * HZ)

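/* Illustrative sketch, not part of the original driver: each port gets
 * its own MIB counter window (relative to lms_base on PPv2.1 and to
 * iface_base on PPv2.2), and the counter macros above are offsets into
 * that window. The helper name is ours.
 */
static inline unsigned long mvpp2_mib_base_example(bool is_pp22, int port)
{
	if (is_pp22)
		return MVPP22_MIB_COUNTERS_OFFSET +
		       port * MVPP22_MIB_COUNTERS_PORT_SZ;
	return MVPP21_MIB_COUNTERS_OFFSET +
	       port * MVPP21_MIB_COUNTERS_PORT_SZ;
}
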
/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* On PPv2.2, each "software thread" can access the base
	 * register through a separate address space, each 64 KB apart
	 * from each other. Typically, such address spaces will be
	 * used per CPU.
	 */
	void __iomem *swth_base[MVPP2_MAX_THREADS];

	/* On PPv2.2, some port control registers are located in the system
	 * controller space. These registers are accessible through a regmap.
	 */
	struct regmap *sysctrl_base;

	/* Common clocks */
	struct clk *pp_clk;
	struct clk *gop_clk;
	struct clk *mg_clk;
	struct clk *axi_clk;

	/* List of pointers to port structures */
	int port_count;
	struct mvpp2_port *port_list[MVPP2_MAX_PORTS];

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	/* Workqueue to gather hardware statistics */
	char queue_name[30];
	struct workqueue_struct *stats_queue;
};

struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};

struct mvpp2_queue_vector {
	int irq;
	struct napi_struct napi;
	enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
	int sw_thread_id;
	u16 sw_thread_mask;
	int first_rxq;
	int nrxqs;
	u32 pending_cause_rx;
	struct mvpp2_port *port;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int link_irq;

	struct mvpp2 *priv;

	/* Firmware node associated to the port */
	struct fwnode_handle *fwnode;

	/* Per-port registers' base address */
	void __iomem *base;
	void __iomem *stats_base;

	struct mvpp2_rx_queue **rxqs;
	unsigned int nrxqs;
	struct mvpp2_tx_queue **txqs;
	unsigned int ntxqs;
	struct net_device *dev;

	int pkt_size;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;
	u64 *ethtool_stats;

	/* Per-port work and its lock to gather hardware statistics */
	struct mutex gather_stats_lock;
	struct delayed_work stats_work;

	phy_interface_t phy_interface;
	struct device_node *phy_node;
	struct phy *comphy;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
	unsigned int nqvecs;
	bool has_tx_irqs;

	u32 tx_time_coal;
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

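/* Illustrative sketch, not part of the original driver (which defines
 * its own accessor set): callers can stay descriptor-layout agnostic
 * by dispatching on hw_version. The helper name and the 40-bit mask
 * are ours; on PPv2.2 the upper bits of buf_dma_addr_key_hash carry
 * other fields.
 */
static inline dma_addr_t
mvpp2_rxdesc_dma_addr_example(struct mvpp2 *priv, struct mvpp2_rx_desc *rx_desc)
{
	if (priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(39, 0);
}
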
Thomas Petazzoni83544912016-12-21 11:28:49 +01001163struct mvpp2_txq_pcpu_buf {
1164 /* Transmitted SKB */
1165 struct sk_buff *skb;
1166
1167 /* Physical address of transmitted buffer */
Thomas Petazzoni20396132017-03-07 16:53:00 +01001168 dma_addr_t dma;
Thomas Petazzoni83544912016-12-21 11:28:49 +01001169
1170 /* Size transmitted */
1171 size_t size;
1172};
1173
Marcin Wojtas3f518502014-07-10 16:52:13 -03001174/* Per-CPU Tx queue control */
1175struct mvpp2_txq_pcpu {
1176 int cpu;
1177
1178 /* Number of Tx DMA descriptors in the descriptor ring */
1179 int size;
1180
1181 /* Number of currently used Tx DMA descriptor in the
1182 * descriptor ring
1183 */
1184 int count;
1185
Antoine Tenart1d17db02017-10-30 11:23:31 +01001186 int wake_threshold;
1187 int stop_threshold;
1188
Marcin Wojtas3f518502014-07-10 16:52:13 -03001189 /* Number of Tx DMA descriptors reserved for each CPU */
1190 int reserved_num;
1191
Thomas Petazzoni83544912016-12-21 11:28:49 +01001192 /* Infos about transmitted buffers */
1193 struct mvpp2_txq_pcpu_buf *buffs;
Marcin Wojtas71ce3912015-08-06 19:00:29 +02001194
Marcin Wojtas3f518502014-07-10 16:52:13 -03001195 /* Index of last TX DMA descriptor that was inserted */
1196 int txq_put_index;
1197
1198 /* Index of the TX DMA descriptor to be cleaned up */
1199 int txq_get_index;
Antoine Ténart186cd4d2017-08-23 09:46:56 +02001200
1201 /* DMA buffer for TSO headers */
1202 char *tso_headers;
1203 dma_addr_t tso_headers_dma;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001204};
1205
1206struct mvpp2_tx_queue {
1207 /* Physical number of this Tx queue */
1208 u8 id;
1209
1210 /* Logical number of this Tx queue */
1211 u8 log_id;
1212
1213 /* Number of Tx DMA descriptors in the descriptor ring */
1214 int size;
1215
1216	/* Number of currently used Tx DMA descriptors in the descriptor ring */
1217 int count;
1218
1219 /* Per-CPU control of physical Tx queues */
1220 struct mvpp2_txq_pcpu __percpu *pcpu;
1221
Marcin Wojtas3f518502014-07-10 16:52:13 -03001222 u32 done_pkts_coal;
1223
1224	/* Virtual address of the Tx DMA descriptors array */
1225 struct mvpp2_tx_desc *descs;
1226
1227 /* DMA address of the Tx DMA descriptors array */
Thomas Petazzoni20396132017-03-07 16:53:00 +01001228 dma_addr_t descs_dma;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001229
1230 /* Index of the last Tx DMA descriptor */
1231 int last_desc;
1232
1233 /* Index of the next Tx DMA descriptor to process */
1234 int next_desc_to_proc;
1235};
1236
1237struct mvpp2_rx_queue {
1238 /* RX queue number, in the range 0-31 for physical RXQs */
1239 u8 id;
1240
1241 /* Num of rx descriptors in the rx descriptor ring */
1242 int size;
1243
1244 u32 pkts_coal;
1245 u32 time_coal;
1246
1247 /* Virtual address of the RX DMA descriptors array */
1248 struct mvpp2_rx_desc *descs;
1249
1250 /* DMA address of the RX DMA descriptors array */
Thomas Petazzoni20396132017-03-07 16:53:00 +01001251 dma_addr_t descs_dma;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001252
1253 /* Index of the last RX DMA descriptor */
1254 int last_desc;
1255
1256 /* Index of the next RX DMA descriptor to process */
1257 int next_desc_to_proc;
1258
1259 /* ID of port to which physical RXQ is mapped */
1260 int port;
1261
1262 /* Port's logic RXQ number to which physical RXQ is mapped */
1263 int logic_rxq;
1264};
1265
1266union mvpp2_prs_tcam_entry {
1267 u32 word[MVPP2_PRS_TCAM_WORDS];
1268 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
1269};
1270
1271union mvpp2_prs_sram_entry {
1272 u32 word[MVPP2_PRS_SRAM_WORDS];
1273 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
1274};
1275
1276struct mvpp2_prs_entry {
1277 u32 index;
1278 union mvpp2_prs_tcam_entry tcam;
1279 union mvpp2_prs_sram_entry sram;
1280};
1281
1282struct mvpp2_prs_shadow {
1283 bool valid;
1284 bool finish;
1285
1286 /* Lookup ID */
1287 int lu;
1288
1289 /* User defined offset */
1290 int udf;
1291
1292 /* Result info */
1293 u32 ri;
1294 u32 ri_mask;
1295};
1296
1297struct mvpp2_cls_flow_entry {
1298 u32 index;
1299 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
1300};
1301
1302struct mvpp2_cls_lookup_entry {
1303 u32 lkpid;
1304 u32 way;
1305 u32 data;
1306};
1307
1308struct mvpp2_bm_pool {
1309 /* Pool number in the range 0-7 */
1310 int id;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001311
1312 /* Buffer Pointers Pool External (BPPE) size */
1313 int size;
Thomas Petazzonid01524d2017-03-07 16:53:09 +01001314 /* BPPE size in bytes */
1315 int size_bytes;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001316 /* Number of buffers for this pool */
1317 int buf_num;
1318 /* Pool buffer size */
1319 int buf_size;
1320 /* Packet size */
1321 int pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01001322 int frag_size;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001323
1324 /* BPPE virtual base address */
1325 u32 *virt_addr;
Thomas Petazzoni20396132017-03-07 16:53:00 +01001326 /* BPPE DMA base address */
1327 dma_addr_t dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001328
1329 /* Ports using BM pool */
1330 u32 port_map;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001331};
1332
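/* Check whether a DMA address falls inside the per-CPU TSO header buffer,
 * i.e. whether it was carved out of tso_headers_dma rather than mapped
 * individually.
 */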
Antoine Tenart20920262017-10-23 15:24:30 +02001333#define IS_TSO_HEADER(txq_pcpu, addr) \
1334 ((addr) >= (txq_pcpu)->tso_headers_dma && \
1335 (addr) < (txq_pcpu)->tso_headers_dma + \
1336 (txq_pcpu)->size * TSO_HEADER_SIZE)
1337
Thomas Petazzoni213f4282017-08-03 10:42:00 +02001338/* Queue modes */
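/* In single mode a port's RX queues are serviced together through one
 * interrupt vector; in multi mode they are distributed across CPUs.
 */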
1339#define MVPP2_QDIST_SINGLE_MODE 0
1340#define MVPP2_QDIST_MULTI_MODE 1
1341
1342static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
1343
1344module_param(queue_mode, int, 0444);
1345MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
1346
Marcin Wojtas3f518502014-07-10 16:52:13 -03001347#define MVPP2_DRIVER_NAME "mvpp2"
1348#define MVPP2_DRIVER_VERSION "1.0"
1349
1350/* Utility/helper methods */
1351
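/* mvpp2_write()/mvpp2_read() use the first software thread register window
 * (swth_base[0]); the _percpu variants below select a given CPU's window.
 */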
1352static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1353{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001354 writel(data, priv->swth_base[0] + offset);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001355}
1356
1357static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1358{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001359 return readl(priv->swth_base[0] + offset);
Thomas Petazzonia7868412017-03-07 16:53:13 +01001360}
1361
1362/* These accessors should be used to access:
1363 *
1364 * - per-CPU registers, where each CPU has its own copy of the
1365 * register.
1366 *
1367 * MVPP2_BM_VIRT_ALLOC_REG
1368 * MVPP2_BM_ADDR_HIGH_ALLOC
1369 * MVPP22_BM_ADDR_HIGH_RLS_REG
1370 * MVPP2_BM_VIRT_RLS_REG
1371 * MVPP2_ISR_RX_TX_CAUSE_REG
1372 * MVPP2_ISR_RX_TX_MASK_REG
1373 * MVPP2_TXQ_NUM_REG
1374 * MVPP2_AGGR_TXQ_UPDATE_REG
1375 * MVPP2_TXQ_RSVD_REQ_REG
1376 * MVPP2_TXQ_RSVD_RSLT_REG
1377 * MVPP2_TXQ_SENT_REG
1378 * MVPP2_RXQ_NUM_REG
1379 *
1380 * - global registers that must be accessed through a specific CPU
1381 * window, because they are related to an access to a per-CPU
1382 * register
1383 *
1384 * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
1385 * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
1386 * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
1387 * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
1388 * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
1389 * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
1390 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1391 * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
1392 * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
1393 * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
1395 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1397 */
1398static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
1399 u32 offset, u32 data)
1400{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001401 writel(data, priv->swth_base[cpu] + offset);
Thomas Petazzonia7868412017-03-07 16:53:13 +01001402}
1403
1404static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
1405 u32 offset)
1406{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001407 return readl(priv->swth_base[cpu] + offset);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001408}
1409
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001410static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1411 struct mvpp2_tx_desc *tx_desc)
1412{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001413 if (port->priv->hw_version == MVPP21)
1414 return tx_desc->pp21.buf_dma_addr;
1415 else
1416 return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001417}
1418
1419static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1420 struct mvpp2_tx_desc *tx_desc,
1421 dma_addr_t dma_addr)
1422{
Antoine Tenart6eb5d372017-10-30 11:23:33 +01001423 dma_addr_t addr, offset;
1424
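	/* The hardware expects an aligned address in the descriptor; the low
	 * bits are carried separately in the packet_offset field.
	 */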
1425 addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
1426 offset = dma_addr & MVPP2_TX_DESC_ALIGN;
1427
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001428 if (port->priv->hw_version == MVPP21) {
Antoine Tenart6eb5d372017-10-30 11:23:33 +01001429 tx_desc->pp21.buf_dma_addr = addr;
1430 tx_desc->pp21.packet_offset = offset;
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001431 } else {
Antoine Tenart6eb5d372017-10-30 11:23:33 +01001432 u64 val = (u64)addr;
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001433
1434 tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
1435 tx_desc->pp22.buf_dma_addr_ptp |= val;
Antoine Tenart6eb5d372017-10-30 11:23:33 +01001436 tx_desc->pp22.packet_offset = offset;
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001437 }
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001438}
1439
1440static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
1441 struct mvpp2_tx_desc *tx_desc)
1442{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001443 if (port->priv->hw_version == MVPP21)
1444 return tx_desc->pp21.data_size;
1445 else
1446 return tx_desc->pp22.data_size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001447}
1448
1449static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1450 struct mvpp2_tx_desc *tx_desc,
1451 size_t size)
1452{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001453 if (port->priv->hw_version == MVPP21)
1454 tx_desc->pp21.data_size = size;
1455 else
1456 tx_desc->pp22.data_size = size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001457}
1458
1459static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1460 struct mvpp2_tx_desc *tx_desc,
1461 unsigned int txq)
1462{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001463 if (port->priv->hw_version == MVPP21)
1464 tx_desc->pp21.phys_txq = txq;
1465 else
1466 tx_desc->pp22.phys_txq = txq;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001467}
1468
1469static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1470 struct mvpp2_tx_desc *tx_desc,
1471 unsigned int command)
1472{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001473 if (port->priv->hw_version == MVPP21)
1474 tx_desc->pp21.command = command;
1475 else
1476 tx_desc->pp22.command = command;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001477}
1478
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001479static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
1480 struct mvpp2_tx_desc *tx_desc)
1481{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001482 if (port->priv->hw_version == MVPP21)
1483 return tx_desc->pp21.packet_offset;
1484 else
1485 return tx_desc->pp22.packet_offset;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001486}
1487
1488static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1489 struct mvpp2_rx_desc *rx_desc)
1490{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001491 if (port->priv->hw_version == MVPP21)
1492 return rx_desc->pp21.buf_dma_addr;
1493 else
1494 return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001495}
1496
1497static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1498 struct mvpp2_rx_desc *rx_desc)
1499{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001500 if (port->priv->hw_version == MVPP21)
1501 return rx_desc->pp21.buf_cookie;
1502 else
1503 return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001504}
1505
1506static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1507 struct mvpp2_rx_desc *rx_desc)
1508{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001509 if (port->priv->hw_version == MVPP21)
1510 return rx_desc->pp21.data_size;
1511 else
1512 return rx_desc->pp22.data_size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001513}
1514
1515static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1516 struct mvpp2_rx_desc *rx_desc)
1517{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001518 if (port->priv->hw_version == MVPP21)
1519 return rx_desc->pp21.status;
1520 else
1521 return rx_desc->pp22.status;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001522}
1523
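/* Advance the index of the next TX buffer to clean up, wrapping around at
 * the ring size.
 */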
Marcin Wojtas3f518502014-07-10 16:52:13 -03001524static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1525{
1526 txq_pcpu->txq_get_index++;
1527 if (txq_pcpu->txq_get_index == txq_pcpu->size)
1528 txq_pcpu->txq_get_index = 0;
1529}
1530
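/* Record the skb, DMA address and size of a queued buffer at the put index,
 * then advance the index, wrapping around at the ring size.
 */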
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001531static void mvpp2_txq_inc_put(struct mvpp2_port *port,
1532 struct mvpp2_txq_pcpu *txq_pcpu,
Marcin Wojtas71ce3912015-08-06 19:00:29 +02001533 struct sk_buff *skb,
1534 struct mvpp2_tx_desc *tx_desc)
Marcin Wojtas3f518502014-07-10 16:52:13 -03001535{
Thomas Petazzoni83544912016-12-21 11:28:49 +01001536 struct mvpp2_txq_pcpu_buf *tx_buf =
1537 txq_pcpu->buffs + txq_pcpu->txq_put_index;
1538 tx_buf->skb = skb;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001539 tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
1540 tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
1541 mvpp2_txdesc_offset_get(port, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001542 txq_pcpu->txq_put_index++;
1543 if (txq_pcpu->txq_put_index == txq_pcpu->size)
1544 txq_pcpu->txq_put_index = 0;
1545}
1546
1547/* Get number of physical egress port */
1548static inline int mvpp2_egress_port(struct mvpp2_port *port)
1549{
1550 return MVPP2_MAX_TCONT + port->id;
1551}
1552
1553/* Get number of physical TXQ */
1554static inline int mvpp2_txq_phys(int port, int txq)
1555{
1556 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1557}
1558
1559/* Parser configuration routines */
1560
1561/* Update parser tcam and sram hw entries */
1562static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1563{
1564 int i;
1565
1566 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1567 return -EINVAL;
1568
1569 /* Clear entry invalidation bit */
1570 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1571
1572 /* Write tcam index - indirect access */
1573 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1574 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1575 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1576
1577 /* Write sram index - indirect access */
1578 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1579 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1580 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1581
1582 return 0;
1583}
1584
Maxime Chevallier47e0e142018-03-26 15:34:22 +02001585/* Initialize tcam entry from hw */
1586static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
1587 struct mvpp2_prs_entry *pe, int tid)
Marcin Wojtas3f518502014-07-10 16:52:13 -03001588{
1589 int i;
1590
1591 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1592 return -EINVAL;
1593
Maxime Chevallier47e0e142018-03-26 15:34:22 +02001594 memset(pe, 0, sizeof(*pe));
1595 pe->index = tid;
1596
Marcin Wojtas3f518502014-07-10 16:52:13 -03001597 /* Write tcam index - indirect access */
1598 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1599
1600 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1601 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1602 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1603 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1604
1605 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1606 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1607
1608 /* Write sram index - indirect access */
1609 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1610 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1611 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1612
1613 return 0;
1614}
1615
1616/* Invalidate tcam hw entry */
1617static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1618{
1619 /* Write index - indirect access */
1620 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1621 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1622 MVPP2_PRS_TCAM_INV_MASK);
1623}
1624
1625/* Enable shadow table entry and set its lookup ID */
1626static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1627{
1628 priv->prs_shadow[index].valid = true;
1629 priv->prs_shadow[index].lu = lu;
1630}
1631
1632/* Update ri fields in shadow table entry */
1633static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1634 unsigned int ri, unsigned int ri_mask)
1635{
1636 priv->prs_shadow[index].ri_mask = ri_mask;
1637 priv->prs_shadow[index].ri = ri;
1638}
1639
1640/* Update lookup field in tcam sw entry */
1641static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1642{
1643 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1644
1645 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1646 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1647}
1648
1649/* Update mask for single port in tcam sw entry */
1650static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1651 unsigned int port, bool add)
1652{
1653 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1654
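	/* The enable bits are active-low: clearing a bit adds the port */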
1655 if (add)
1656 pe->tcam.byte[enable_off] &= ~(1 << port);
1657 else
1658 pe->tcam.byte[enable_off] |= 1 << port;
1659}
1660
1661/* Update port map in tcam sw entry */
1662static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1663 unsigned int ports)
1664{
1665 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1666 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1667
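	/* The enable byte is active-low, so it stores the complement of the
	 * requested port bitmap (see mvpp2_prs_tcam_port_map_get()).
	 */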
1668 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1669 pe->tcam.byte[enable_off] &= ~port_mask;
1670 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1671}
1672
1673/* Obtain port map from tcam sw entry */
1674static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1675{
1676 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1677
1678 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1679}
1680
1681/* Set byte of data and its enable bits in tcam sw entry */
1682static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1683 unsigned int offs, unsigned char byte,
1684 unsigned char enable)
1685{
1686 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1687 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1688}
1689
1690/* Get byte of data and its enable bits from tcam sw entry */
1691static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1692 unsigned int offs, unsigned char *byte,
1693 unsigned char *enable)
1694{
1695 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1696 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1697}
1698
1699/* Compare tcam data bytes with a pattern */
1700static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1701 u16 data)
1702{
1703 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1704 u16 tcam_data;
1705
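	/* Reassemble the 16-bit field from its two data bytes (stored low
	 * byte first) before comparing.
	 */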
Antoine Tenartef4816f2017-10-24 11:41:26 +02001706 tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
Marcin Wojtas3f518502014-07-10 16:52:13 -03001707 if (tcam_data != data)
1708 return false;
1709 return true;
1710}
1711
1712/* Update ai bits in tcam sw entry */
1713static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1714 unsigned int bits, unsigned int enable)
1715{
1716 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1717
1718 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1719
1720 if (!(enable & BIT(i)))
1721 continue;
1722
1723 if (bits & BIT(i))
1724 pe->tcam.byte[ai_idx] |= 1 << i;
1725 else
1726 pe->tcam.byte[ai_idx] &= ~(1 << i);
1727 }
1728
1729 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1730}
1731
1732/* Get ai bits from tcam sw entry */
1733static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1734{
1735 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1736}
1737
1738/* Set ethertype in tcam sw entry */
1739static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1740 unsigned short ethertype)
1741{
1742 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1743 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1744}
1745
Maxime Chevallier56beda32018-02-28 10:14:13 +01001746/* Set vid in tcam sw entry */
1747static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
1748 unsigned short vid)
1749{
1750 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
1751 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
1752}
1753
Marcin Wojtas3f518502014-07-10 16:52:13 -03001754/* Set bits in sram sw entry */
1755static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1756 int val)
1757{
1758 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1759}
1760
1761/* Clear bits in sram sw entry */
1762static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1763 int val)
1764{
1765 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1766}
1767
1768/* Update ri bits in sram sw entry */
1769static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1770 unsigned int bits, unsigned int mask)
1771{
1772 unsigned int i;
1773
1774 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1775 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1776
1777 if (!(mask & BIT(i)))
1778 continue;
1779
1780 if (bits & BIT(i))
1781 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1782 else
1783 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1784
1785 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1786 }
1787}
1788
1789/* Obtain ri bits from sram sw entry */
1790static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1791{
1792 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1793}
1794
1795/* Update ai bits in sram sw entry */
1796static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1797 unsigned int bits, unsigned int mask)
1798{
1799 unsigned int i;
1800 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1801
1802 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1803
1804 if (!(mask & BIT(i)))
1805 continue;
1806
1807 if (bits & BIT(i))
1808 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1809 else
1810 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1811
1812 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1813 }
1814}
1815
1816/* Read ai bits from sram sw entry */
1817static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1818{
1819 u8 bits;
1820 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1821 int ai_en_off = ai_off + 1;
1822 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1823
1824 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1825 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1826
1827 return bits;
1828}
1829
1830/* In sram sw entry set lookup ID field of the tcam key to be used in the next
1831 * lookup iteration
1832 */
1833static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1834 unsigned int lu)
1835{
1836 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1837
1838 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1839 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1840 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1841}
1842
1843/* In the sram sw entry set sign and value of the next lookup offset
1844 * and the offset value generated to the classifier
1845 */
1846static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1847 unsigned int op)
1848{
1849 /* Set sign */
1850 if (shift < 0) {
1851 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1852 shift = 0 - shift;
1853 } else {
1854 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1855 }
1856
1857 /* Set value */
1858 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1859 (unsigned char)shift;
1860
1861 /* Reset and set operation */
1862 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1863 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1864 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1865
1866 /* Set base offset as current */
1867 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1868}
1869
1870/* In the sram sw entry set sign and value of the user defined offset
1871 * generated to the classifier
1872 */
1873static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1874 unsigned int type, int offset,
1875 unsigned int op)
1876{
1877 /* Set sign */
1878 if (offset < 0) {
1879 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1880 offset = 0 - offset;
1881 } else {
1882 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1883 }
1884
1885 /* Set value */
1886 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1887 MVPP2_PRS_SRAM_UDF_MASK);
1888 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1889 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1890 MVPP2_PRS_SRAM_UDF_BITS)] &=
1891 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1892 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1893 MVPP2_PRS_SRAM_UDF_BITS)] |=
1894 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1895
1896 /* Set offset type */
1897 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1898 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1899 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1900
1901 /* Set offset operation */
1902 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1903 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1904 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1905
1906 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1907 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1908 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1909 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1910
1911 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1912 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1913 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1914
1915 /* Set base offset as current */
1916 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1917}
1918
1919/* Find parser flow entry */
1920static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1921{
1922 struct mvpp2_prs_entry *pe;
1923 int tid;
1924
1925 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1926 if (!pe)
1927 return NULL;
1928 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1929
1930	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
1931 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1932 u8 bits;
1933
1934 if (!priv->prs_shadow[tid].valid ||
1935 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1936 continue;
1937
Maxime Chevallier47e0e142018-03-26 15:34:22 +02001938 mvpp2_prs_init_from_hw(priv, pe, tid);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001939 bits = mvpp2_prs_sram_ai_get(pe);
1940
1941		/* Sram stores the classification lookup ID in AI bits [5:0] */
1942 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1943 return pe;
1944 }
1945 kfree(pe);
1946
1947 return NULL;
1948}
1949
1950/* Return first free tcam index, seeking from start to end */
1951static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1952 unsigned char end)
1953{
1954 int tid;
1955
1956 if (start > end)
1957 swap(start, end);
1958
1959 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1960 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1961
1962 for (tid = start; tid <= end; tid++) {
1963 if (!priv->prs_shadow[tid].valid)
1964 return tid;
1965 }
1966
1967 return -EINVAL;
1968}
1969
1970/* Enable/disable dropping all mac da's */
1971static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1972{
1973 struct mvpp2_prs_entry pe;
1974
1975 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1976		/* Entry exists - update port only */
Maxime Chevallier47e0e142018-03-26 15:34:22 +02001977 mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001978 } else {
1979 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001980 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001981 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1982 pe.index = MVPP2_PE_DROP_ALL;
1983
1984 /* Non-promiscuous mode for all ports - DROP unknown packets */
1985 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1986 MVPP2_PRS_RI_DROP_MASK);
1987
1988 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1989 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1990
1991 /* Update shadow table */
1992 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1993
1994 /* Mask all ports */
1995 mvpp2_prs_tcam_port_map_set(&pe, 0);
1996 }
1997
1998 /* Update port mask */
1999 mvpp2_prs_tcam_port_set(&pe, port, add);
2000
2001 mvpp2_prs_hw_write(priv, &pe);
2002}
2003
Maxime Chevallier10fea262018-03-07 15:18:04 +01002004/* Set port to unicast or multicast promiscuous mode */
2005static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
2006 enum mvpp2_prs_l2_cast l2_cast, bool add)
Marcin Wojtas3f518502014-07-10 16:52:13 -03002007{
2008 struct mvpp2_prs_entry pe;
Maxime Chevallier10fea262018-03-07 15:18:04 +01002009 unsigned char cast_match;
2010 unsigned int ri;
2011 int tid;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002012
Maxime Chevallier10fea262018-03-07 15:18:04 +01002013 if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
2014 cast_match = MVPP2_PRS_UCAST_VAL;
2015 tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
2016 ri = MVPP2_PRS_RI_L2_UCAST;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002017 } else {
Maxime Chevallier10fea262018-03-07 15:18:04 +01002018 cast_match = MVPP2_PRS_MCAST_VAL;
2019 tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
2020 ri = MVPP2_PRS_RI_L2_MCAST;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002021 }
2022
Maxime Chevallier10fea262018-03-07 15:18:04 +01002023	/* Promiscuous mode - accept unknown unicast or multicast packets */
2024 if (priv->prs_shadow[tid].valid) {
Maxime Chevallier47e0e142018-03-26 15:34:22 +02002025 mvpp2_prs_init_from_hw(priv, &pe, tid);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002026 } else {
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002027 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002028 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
Maxime Chevallier10fea262018-03-07 15:18:04 +01002029 pe.index = tid;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002030
2031 /* Continue - set next lookup */
2032 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
2033
2034 /* Set result info bits */
Maxime Chevallier10fea262018-03-07 15:18:04 +01002035 mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002036
Maxime Chevallier10fea262018-03-07 15:18:04 +01002037 /* Match UC or MC addresses */
2038 mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
2039 MVPP2_PRS_CAST_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002040
2041 /* Shift to ethertype */
2042 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
2043 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2044
2045 /* Mask all ports */
2046 mvpp2_prs_tcam_port_map_set(&pe, 0);
2047
2048 /* Update shadow table */
2049 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2050 }
2051
2052 /* Update port mask */
2053 mvpp2_prs_tcam_port_set(&pe, port, add);
2054
2055 mvpp2_prs_hw_write(priv, &pe);
2056}
2057
2058/* Set entry for dsa packets */
2059static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
2060 bool tagged, bool extend)
2061{
2062 struct mvpp2_prs_entry pe;
2063 int tid, shift;
2064
2065 if (extend) {
2066 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
2067 shift = 8;
2068 } else {
2069 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
2070 shift = 4;
2071 }
2072
2073 if (priv->prs_shadow[tid].valid) {
2074		/* Entry exists - update port only */
Maxime Chevallier47e0e142018-03-26 15:34:22 +02002075 mvpp2_prs_init_from_hw(priv, &pe, tid);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002076 } else {
2077 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002078 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002079 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2080 pe.index = tid;
2081
Marcin Wojtas3f518502014-07-10 16:52:13 -03002082 /* Update shadow table */
2083 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2084
2085 if (tagged) {
2086 /* Set tagged bit in DSA tag */
2087 mvpp2_prs_tcam_data_byte_set(&pe, 0,
Maxime Chevallier56beda32018-02-28 10:14:13 +01002088 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
2089 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
2090
2091 /* Set ai bits for next iteration */
2092 if (extend)
2093 mvpp2_prs_sram_ai_update(&pe, 1,
2094 MVPP2_PRS_SRAM_AI_MASK);
2095 else
2096 mvpp2_prs_sram_ai_update(&pe, 0,
2097 MVPP2_PRS_SRAM_AI_MASK);
2098
2099			/* If packet is tagged, continue with vid filtering */
2100 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002101 } else {
Maxime Chevallier56beda32018-02-28 10:14:13 +01002102			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
2103 mvpp2_prs_sram_shift_set(&pe, shift,
2104 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2105
Marcin Wojtas3f518502014-07-10 16:52:13 -03002106 /* Set result info bits to 'no vlans' */
2107 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2108 MVPP2_PRS_RI_VLAN_MASK);
2109 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2110 }
2111
2112 /* Mask all ports */
2113 mvpp2_prs_tcam_port_map_set(&pe, 0);
2114 }
2115
2116 /* Update port mask */
2117 mvpp2_prs_tcam_port_set(&pe, port, add);
2118
2119 mvpp2_prs_hw_write(priv, &pe);
2120}
2121
2122/* Set entry for dsa ethertype */
2123static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
2124 bool add, bool tagged, bool extend)
2125{
2126 struct mvpp2_prs_entry pe;
2127 int tid, shift, port_mask;
2128
2129 if (extend) {
2130 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
2131 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
2132 port_mask = 0;
2133 shift = 8;
2134 } else {
2135 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
2136 MVPP2_PE_ETYPE_DSA_UNTAGGED;
2137 port_mask = MVPP2_PRS_PORT_MASK;
2138 shift = 4;
2139 }
2140
2141 if (priv->prs_shadow[tid].valid) {
2142		/* Entry exists - update port only */
Maxime Chevallier47e0e142018-03-26 15:34:22 +02002143 mvpp2_prs_init_from_hw(priv, &pe, tid);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002144 } else {
2145 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002146 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002147 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2148 pe.index = tid;
2149
2150 /* Set ethertype */
2151 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
2152 mvpp2_prs_match_etype(&pe, 2, 0);
2153
2154 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
2155 MVPP2_PRS_RI_DSA_MASK);
2156		/* Shift ethertype + 2 bytes reserved + tag */
2157 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
2158 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2159
2160 /* Update shadow table */
2161 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2162
2163 if (tagged) {
2164 /* Set tagged bit in DSA tag */
2165 mvpp2_prs_tcam_data_byte_set(&pe,
2166 MVPP2_ETH_TYPE_LEN + 2 + 3,
2167 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
2168 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
2169 /* Clear all ai bits for next iteration */
2170 mvpp2_prs_sram_ai_update(&pe, 0,
2171 MVPP2_PRS_SRAM_AI_MASK);
2172			/* If packet is tagged, continue checking vlans */
2173 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2174 } else {
2175 /* Set result info bits to 'no vlans' */
2176 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2177 MVPP2_PRS_RI_VLAN_MASK);
2178 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2179 }
2180 /* Mask/unmask all ports, depending on dsa type */
2181 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
2182 }
2183
2184 /* Update port mask */
2185 mvpp2_prs_tcam_port_set(&pe, port, add);
2186
2187 mvpp2_prs_hw_write(priv, &pe);
2188}
2189
2190/* Search for existing single/triple vlan entry */
2191static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
2192 unsigned short tpid, int ai)
2193{
2194 struct mvpp2_prs_entry *pe;
2195 int tid;
2196
2197 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2198 if (!pe)
2199 return NULL;
2200 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2201
2202	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
2203 for (tid = MVPP2_PE_FIRST_FREE_TID;
2204 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2205 unsigned int ri_bits, ai_bits;
2206 bool match;
2207
2208 if (!priv->prs_shadow[tid].valid ||
2209 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2210 continue;
2211
Maxime Chevallier47e0e142018-03-26 15:34:22 +02002212 mvpp2_prs_init_from_hw(priv, pe, tid);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002213 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
2214 if (!match)
2215 continue;
2216
2217 /* Get vlan type */
2218 ri_bits = mvpp2_prs_sram_ri_get(pe);
2219 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2220
2221 /* Get current ai value from tcam */
2222 ai_bits = mvpp2_prs_tcam_ai_get(pe);
2223 /* Clear double vlan bit */
2224 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
2225
2226 if (ai != ai_bits)
2227 continue;
2228
2229 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2230 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2231 return pe;
2232 }
2233 kfree(pe);
2234
2235 return NULL;
2236}
2237
2238/* Add/update single/triple vlan entry */
2239static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
2240 unsigned int port_map)
2241{
2242 struct mvpp2_prs_entry *pe;
2243 int tid_aux, tid;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302244 int ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002245
2246 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
2247
2248 if (!pe) {
2249 /* Create new tcam entry */
2250 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
2251 MVPP2_PE_FIRST_FREE_TID);
2252 if (tid < 0)
2253 return tid;
2254
2255 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2256 if (!pe)
2257 return -ENOMEM;
2258
2259 /* Get last double vlan tid */
2260 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
2261 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
2262 unsigned int ri_bits;
2263
2264 if (!priv->prs_shadow[tid_aux].valid ||
2265 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2266 continue;
2267
Maxime Chevallier47e0e142018-03-26 15:34:22 +02002268 mvpp2_prs_init_from_hw(priv, pe, tid_aux);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002269 ri_bits = mvpp2_prs_sram_ri_get(pe);
2270 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
2271 MVPP2_PRS_RI_VLAN_DOUBLE)
2272 break;
2273 }
2274
Sudip Mukherjee43737472014-11-01 16:59:34 +05302275 if (tid <= tid_aux) {
2276 ret = -EINVAL;
Markus Elfringf9fd0e32017-04-17 13:50:35 +02002277 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302278 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002279
Markus Elfringbd6aaf52017-04-17 10:40:32 +02002280 memset(pe, 0, sizeof(*pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002281 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2282 pe->index = tid;
2283
2284 mvpp2_prs_match_etype(pe, 0, tpid);
2285
Maxime Chevallier56beda32018-02-28 10:14:13 +01002286 /* VLAN tag detected, proceed with VID filtering */
2287 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VID);
2288
Marcin Wojtas3f518502014-07-10 16:52:13 -03002289 /* Clear all ai bits for next iteration */
2290 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2291
2292 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2293 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
2294 MVPP2_PRS_RI_VLAN_MASK);
2295 } else {
2296 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2297 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2298 MVPP2_PRS_RI_VLAN_MASK);
2299 }
2300 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2301
2302 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2303 }
2304 /* Update ports' mask */
2305 mvpp2_prs_tcam_port_map_set(pe, port_map);
2306
2307 mvpp2_prs_hw_write(priv, pe);
Markus Elfringf9fd0e32017-04-17 13:50:35 +02002308free_pe:
Marcin Wojtas3f518502014-07-10 16:52:13 -03002309 kfree(pe);
2310
Sudip Mukherjee43737472014-11-01 16:59:34 +05302311 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002312}
2313
2314/* Get first free double vlan ai number */
2315static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2316{
2317 int i;
2318
2319 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2320 if (!priv->prs_double_vlans[i])
2321 return i;
2322 }
2323
2324 return -EINVAL;
2325}
2326
2327/* Search for existing double vlan entry */
2328static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
2329 unsigned short tpid1,
2330 unsigned short tpid2)
2331{
2332 struct mvpp2_prs_entry *pe;
2333 int tid;
2334
2335 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2336 if (!pe)
2337 return NULL;
2338 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2339
2340	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
2341 for (tid = MVPP2_PE_FIRST_FREE_TID;
2342 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2343 unsigned int ri_mask;
2344 bool match;
2345
2346 if (!priv->prs_shadow[tid].valid ||
2347 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2348 continue;
2349
Maxime Chevallier47e0e142018-03-26 15:34:22 +02002350 mvpp2_prs_init_from_hw(priv, pe, tid);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002351
2352 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
2353 && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
2354
2355 if (!match)
2356 continue;
2357
2358 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
2359 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2360 return pe;
2361 }
2362 kfree(pe);
2363
2364 return NULL;
2365}
2366
2367/* Add or update double vlan entry */
2368static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2369 unsigned short tpid2,
2370 unsigned int port_map)
2371{
2372 struct mvpp2_prs_entry *pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302373 int tid_aux, tid, ai, ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002374
2375 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2376
2377 if (!pe) {
2378 /* Create new tcam entry */
2379 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2380 MVPP2_PE_LAST_FREE_TID);
2381 if (tid < 0)
2382 return tid;
2383
2384 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2385 if (!pe)
2386 return -ENOMEM;
2387
2388 /* Set ai value for new double vlan entry */
2389 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
Sudip Mukherjee43737472014-11-01 16:59:34 +05302390 if (ai < 0) {
2391 ret = ai;
Markus Elfringc9a7e122017-04-17 13:03:49 +02002392 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302393 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002394
2395 /* Get first single/triple vlan tid */
2396 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2397 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2398 unsigned int ri_bits;
2399
2400 if (!priv->prs_shadow[tid_aux].valid ||
2401 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2402 continue;
2403
Maxime Chevallier47e0e142018-03-26 15:34:22 +02002404 mvpp2_prs_init_from_hw(priv, pe, tid_aux);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002405 ri_bits = mvpp2_prs_sram_ri_get(pe);
2406 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2407 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2408 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2409 break;
2410 }
2411
Sudip Mukherjee43737472014-11-01 16:59:34 +05302412 if (tid >= tid_aux) {
2413 ret = -ERANGE;
Markus Elfringc9a7e122017-04-17 13:03:49 +02002414 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302415 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002416
Markus Elfringbd6aaf52017-04-17 10:40:32 +02002417 memset(pe, 0, sizeof(*pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002418 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2419 pe->index = tid;
2420
2421 priv->prs_double_vlans[ai] = true;
2422
2423 mvpp2_prs_match_etype(pe, 0, tpid1);
2424 mvpp2_prs_match_etype(pe, 4, tpid2);
2425
2426 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
Maxime Chevallier56beda32018-02-28 10:14:13 +01002427 /* Shift 4 bytes - skip outer vlan tag */
2428 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
Marcin Wojtas3f518502014-07-10 16:52:13 -03002429 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2430 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2431 MVPP2_PRS_RI_VLAN_MASK);
2432 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2433 MVPP2_PRS_SRAM_AI_MASK);
2434
2435 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2436 }
2437
2438 /* Update ports' mask */
2439 mvpp2_prs_tcam_port_map_set(pe, port_map);
2440 mvpp2_prs_hw_write(priv, pe);
Markus Elfringc9a7e122017-04-17 13:03:49 +02002441free_pe:
Marcin Wojtas3f518502014-07-10 16:52:13 -03002442 kfree(pe);
Sudip Mukherjee43737472014-11-01 16:59:34 +05302443 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002444}
2445
2446/* IPv4 header parsing for fragmentation and L4 offset */
2447static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2448 unsigned int ri, unsigned int ri_mask)
2449{
2450 struct mvpp2_prs_entry pe;
2451 int tid;
2452
2453 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2454 (proto != IPPROTO_IGMP))
2455 return -EINVAL;
2456
Stefan Chulskiaff3da32017-09-25 14:59:46 +02002457 /* Not fragmented packet */
Marcin Wojtas3f518502014-07-10 16:52:13 -03002458 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2459 MVPP2_PE_LAST_FREE_TID);
2460 if (tid < 0)
2461 return tid;
2462
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002463 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002464 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2465 pe.index = tid;
2466
2467 /* Set next lu to IPv4 */
2468 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2469 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2470 /* Set L4 offset */
2471 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2472 sizeof(struct iphdr) - 4,
2473 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2474 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2475 MVPP2_PRS_IPV4_DIP_AI_BIT);
Stefan Chulskiaff3da32017-09-25 14:59:46 +02002476 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2477
2478 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
2479 MVPP2_PRS_TCAM_PROTO_MASK_L);
2480 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
2481 MVPP2_PRS_TCAM_PROTO_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002482
2483 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2484 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2485 /* Unmask all ports */
2486 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2487
2488 /* Update shadow table and hw entry */
2489 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2490 mvpp2_prs_hw_write(priv, &pe);
2491
Stefan Chulskiaff3da32017-09-25 14:59:46 +02002492 /* Fragmented packet */
Marcin Wojtas3f518502014-07-10 16:52:13 -03002493 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2494 MVPP2_PE_LAST_FREE_TID);
2495 if (tid < 0)
2496 return tid;
2497
2498 pe.index = tid;
2499 /* Clear ri before updating */
2500 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2501 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2502 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2503
Stefan Chulskiaff3da32017-09-25 14:59:46 +02002504 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
2505 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2506
2507 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
2508 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002509
2510 /* Update shadow table and hw entry */
2511 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2512 mvpp2_prs_hw_write(priv, &pe);
2513
2514 return 0;
2515}
2516
2517/* IPv4 L3 multicast or broadcast */
2518static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2519{
2520 struct mvpp2_prs_entry pe;
2521 int mask, tid;
2522
2523 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2524 MVPP2_PE_LAST_FREE_TID);
2525 if (tid < 0)
2526 return tid;
2527
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002528 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002529 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2530 pe.index = tid;
2531
2532 switch (l3_cast) {
2533 case MVPP2_PRS_L3_MULTI_CAST:
2534 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2535 MVPP2_PRS_IPV4_MC_MASK);
2536 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2537 MVPP2_PRS_RI_L3_ADDR_MASK);
2538 break;
2539 case MVPP2_PRS_L3_BROAD_CAST:
2540 mask = MVPP2_PRS_IPV4_BC_MASK;
2541 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2542 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2543 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2544 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2545 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2546 MVPP2_PRS_RI_L3_ADDR_MASK);
2547 break;
2548 default:
2549 return -EINVAL;
2550 }
2551
2552 /* Finished: go to flowid generation */
2553 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2554 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2555
2556 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2557 MVPP2_PRS_IPV4_DIP_AI_BIT);
2558 /* Unmask all ports */
2559 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2560
2561 /* Update shadow table and hw entry */
2562 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2563 mvpp2_prs_hw_write(priv, &pe);
2564
2565 return 0;
2566}
2567
2568/* Set entries for protocols over IPv6 */
2569static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2570 unsigned int ri, unsigned int ri_mask)
2571{
2572 struct mvpp2_prs_entry pe;
2573 int tid;
2574
2575 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2576 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2577 return -EINVAL;
2578
2579 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2580 MVPP2_PE_LAST_FREE_TID);
2581 if (tid < 0)
2582 return tid;
2583
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002584 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002585 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2586 pe.index = tid;
2587
2588 /* Finished: go to flowid generation */
2589 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2590 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2591 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2592 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2593 sizeof(struct ipv6hdr) - 6,
2594 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2595
2596 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2597 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2598 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2599 /* Unmask all ports */
2600 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2601
2602 /* Write HW */
2603 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2604 mvpp2_prs_hw_write(priv, &pe);
2605
2606 return 0;
2607}
2608
2609/* IPv6 L3 multicast entry */
2610static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2611{
2612 struct mvpp2_prs_entry pe;
2613 int tid;
2614
2615 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2616 return -EINVAL;
2617
2618 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2619 MVPP2_PE_LAST_FREE_TID);
2620 if (tid < 0)
2621 return tid;
2622
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002623 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002624 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2625 pe.index = tid;
2626
2627 /* Finished: go to flowid generation */
2628 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2629 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2630 MVPP2_PRS_RI_L3_ADDR_MASK);
2631 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2632 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2633 /* Shift back to IPv6 NH */
2634 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2635
2636 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2637 MVPP2_PRS_IPV6_MC_MASK);
2638 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2639 /* Unmask all ports */
2640 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2641
2642 /* Update shadow table and hw entry */
2643 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2644 mvpp2_prs_hw_write(priv, &pe);
2645
2646 return 0;
2647}
2648
2649/* Parser per-port initialization */
2650static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2651 int lu_max, int offset)
2652{
2653 u32 val;
2654
2655 /* Set lookup ID */
2656 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2657 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2658 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2659 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2660
2661 /* Set maximum number of loops for packet received from port */
2662 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2663 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2664 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2665 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2666
2667 /* Set initial offset for packet header extraction for the first
2668 * searching loop
2669 */
2670 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2671 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2672 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2673 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2674}
2675
2676/* Default flow entries initialization for all ports */
2677static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2678{
2679 struct mvpp2_prs_entry pe;
2680 int port;
2681
2682 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002683 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002684 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2685 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2686
2687 /* Mask all ports */
2688 mvpp2_prs_tcam_port_map_set(&pe, 0);
2689
2690		/* Set flow ID */
2691 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2692 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2693
2694 /* Update shadow table and hw entry */
2695 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2696 mvpp2_prs_hw_write(priv, &pe);
2697 }
2698}
2699
2700/* Set default entry for Marvell Header field */
2701static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2702{
2703 struct mvpp2_prs_entry pe;
2704
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002705 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002706
2707 pe.index = MVPP2_PE_MH_DEFAULT;
2708 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2709 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2710 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2711 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2712
2713 /* Unmask all ports */
2714 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2715
2716 /* Update shadow table and hw entry */
2717 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2718 mvpp2_prs_hw_write(priv, &pe);
2719}
2720
2721/* Set default entries (placeholder) for promiscuous, non-promiscuous and
2722 * multicast MAC addresses
2723 */
2724static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2725{
2726 struct mvpp2_prs_entry pe;
2727
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002728 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002729
2730 /* Non-promiscuous mode for all ports - DROP unknown packets */
2731 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2732 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2733
2734 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2735 MVPP2_PRS_RI_DROP_MASK);
2736 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2737 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2738
2739 /* Unmask all ports */
2740 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2741
2742 /* Update shadow table and hw entry */
2743 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2744 mvpp2_prs_hw_write(priv, &pe);
2745
Maxime Chevallier10fea262018-03-07 15:18:04 +01002746 /* Create dummy entries for drop all and promiscuous modes */
Marcin Wojtas3f518502014-07-10 16:52:13 -03002747 mvpp2_prs_mac_drop_all_set(priv, 0, false);
Maxime Chevallier10fea262018-03-07 15:18:04 +01002748 mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
2749 mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002750}
2751
2752/* Set default entries for various types of dsa packets */
2753static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2754{
2755 struct mvpp2_prs_entry pe;
2756
2757	/* Untagged EDSA entry - placeholder */
2758 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2759 MVPP2_PRS_EDSA);
2760
2761	/* Tagged EDSA entry - placeholder */
2762 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2763
2764	/* Untagged DSA entry - placeholder */
2765 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2766 MVPP2_PRS_DSA);
2767
2768 /* Tagged DSA entry - place holder */
2769	/* Tagged DSA entry - placeholder */
2770
2771	/* Untagged EDSA ethertype entry - placeholder */
2772 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2773 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2774
2775	/* Tagged EDSA ethertype entry - placeholder */
2776 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2777 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2778
2779	/* Untagged DSA ethertype entry */
2780 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2781 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2782
2783 /* Tagged DSA ethertype entry */
2784 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2785 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2786
2787 /* Set default entry, in case DSA or EDSA tag not found */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002788 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002789 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2790 pe.index = MVPP2_PE_DSA_DEFAULT;
2791 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2792
2793 /* Shift 0 bytes */
2794 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2795 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2796
2797 /* Clear all sram ai bits for next iteration */
2798 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2799
2800 /* Unmask all ports */
2801 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2802
2803 mvpp2_prs_hw_write(priv, &pe);
2804}

/* Initialize parser entries for VID filtering */
static void mvpp2_prs_vid_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 4 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vid entry for extended DSA */
	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
				 MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
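
/* Note: the two default VID entries above differ only in the AI bit and the
 * skip length: the plain entry skips a single VLAN tag (4 bytes), while the
 * extended-DSA variant, matched via MVPP2_PRS_EDSA_VID_AI_BIT, skips 8 bytes
 * before handing over to the L2 lookup.
 */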

/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethertype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
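
/* Note: the "IPv4 with options" entry above deliberately reuses the TCAM/SRAM
 * state left in 'pe' by the "IPv4 without options" entry: only the IHL match
 * byte and the result-info words are cleared and rewritten, so the shift and
 * L3 offset settings carry over unchanged. The two entries must therefore be
 * installed in this order.
 */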

/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88A8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
					      MVPP2_PRS_DBL_VLANS_MAX,
					      sizeof(bool), GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
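
/* Note: single-VLAN entries carry MVPP2_PRS_SINGLE_VLAN_AI and proceed to the
 * L2 lookup, while the double-VLAN default entry marks the packet with
 * MVPP2_PRS_RI_VLAN_DOUBLE and continues into the VID lookup that backs the
 * per-port VLAN filtering configured elsewhere in this file.
 */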

/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
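
/* Note: as in mvpp2_prs_etype_init(), the PPPoE "IPv4 without options" entry
 * reuses 'pe' from the preceding "with options" entry and only rewrites the
 * IHL match and the result-info words, so the install order of the two
 * entries is significant here as well.
 */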

/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
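
/* Note: IPv4 parsing is a two-pass affair distinguished by
 * MVPP2_PRS_IPV4_DIP_AI_BIT: the unknown-protocol entry matches with the bit
 * clear, shifts 12 bytes so the next IPv4 pass sits on the DIP field, and
 * sets the bit; the unicast address entry then only matches once the bit is
 * set.
 */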

/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relative to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
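
/* Note: the unicast IPv6 entry mirrors the IPv4 DIP AI scheme, here with
 * MVPP2_PRS_IPV6_NO_EXT_AI_BIT telling the address pass from the protocol
 * pass. The -18 shift presumably moves the parser from the start of the DIP
 * (IPv6 offset 24) back to the next-header field at offset 6, matching the
 * "Shift back to IPV6 NH" comment above.
 */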

/* Find tcam entry with matched pair <vid, port> */
static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
				    u16 mask)
{
	unsigned char byte[2], enable[2];
	struct mvpp2_prs_entry pe;
	u16 rvid, rmask;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_VID */
	for (tid = MVPP2_PE_VID_FILT_RANGE_START;
	     tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);

		rvid = ((byte[0] & 0xf) << 8) + byte[1];
		rmask = ((enable[0] & 0xf) << 8) + enable[1];

		if (rvid != vid || rmask != mask)
			continue;

		return tid;
	}

	return 0;
}
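
/* Note: a VID is spread over two TCAM data bytes, with the upper nibble in
 * byte 2 and the low byte in byte 3. For example, vid 0x123 with mask 0xfff
 * is stored as byte[0] = 0x01, byte[1] = 0x23 and reassembled above as
 * rvid = ((byte[0] & 0xf) << 8) + byte[1]. A return value of 0 means "not
 * found", which presumably works because the VID filter range starts above
 * TCAM index 0.
 */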

/* Write parser entry for VID filtering */
static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
{
	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
	unsigned int mask = 0xfff, reg_val, shift;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	int tid;

	/* Scan TCAM and see if entry with this <vid, port> already exists */
	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);

	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	/* No such entry */
	if (!tid) {
		memset(&pe, 0, sizeof(pe));

		/* Go through all entries from first to last in vlan range */
		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
						vid_start +
						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);

		/* There isn't room for a new VID filter */
		if (tid < 0)
			return tid;

		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Enable the current port */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set match on VID */
	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
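
/* Note: each port owns a fixed window of MVPP2_PRS_VLAN_FILT_MAX TCAM slots
 * starting at vid_start, so VID filters from different ports cannot collide.
 * A newly created filter starts with an all-zero port map and is then
 * enabled for the owning port only.
 */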

/* Remove parser entry for VID filtering */
static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	/* Scan TCAM and see if entry with this <vid, port> already exists */
	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);

	/* No such entry */
	if (!tid)
		return;

	mvpp2_prs_hw_inv(priv, tid);
	priv->prs_shadow[tid].valid = false;
}

/* Remove all existing VID filters on this port */
static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
		if (priv->prs_shadow[tid].valid) {
			/* tid is a TCAM index, not a vid, so invalidate the
			 * entry directly rather than doing a vid lookup
			 */
			mvpp2_prs_hw_inv(priv, tid);
			priv->prs_shadow[tid].valid = false;
		}
	}
}

/* Remove VID filtering entry for this port */
static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;

	/* Invalidate the guard entry */
	mvpp2_prs_hw_inv(priv, tid);

	priv->prs_shadow[tid].valid = false;
}

/* Add guard entry that drops packets when no VID is matched on this port */
static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;
	unsigned int reg_val, shift;
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[tid].valid)
		return;

	memset(&pe, 0, sizeof(pe));

	pe.index = tid;

	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Drop VLAN packets that don't belong to any VIDs on this port */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
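
/* Note: the guard entry is what makes VID filtering opt-in: while it is
 * valid, any VLAN packet on this port that matched no explicit VID filter is
 * dropped via MVPP2_PRS_RI_DROP_MASK; mvpp2_prs_vid_disable_filtering()
 * simply invalidates it to fall back to accepting all VIDs.
 */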

/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
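
/* Note: every port's lookup chain is forced to start at MVPP2_PRS_LU_MH, and
 * each *_init() above installs the default (catch-all) entries for its
 * lookup stage; per-port entries such as MAC DAs and VID filters are only
 * layered on top of these defaults at runtime.
 */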

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		mvpp2_prs_init_from_hw(priv, pe, tid);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da,
				   bool add)
{
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mvpp2 *priv = port->priv;
	unsigned int pmap, len, ri;
	struct mvpp2_prs_entry *pe;
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv,
						MVPP2_PE_MAC_RANGE_START,
						MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port->id, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_BCAST;
	} else if (is_multicast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_MCAST;
	} else {
		ri = MVPP2_PRS_RI_L2_UCAST;

		if (ether_addr_equal(da, port->dev->dev_addr))
			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
	}

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}
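
/* Note: mvpp2_prs_mac_da_accept() is effectively reference-counted through
 * the TCAM port map rather than per-entry bookkeeping: adding a DA for a
 * second port just sets another bit in the shared entry's port map, and the
 * entry is only invalidated once the last port bit is cleared.
 */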

static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}

static void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	unsigned long pmap;
	int index, tid;

	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* We only want entries active on this port */
		if (!test_bit(port->id, &pmap))
			continue;

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		/* Special cases: don't remove broadcast entries or the
		 * port's own address
		 */
		if (is_broadcast_ether_addr(da) ||
		    ether_addr_equal(da, port->dev->dev_addr))
			continue;

		/* Remove entry from TCAM */
		mvpp2_prs_mac_da_accept(port, da, false);
	}
}

static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}

/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}

/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}

static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}
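
/* Note: buffers up to PAGE_SIZE come from the page-fragment allocator
 * (netdev_alloc_frag()), which is typically cheaper than kmalloc() for rx
 * buffers; larger (e.g. jumbo) frag sizes fall back to kmalloc(GFP_ATOMIC).
 * mvpp2_frag_free() mirrors the same size test so each buffer is returned to
 * the allocator it came from.
 */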

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	int cpu = get_cpu();

	*dma_addr = mvpp2_percpu_read(priv, cpu,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv,
				  struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
		MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
		MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}
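
/* Note: a minimal sketch of how the counter read above is meant to be used,
 * mirroring mvpp2_bm_pool_destroy() below:
 *
 *	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
 *	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
 *
 * The "+ 1" compensates for the buffer the BM keeps staged internally, which
 * the pool/BPPI counters do not report.
 */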
4301
Marcin Wojtas3f518502014-07-10 16:52:13 -03004302/* Cleanup pool */
4303static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
4304 struct mvpp2 *priv,
4305 struct mvpp2_bm_pool *bm_pool)
4306{
Stefan Chulskieffbf5f2018-03-05 15:16:51 +01004307 int buf_num;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004308 u32 val;
4309
Stefan Chulskieffbf5f2018-03-05 15:16:51 +01004310 buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
4311 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
4312
4313 /* Check buffer counters after free */
4314 buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
4315 if (buf_num) {
4316 WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
4317 bm_pool->id, bm_pool->buf_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004318 return 0;
4319 }
4320
4321 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
4322 val |= MVPP2_BM_STOP_MASK;
4323 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
4324
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004325 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
Marcin Wojtas3f518502014-07-10 16:52:13 -03004326 bm_pool->virt_addr,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004327 bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004328 return 0;
4329}
4330
4331static int mvpp2_bm_pools_init(struct platform_device *pdev,
4332 struct mvpp2 *priv)
4333{
4334 int i, err, size;
4335 struct mvpp2_bm_pool *bm_pool;
4336
4337 /* Create all pools with maximum size */
4338 size = MVPP2_BM_POOL_SIZE_MAX;
4339 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4340 bm_pool = &priv->bm_pools[i];
4341 bm_pool->id = i;
4342 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
4343 if (err)
4344 goto err_unroll_pools;
4345 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
4346 }
4347 return 0;
4348
4349err_unroll_pools:
4350 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
4351 for (i = i - 1; i >= 0; i--)
4352 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
4353 return err;
4354}
4355
4356static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
4357{
4358 int i, err;
4359
4360 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4361 /* Mask BM all interrupts */
4362 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
4363 /* Clear BM cause register */
4364 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
4365 }
4366
4367 /* Allocate and initialize BM pools */
4368 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
Markus Elfring81f915e2017-04-17 09:06:33 +02004369 sizeof(*priv->bm_pools), GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004370 if (!priv->bm_pools)
4371 return -ENOMEM;
4372
4373 err = mvpp2_bm_pools_init(pdev, priv);
4374 if (err < 0)
4375 return err;
4376 return 0;
4377}
4378
Stefan Chulski01d04932018-03-05 15:16:50 +01004379static void mvpp2_setup_bm_pool(void)
4380{
4381 /* Short pool */
4382 mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
4383 mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
4384
4385 /* Long pool */
4386 mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
4387 mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
Stefan Chulski576193f2018-03-05 15:16:54 +01004388
4389 /* Jumbo pool */
4390 mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
4391 mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
Stefan Chulski01d04932018-03-05 15:16:50 +01004392}
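
/* Note (illustrative): the values set above only describe the three logical
 * pools (short, long, jumbo); buffers are actually allocated later, when a
 * port attaches to a pool through mvpp2_bm_pool_use(), see
 * mvpp2_swf_bm_pool_init() below.
 */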
4393
Marcin Wojtas3f518502014-07-10 16:52:13 -03004394/* Attach long pool to rxq */
4395static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
4396 int lrxq, int long_pool)
4397{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004398 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004399 int prxq;
4400
4401 /* Get queue physical ID */
4402 prxq = port->rxqs[lrxq]->id;
4403
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004404 if (port->priv->hw_version == MVPP21)
4405 mask = MVPP21_RXQ_POOL_LONG_MASK;
4406 else
4407 mask = MVPP22_RXQ_POOL_LONG_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004408
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004409 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4410 val &= ~mask;
4411 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004412 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4413}
4414
4415/* Attach short pool to rxq */
4416static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
4417 int lrxq, int short_pool)
4418{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004419 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004420 int prxq;
4421
4422 /* Get queue physical ID */
4423 prxq = port->rxqs[lrxq]->id;
4424
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004425 if (port->priv->hw_version == MVPP21)
4426 mask = MVPP21_RXQ_POOL_SHORT_MASK;
4427 else
4428 mask = MVPP22_RXQ_POOL_SHORT_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004429
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004430 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4431 val &= ~mask;
4432 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004433 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4434}
4435
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004436static void *mvpp2_buf_alloc(struct mvpp2_port *port,
4437 struct mvpp2_bm_pool *bm_pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004438 dma_addr_t *buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004439 phys_addr_t *buf_phys_addr,
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004440 gfp_t gfp_mask)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004441{
Thomas Petazzoni20396132017-03-07 16:53:00 +01004442 dma_addr_t dma_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004443 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004444
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004445 data = mvpp2_frag_alloc(bm_pool);
4446 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004447 return NULL;
4448
Thomas Petazzoni20396132017-03-07 16:53:00 +01004449 dma_addr = dma_map_single(port->dev->dev.parent, data,
4450 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
4451 DMA_FROM_DEVICE);
4452 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004453 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004454 return NULL;
4455 }
Thomas Petazzoni20396132017-03-07 16:53:00 +01004456 *buf_dma_addr = dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004457 *buf_phys_addr = virt_to_phys(data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004458
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004459 return data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004460}
4461
Marcin Wojtas3f518502014-07-10 16:52:13 -03004462/* Release buffer to BM */
4463static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004464 dma_addr_t buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004465 phys_addr_t buf_phys_addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004466{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004467 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01004468
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004469 if (port->priv->hw_version == MVPP22) {
4470 u32 val = 0;
4471
4472 if (sizeof(dma_addr_t) == 8)
4473 val |= upper_32_bits(buf_dma_addr) &
4474 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
4475
4476 if (sizeof(phys_addr_t) == 8)
4477 val |= (upper_32_bits(buf_phys_addr)
4478 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
4479 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
4480
Thomas Petazzonia7868412017-03-07 16:53:13 +01004481 mvpp2_percpu_write(port->priv, cpu,
4482 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004483 }
4484
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004485 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
4486 * returned in the "cookie" field of the RX
4487 * descriptor. Instead of storing the virtual address, we
4488 * store the physical address
4489 */
Thomas Petazzonia7868412017-03-07 16:53:13 +01004490 mvpp2_percpu_write(port->priv, cpu,
4491 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
4492 mvpp2_percpu_write(port->priv, cpu,
4493 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004494
4495 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03004496}
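
/* Sketch of the release sequence above (illustrative): on PPv2.2 with 64-bit
 * addresses, the upper address bits are first staged in
 * MVPP22_BM_ADDR_HIGH_RLS_REG, the physical address "cookie" is then written
 * to MVPP2_BM_VIRT_RLS_REG, and presumably the final write to
 * MVPP2_BM_PHY_RLS_REG(pool) is what commits the buffer back to the pool.
 */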
4497
Marcin Wojtas3f518502014-07-10 16:52:13 -03004498/* Allocate buffers for the pool */
4499static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
4500 struct mvpp2_bm_pool *bm_pool, int buf_num)
4501{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004502 int i, buf_size, total_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01004503 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004504 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004505 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004506
4507 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
4508 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
4509
4510 if (buf_num < 0 ||
4511 (buf_num + bm_pool->buf_num > bm_pool->size)) {
4512 netdev_err(port->dev,
4513 "cannot allocate %d buffers for pool %d\n",
4514 buf_num, bm_pool->id);
4515 return 0;
4516 }
4517
Marcin Wojtas3f518502014-07-10 16:52:13 -03004518 for (i = 0; i < buf_num; i++) {
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004519 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4520 &phys_addr, GFP_KERNEL);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004521 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004522 break;
4523
Thomas Petazzoni20396132017-03-07 16:53:00 +01004524 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004525 phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004526 }
4527
4528 /* Update BM driver with number of buffers added to pool */
4529 bm_pool->buf_num += i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004530
4531 netdev_dbg(port->dev,
Stefan Chulski01d04932018-03-05 15:16:50 +01004532 "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
Marcin Wojtas3f518502014-07-10 16:52:13 -03004533 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4534
4535 netdev_dbg(port->dev,
Stefan Chulski01d04932018-03-05 15:16:50 +01004536 "pool %d: %d of %d buffers added\n",
Marcin Wojtas3f518502014-07-10 16:52:13 -03004537 bm_pool->id, i, buf_num);
4538 return i;
4539}
4540
4541/* Notify the driver that BM pool is being used as a specific type and return the
4542 * pool pointer on success
4543 */
4544static struct mvpp2_bm_pool *
Stefan Chulski01d04932018-03-05 15:16:50 +01004545mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned int pool, int pkt_size)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004546{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004547 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4548 int num;
4549
Stefan Chulski01d04932018-03-05 15:16:50 +01004550 if (pool >= MVPP2_BM_POOLS_NUM) {
4551 netdev_err(port->dev, "Invalid pool %d\n", pool);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004552 return NULL;
4553 }
4554
Marcin Wojtas3f518502014-07-10 16:52:13 -03004555 /* Allocate buffers in case BM pool is used as long pool, but packet
4556	 * size doesn't match MTU or BM pool hasn't been used yet
4557 */
Stefan Chulski01d04932018-03-05 15:16:50 +01004558 if (new_pool->pkt_size == 0) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004559 int pkts_num;
4560
4561 /* Set default buffer number or free all the buffers in case
4562 * the pool is not empty
4563 */
4564 pkts_num = new_pool->buf_num;
4565 if (pkts_num == 0)
Stefan Chulski01d04932018-03-05 15:16:50 +01004566 pkts_num = mvpp2_pools[pool].buf_num;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004567 else
Marcin Wojtas4229d502015-12-03 15:20:50 +01004568 mvpp2_bm_bufs_free(port->dev->dev.parent,
Stefan Chulskieffbf5f2018-03-05 15:16:51 +01004569 port->priv, new_pool, pkts_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004570
4571 new_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004572 new_pool->frag_size =
4573 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4574 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004575
4576 /* Allocate buffers for this pool */
4577 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4578 if (num != pkts_num) {
4579 WARN(1, "pool %d: %d of %d allocated\n",
4580 new_pool->id, num, pkts_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004581 return NULL;
4582 }
4583 }
4584
4585 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4586 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4587
Marcin Wojtas3f518502014-07-10 16:52:13 -03004588 return new_pool;
4589}
4590
4591/* Initialize pools for swf */
4592static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4593{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004594 int rxq;
Stefan Chulski576193f2018-03-05 15:16:54 +01004595 enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
4596
4597 /* If port pkt_size is higher than 1518B:
4598 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
4599 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
4600 */
4601 if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
4602 long_log_pool = MVPP2_BM_JUMBO;
4603 short_log_pool = MVPP2_BM_LONG;
4604 } else {
4605 long_log_pool = MVPP2_BM_LONG;
4606 short_log_pool = MVPP2_BM_SHORT;
4607 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004608
4609 if (!port->pool_long) {
4610 port->pool_long =
Stefan Chulski576193f2018-03-05 15:16:54 +01004611 mvpp2_bm_pool_use(port, long_log_pool,
4612 mvpp2_pools[long_log_pool].pkt_size);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004613 if (!port->pool_long)
4614 return -ENOMEM;
4615
Stefan Chulski576193f2018-03-05 15:16:54 +01004616 port->pool_long->port_map |= BIT(port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004617
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004618 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004619 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4620 }
4621
4622 if (!port->pool_short) {
4623 port->pool_short =
Stefan Chulski576193f2018-03-05 15:16:54 +01004624 mvpp2_bm_pool_use(port, short_log_pool,
Colin Ian Kinge2e03162018-03-21 17:31:15 +00004625 mvpp2_pools[short_log_pool].pkt_size);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004626 if (!port->pool_short)
4627 return -ENOMEM;
4628
Stefan Chulski576193f2018-03-05 15:16:54 +01004629 port->pool_short->port_map |= BIT(port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004630
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004631 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004632 mvpp2_rxq_short_pool_set(port, rxq,
4633 port->pool_short->id);
4634 }
4635
4636 return 0;
4637}
4638
4639static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4640{
4641 struct mvpp2_port *port = netdev_priv(dev);
Stefan Chulski576193f2018-03-05 15:16:54 +01004642 enum mvpp2_bm_pool_log_num new_long_pool;
4643 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004644
Stefan Chulski576193f2018-03-05 15:16:54 +01004645 /* If port MTU is higher than 1518B:
4646 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
4647 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
4648 */
4649 if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
4650 new_long_pool = MVPP2_BM_JUMBO;
4651 else
4652 new_long_pool = MVPP2_BM_LONG;
4653
4654 if (new_long_pool != port->pool_long->id) {
4655 /* Remove port from old short & long pool */
4656 port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
4657 port->pool_long->pkt_size);
4658 port->pool_long->port_map &= ~BIT(port->id);
4659 port->pool_long = NULL;
4660
4661 port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
4662 port->pool_short->pkt_size);
4663 port->pool_short->port_map &= ~BIT(port->id);
4664 port->pool_short = NULL;
4665
4666 port->pkt_size = pkt_size;
4667
4668 /* Add port to new short & long pool */
4669 mvpp2_swf_bm_pool_init(port);
4670
4671		/* Update L4 checksum offload when jumbo is enabled/disabled */
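		/* Note (illustrative, not in the original source): elsewhere in
		 * this driver only GOP port 0 is given a TX FIFO large enough
		 * for jumbo frames, which is presumably why TX checksum offload
		 * is dropped on the other ports here.
		 */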
4672 if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
4673 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
4674 dev->hw_features &= ~(NETIF_F_IP_CSUM |
4675 NETIF_F_IPV6_CSUM);
4676 } else {
4677 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4678 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4679 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004680 }
4681
Marcin Wojtas3f518502014-07-10 16:52:13 -03004682 dev->mtu = mtu;
Stefan Chulski576193f2018-03-05 15:16:54 +01004683 dev->wanted_features = dev->features;
4684
Marcin Wojtas3f518502014-07-10 16:52:13 -03004685 netdev_update_features(dev);
4686 return 0;
4687}
4688
4689static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4690{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004691 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004692
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004693 for (i = 0; i < port->nqvecs; i++)
4694 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4695
Marcin Wojtas3f518502014-07-10 16:52:13 -03004696 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004697 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004698}
4699
4700static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4701{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004702 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004703
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004704 for (i = 0; i < port->nqvecs; i++)
4705 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4706
Marcin Wojtas3f518502014-07-10 16:52:13 -03004707 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004708 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
4709}
4710
4711static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
4712{
4713 struct mvpp2_port *port = qvec->port;
4714
4715 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4716 MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
4717}
4718
4719static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
4720{
4721 struct mvpp2_port *port = qvec->port;
4722
4723 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4724 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004725}
4726
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004727/* Mask the current CPU's Rx/Tx interrupts
4728 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4729 * using smp_processor_id() is OK.
4730 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004731static void mvpp2_interrupts_mask(void *arg)
4732{
4733 struct mvpp2_port *port = arg;
4734
Thomas Petazzonia7868412017-03-07 16:53:13 +01004735 mvpp2_percpu_write(port->priv, smp_processor_id(),
4736 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004737}
4738
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004739/* Unmask the current CPU's Rx/Tx interrupts.
4740 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4741 * using smp_processor_id() is OK.
4742 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004743static void mvpp2_interrupts_unmask(void *arg)
4744{
4745 struct mvpp2_port *port = arg;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004746 u32 val;
4747
4748 val = MVPP2_CAUSE_MISC_SUM_MASK |
4749 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4750 if (port->has_tx_irqs)
4751 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004752
Thomas Petazzonia7868412017-03-07 16:53:13 +01004753 mvpp2_percpu_write(port->priv, smp_processor_id(),
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004754 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4755}
4756
4757static void
4758mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
4759{
4760 u32 val;
4761 int i;
4762
4763 if (port->priv->hw_version != MVPP22)
4764 return;
4765
4766 if (mask)
4767 val = 0;
4768 else
4769 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4770
4771 for (i = 0; i < port->nqvecs; i++) {
4772 struct mvpp2_queue_vector *v = port->qvecs + i;
4773
4774 if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
4775 continue;
4776
4777 mvpp2_percpu_write(port->priv, v->sw_thread_id,
4778 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4779 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004780}
4781
4782/* Port configuration routines */
4783
Antoine Ténartf84bf382017-08-22 19:08:27 +02004784static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
4785{
4786 struct mvpp2 *priv = port->priv;
4787 u32 val;
4788
4789 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4790 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
4791 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4792
4793 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4794 if (port->gop_id == 2)
4795 val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
4796 else if (port->gop_id == 3)
4797 val |= GENCONF_CTRL0_PORT1_RGMII_MII;
4798 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4799}
4800
4801static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
4802{
4803 struct mvpp2 *priv = port->priv;
4804 u32 val;
4805
4806 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4807 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
4808 GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
4809 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4810
4811 if (port->gop_id > 1) {
4812 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4813 if (port->gop_id == 2)
4814 val &= ~GENCONF_CTRL0_PORT0_RGMII;
4815 else if (port->gop_id == 3)
4816 val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
4817 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4818 }
4819}
4820
4821static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
4822{
4823 struct mvpp2 *priv = port->priv;
4824 void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
4825 void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
4826 u32 val;
4827
4828 /* XPCS */
4829 val = readl(xpcs + MVPP22_XPCS_CFG0);
4830 val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
4831 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
4832 val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
4833 writel(val, xpcs + MVPP22_XPCS_CFG0);
4834
4835 /* MPCS */
4836 val = readl(mpcs + MVPP22_MPCS_CTRL);
4837 val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
4838 writel(val, mpcs + MVPP22_MPCS_CTRL);
4839
4840 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
4841 val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
4842 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
4843 val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
4844 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4845
4846 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
4847 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
4848 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4849}
4850
4851static int mvpp22_gop_init(struct mvpp2_port *port)
4852{
4853 struct mvpp2 *priv = port->priv;
4854 u32 val;
4855
4856 if (!priv->sysctrl_base)
4857 return 0;
4858
4859 switch (port->phy_interface) {
4860 case PHY_INTERFACE_MODE_RGMII:
4861 case PHY_INTERFACE_MODE_RGMII_ID:
4862 case PHY_INTERFACE_MODE_RGMII_RXID:
4863 case PHY_INTERFACE_MODE_RGMII_TXID:
4864 if (port->gop_id == 0)
4865 goto invalid_conf;
4866 mvpp22_gop_init_rgmii(port);
4867 break;
4868 case PHY_INTERFACE_MODE_SGMII:
4869 mvpp22_gop_init_sgmii(port);
4870 break;
4871 case PHY_INTERFACE_MODE_10GKR:
4872 if (port->gop_id != 0)
4873 goto invalid_conf;
4874 mvpp22_gop_init_10gkr(port);
4875 break;
4876 default:
4877 goto unsupported_conf;
4878 }
4879
4880 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
4881 val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
4882 GENCONF_PORT_CTRL1_EN(port->gop_id);
4883 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
4884
4885 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4886 val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
4887 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4888
4889 regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
4890 val |= GENCONF_SOFT_RESET1_GOP;
4891 regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
4892
4893unsupported_conf:
4894 return 0;
4895
4896invalid_conf:
4897 netdev_err(port->dev, "Invalid port configuration\n");
4898 return -EINVAL;
4899}
4900
Antoine Tenartfd3651b2017-09-01 11:04:54 +02004901static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
4902{
4903 u32 val;
4904
4905 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4906 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4907 /* Enable the GMAC link status irq for this port */
4908 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4909 val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4910 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4911 }
4912
4913 if (port->gop_id == 0) {
4914 /* Enable the XLG/GIG irqs for this port */
4915 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4916 if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4917 val |= MVPP22_XLG_EXT_INT_MASK_XLG;
4918 else
4919 val |= MVPP22_XLG_EXT_INT_MASK_GIG;
4920 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4921 }
4922}
4923
4924static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
4925{
4926 u32 val;
4927
4928 if (port->gop_id == 0) {
4929 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4930 val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
4931 MVPP22_XLG_EXT_INT_MASK_GIG);
4932 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4933 }
4934
4935 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4936 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4937 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4938 val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4939 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4940 }
4941}
4942
4943static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
4944{
4945 u32 val;
4946
4947 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4948 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4949 val = readl(port->base + MVPP22_GMAC_INT_MASK);
4950 val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
4951 writel(val, port->base + MVPP22_GMAC_INT_MASK);
4952 }
4953
4954 if (port->gop_id == 0) {
4955 val = readl(port->base + MVPP22_XLG_INT_MASK);
4956 val |= MVPP22_XLG_INT_MASK_LINK;
4957 writel(val, port->base + MVPP22_XLG_INT_MASK);
4958 }
4959
4960 mvpp22_gop_unmask_irq(port);
4961}
4962
Antoine Tenart542897d2017-08-30 10:29:15 +02004963static int mvpp22_comphy_init(struct mvpp2_port *port)
4964{
4965 enum phy_mode mode;
4966 int ret;
4967
4968 if (!port->comphy)
4969 return 0;
4970
4971 switch (port->phy_interface) {
4972 case PHY_INTERFACE_MODE_SGMII:
4973 mode = PHY_MODE_SGMII;
4974 break;
4975 case PHY_INTERFACE_MODE_10GKR:
4976 mode = PHY_MODE_10GKR;
4977 break;
4978 default:
4979 return -EINVAL;
4980 }
4981
4982 ret = phy_set_mode(port->comphy, mode);
4983 if (ret)
4984 return ret;
4985
4986 return phy_power_on(port->comphy);
4987}
4988
Antoine Ténart39193572017-08-22 19:08:24 +02004989static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
4990{
4991 u32 val;
4992
4993 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4994 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4995 val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
4996 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4997 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4998 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
Antoine Tenart1df22702017-09-01 11:04:52 +02004999 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
Antoine Ténart39193572017-08-22 19:08:24 +02005000 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
5001 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
5002 MVPP22_CTRL4_SYNC_BYPASS_DIS |
5003 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5004 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
5005 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
Antoine Ténart39193572017-08-22 19:08:24 +02005006 }
5007
5008 /* The port is connected to a copper PHY */
5009 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5010 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
5011 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5012
5013 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5014 val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
5015 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
5016 MVPP2_GMAC_AN_DUPLEX_EN;
5017 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5018 val |= MVPP2_GMAC_IN_BAND_AUTONEG;
5019 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5020}
5021
5022static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
5023{
5024 u32 val;
5025
5026 /* Force link down */
5027 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5028 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5029 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
5030 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5031
5032 /* Set the GMAC in a reset state */
5033 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5034 val |= MVPP2_GMAC_PORT_RESET_MASK;
5035 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5036
5037 /* Configure the PCS and in-band AN */
5038 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5039 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
5040 val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
Antoine Tenart1df22702017-09-01 11:04:52 +02005041 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
Antoine Ténart39193572017-08-22 19:08:24 +02005042 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
Antoine Ténart39193572017-08-22 19:08:24 +02005043 }
5044 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5045
5046 mvpp2_port_mii_gmac_configure_mode(port);
5047
5048 /* Unset the GMAC reset state */
5049 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5050 val &= ~MVPP2_GMAC_PORT_RESET_MASK;
5051 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5052
5053 /* Stop forcing link down */
5054 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5055 val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
5056 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5057}
5058
Antoine Ténart77321952017-08-22 19:08:25 +02005059static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
5060{
5061 u32 val;
5062
5063 if (port->gop_id != 0)
5064 return;
5065
5066 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5067 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
5068 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5069
5070 val = readl(port->base + MVPP22_XLG_CTRL4_REG);
5071 val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
5072 val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
5073 writel(val, port->base + MVPP22_XLG_CTRL4_REG);
5074}
5075
Thomas Petazzoni26975822017-03-07 16:53:14 +01005076static void mvpp22_port_mii_set(struct mvpp2_port *port)
5077{
5078 u32 val;
5079
Thomas Petazzoni26975822017-03-07 16:53:14 +01005080 /* Only GOP port 0 has an XLG MAC */
5081 if (port->gop_id == 0) {
5082 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
5083 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
Antoine Ténart725757a2017-06-12 16:01:39 +02005084
5085 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5086 port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5087 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
5088 else
5089 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
5090
Thomas Petazzoni26975822017-03-07 16:53:14 +01005091 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
5092 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01005093}
5094
Marcin Wojtas3f518502014-07-10 16:52:13 -03005095static void mvpp2_port_mii_set(struct mvpp2_port *port)
5096{
Thomas Petazzoni26975822017-03-07 16:53:14 +01005097 if (port->priv->hw_version == MVPP22)
5098 mvpp22_port_mii_set(port);
5099
Antoine Tenart1df22702017-09-01 11:04:52 +02005100 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
Antoine Ténart39193572017-08-22 19:08:24 +02005101 port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5102 mvpp2_port_mii_gmac_configure(port);
Antoine Ténart77321952017-08-22 19:08:25 +02005103 else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5104 mvpp2_port_mii_xlg_configure(port);
Marcin Wojtas08a23752014-07-21 13:48:12 -03005105}
5106
5107static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
5108{
5109 u32 val;
5110
5111 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5112 val |= MVPP2_GMAC_FC_ADV_EN;
5113 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005114}
5115
5116static void mvpp2_port_enable(struct mvpp2_port *port)
5117{
5118 u32 val;
5119
Antoine Ténart725757a2017-06-12 16:01:39 +02005120 /* Only GOP port 0 has an XLG MAC */
5121 if (port->gop_id == 0 &&
5122 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5123 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
5124 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5125 val |= MVPP22_XLG_CTRL0_PORT_EN |
5126 MVPP22_XLG_CTRL0_MAC_RESET_DIS;
5127 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
5128 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5129 } else {
5130 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5131 val |= MVPP2_GMAC_PORT_EN_MASK;
5132 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
5133 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5134 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005135}
5136
5137static void mvpp2_port_disable(struct mvpp2_port *port)
5138{
5139 u32 val;
5140
Antoine Ténart725757a2017-06-12 16:01:39 +02005141 /* Only GOP port 0 has an XLG MAC */
5142 if (port->gop_id == 0 &&
5143 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5144 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
5145 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5146 val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
5147 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
5148 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5149 } else {
5150 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5151 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
5152 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5153 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005154}
5155
5156/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
5157static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
5158{
5159 u32 val;
5160
5161 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
5162 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
5163 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5164}
5165
5166/* Configure loopback port */
5167static void mvpp2_port_loopback_set(struct mvpp2_port *port)
5168{
5169 u32 val;
5170
5171 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5172
5173 if (port->speed == 1000)
5174 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
5175 else
5176 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
5177
5178 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5179 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
5180 else
5181 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
5182
5183 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5184}
5185
Miquel Raynal118d6292017-11-06 22:56:53 +01005186struct mvpp2_ethtool_counter {
5187 unsigned int offset;
5188 const char string[ETH_GSTRING_LEN];
5189 bool reg_is_64b;
5190};
5191
5192static u64 mvpp2_read_count(struct mvpp2_port *port,
5193 const struct mvpp2_ethtool_counter *counter)
5194{
5195 u64 val;
5196
5197 val = readl(port->stats_base + counter->offset);
5198 if (counter->reg_is_64b)
5199 val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
5200
5201 return val;
5202}
5203
5204/* Because software and hardware statistics are, by design, incremented at
5205 * different moments in the packet processing chain, it is very likely that
5206 * incoming packets are dropped after being counted by hardware but before
5207 * reaching software statistics (most probably multicast packets); in the
5208 * opposite direction, FCS bytes are added during transmission, and TSO skbs
5209 * are split with header bytes added.
5210 * Hence, statistics gathered from userspace with ifconfig (software) and
5211 * ethtool (hardware) cannot be compared.
5212 */
5213static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
5214 { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
5215 { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
5216 { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
5217 { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
5218 { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
5219 { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
5220 { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
5221 { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
5222 { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
5223 { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
5224 { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
5225 { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
5226 { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
5227 { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
5228 { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
5229 { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
5230 { MVPP2_MIB_FC_SENT, "fc_sent" },
5231 { MVPP2_MIB_FC_RCVD, "fc_received" },
5232 { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
5233 { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
5234 { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
5235 { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
5236 { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
5237 { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
5238 { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
5239 { MVPP2_MIB_COLLISION, "collision" },
5240 { MVPP2_MIB_LATE_COLLISION, "late_collision" },
5241};
5242
5243static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
5244 u8 *data)
5245{
5246 if (sset == ETH_SS_STATS) {
5247 int i;
5248
5249 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5250 memcpy(data + i * ETH_GSTRING_LEN,
5251 &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
5252 }
5253}
5254
5255static void mvpp2_gather_hw_statistics(struct work_struct *work)
5256{
5257 struct delayed_work *del_work = to_delayed_work(work);
Miquel Raynale5c500e2017-11-08 08:59:40 +01005258 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
5259 stats_work);
Miquel Raynal118d6292017-11-06 22:56:53 +01005260 u64 *pstats;
Miquel Raynale5c500e2017-11-08 08:59:40 +01005261 int i;
Miquel Raynal118d6292017-11-06 22:56:53 +01005262
Miquel Raynale5c500e2017-11-08 08:59:40 +01005263 mutex_lock(&port->gather_stats_lock);
Miquel Raynal118d6292017-11-06 22:56:53 +01005264
Miquel Raynale5c500e2017-11-08 08:59:40 +01005265 pstats = port->ethtool_stats;
5266 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5267 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
Miquel Raynal118d6292017-11-06 22:56:53 +01005268
5269	/* No need to read the counters again right after this function if it
5270	 * was called asynchronously by the user (i.e. via ethtool).
5271 */
Miquel Raynale5c500e2017-11-08 08:59:40 +01005272 cancel_delayed_work(&port->stats_work);
5273 queue_delayed_work(port->priv->stats_queue, &port->stats_work,
Miquel Raynal118d6292017-11-06 22:56:53 +01005274 MVPP2_MIB_COUNTERS_STATS_DELAY);
5275
Miquel Raynale5c500e2017-11-08 08:59:40 +01005276 mutex_unlock(&port->gather_stats_lock);
Miquel Raynal118d6292017-11-06 22:56:53 +01005277}
5278
5279static void mvpp2_ethtool_get_stats(struct net_device *dev,
5280 struct ethtool_stats *stats, u64 *data)
5281{
5282 struct mvpp2_port *port = netdev_priv(dev);
5283
Miquel Raynale5c500e2017-11-08 08:59:40 +01005284 /* Update statistics for the given port, then take the lock to avoid
5285	 * concurrent accesses to the ethtool_stats structure while it is copied.
5286 */
5287 mvpp2_gather_hw_statistics(&port->stats_work.work);
Miquel Raynal118d6292017-11-06 22:56:53 +01005288
Miquel Raynale5c500e2017-11-08 08:59:40 +01005289 mutex_lock(&port->gather_stats_lock);
Miquel Raynal118d6292017-11-06 22:56:53 +01005290 memcpy(data, port->ethtool_stats,
5291 sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
Miquel Raynale5c500e2017-11-08 08:59:40 +01005292 mutex_unlock(&port->gather_stats_lock);
Miquel Raynal118d6292017-11-06 22:56:53 +01005293}
5294
5295static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
5296{
5297 if (sset == ETH_SS_STATS)
5298 return ARRAY_SIZE(mvpp2_ethtool_regs);
5299
5300 return -EOPNOTSUPP;
5301}
5302
Marcin Wojtas3f518502014-07-10 16:52:13 -03005303static void mvpp2_port_reset(struct mvpp2_port *port)
5304{
5305 u32 val;
Miquel Raynal118d6292017-11-06 22:56:53 +01005306 unsigned int i;
5307
5308 /* Read the GOP statistics to reset the hardware counters */
5309 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5310 mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005311
5312 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5313 ~MVPP2_GMAC_PORT_RESET_MASK;
5314 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5315
5316 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5317 MVPP2_GMAC_PORT_RESET_MASK)
5318 continue;
5319}
5320
5321/* Change maximum receive size of the port */
5322static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
5323{
5324 u32 val;
5325
5326 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5327 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
5328 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
5329 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
5330 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5331}
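
/* Worked example (illustrative): with the 2-byte Marvell header
 * (MVPP2_MH_SIZE) and a packet size of 1518 bytes, the field above is
 * programmed as (1518 - 2) / 2 = 758; the division by two suggests the
 * limit is expressed in units of 2 bytes.
 */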
5332
Stefan Chulski76eb1b12017-08-22 19:08:26 +02005333/* Change maximum receive size of the port */
5334static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
5335{
5336 u32 val;
5337
5338 val = readl(port->base + MVPP22_XLG_CTRL1_REG);
5339 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
5340 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
Antoine Ténartec15ecd2017-08-25 15:24:46 +02005341 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
Stefan Chulski76eb1b12017-08-22 19:08:26 +02005342 writel(val, port->base + MVPP22_XLG_CTRL1_REG);
5343}
5344
Marcin Wojtas3f518502014-07-10 16:52:13 -03005345/* Set defaults to the MVPP2 port */
5346static void mvpp2_defaults_set(struct mvpp2_port *port)
5347{
5348 int tx_port_num, val, queue, ptxq, lrxq;
5349
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01005350 if (port->priv->hw_version == MVPP21) {
5351 /* Configure port to loopback if needed */
5352 if (port->flags & MVPP2_F_LOOPBACK)
5353 mvpp2_port_loopback_set(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005354
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01005355 /* Update TX FIFO MIN Threshold */
5356 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5357 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
5358 /* Min. TX threshold must be less than minimal packet length */
5359 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
5360 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5361 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005362
5363 /* Disable Legacy WRR, Disable EJP, Release from reset */
5364 tx_port_num = mvpp2_egress_port(port);
5365 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
5366 tx_port_num);
5367 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
5368
5369 /* Close bandwidth for all queues */
5370 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
5371 ptxq = mvpp2_txq_phys(port->id, queue);
5372 mvpp2_write(port->priv,
5373 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
5374 }
5375
5376 /* Set refill period to 1 usec, refill tokens
5377 * and bucket size to maximum
5378 */
5379 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
5380 port->priv->tclk / USEC_PER_SEC);
5381 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
5382 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
5383 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
5384 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
5385 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
5386 val = MVPP2_TXP_TOKEN_SIZE_MAX;
5387 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5388
5389 /* Set MaximumLowLatencyPacketSize value to 256 */
5390 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
5391 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
5392 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
5393
5394 /* Enable Rx cache snoop */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005395 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005396 queue = port->rxqs[lrxq]->id;
5397 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5398 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
5399 MVPP2_SNOOP_BUF_HDR_MASK;
5400 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5401 }
5402
5403	/* By default, mask all interrupts on all present CPUs */
5404 mvpp2_interrupts_disable(port);
5405}
5406
5407/* Enable/disable receiving packets */
5408static void mvpp2_ingress_enable(struct mvpp2_port *port)
5409{
5410 u32 val;
5411 int lrxq, queue;
5412
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005413 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005414 queue = port->rxqs[lrxq]->id;
5415 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5416 val &= ~MVPP2_RXQ_DISABLE_MASK;
5417 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5418 }
5419}
5420
5421static void mvpp2_ingress_disable(struct mvpp2_port *port)
5422{
5423 u32 val;
5424 int lrxq, queue;
5425
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005426 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005427 queue = port->rxqs[lrxq]->id;
5428 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5429 val |= MVPP2_RXQ_DISABLE_MASK;
5430 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5431 }
5432}
5433
5434/* Enable transmit via physical egress queue
5435 * - HW starts taking descriptors from DRAM
5436 */
5437static void mvpp2_egress_enable(struct mvpp2_port *port)
5438{
5439 u32 qmap;
5440 int queue;
5441 int tx_port_num = mvpp2_egress_port(port);
5442
5443 /* Enable all initialized TXs. */
5444 qmap = 0;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005445 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005446 struct mvpp2_tx_queue *txq = port->txqs[queue];
5447
Markus Elfringdbbb2f02017-04-17 14:07:52 +02005448 if (txq->descs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005449 qmap |= (1 << queue);
5450 }
5451
5452 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5453 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
5454}
5455
5456/* Disable transmit via physical egress queue
5457 * - HW doesn't take descriptors from DRAM
5458 */
5459static void mvpp2_egress_disable(struct mvpp2_port *port)
5460{
5461 u32 reg_data;
5462 int delay;
5463 int tx_port_num = mvpp2_egress_port(port);
5464
5465 /* Issue stop command for active channels only */
5466 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5467 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
5468 MVPP2_TXP_SCHED_ENQ_MASK;
5469 if (reg_data != 0)
5470 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
5471 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
5472
5473 /* Wait for all Tx activity to terminate. */
5474 delay = 0;
5475 do {
5476 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
5477 netdev_warn(port->dev,
5478 "Tx stop timed out, status=0x%08x\n",
5479 reg_data);
5480 break;
5481 }
5482 mdelay(1);
5483 delay++;
5484
5485		/* Check the port TX Command register to verify that all
5486		 * Tx queues are stopped
5487 */
5488 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
5489 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
5490}
5491
5492/* Rx descriptors helper methods */
5493
5494/* Get number of Rx descriptors occupied by received packets */
5495static inline int
5496mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
5497{
5498 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
5499
5500 return val & MVPP2_RXQ_OCCUPIED_MASK;
5501}
5502
5503/* Update Rx queue status with the number of occupied and available
5504 * Rx descriptor slots.
5505 */
5506static inline void
5507mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
5508 int used_count, int free_count)
5509{
5510	/* Decrement the number of used descriptors and increment the
5511	 * number of free descriptors.
5512 */
5513 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
5514
5515 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
5516}
5517
5518/* Get pointer to next RX descriptor to be processed by SW */
5519static inline struct mvpp2_rx_desc *
5520mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
5521{
5522 int rx_desc = rxq->next_desc_to_proc;
5523
5524 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
5525 prefetch(rxq->descs + rxq->next_desc_to_proc);
5526 return rxq->descs + rx_desc;
5527}
5528
5529/* Set rx queue offset */
5530static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
5531 int prxq, int offset)
5532{
5533 u32 val;
5534
5535 /* Convert offset from bytes to units of 32 bytes */
5536 offset = offset >> 5;
5537
5538 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
5539 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
5540
5541	/* Offset is in units of 32 bytes */
5542 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
5543 MVPP2_RXQ_PACKET_OFFSET_MASK);
5544
5545 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
5546}
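
/* Worked example: a requested offset of 64 bytes is programmed as
 * 64 >> 5 = 2, so only offsets that are multiples of 32 bytes can be
 * represented exactly.
 */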
5547
Marcin Wojtas3f518502014-07-10 16:52:13 -03005548/* Tx descriptors helper methods */
5549
Marcin Wojtas3f518502014-07-10 16:52:13 -03005550/* Get pointer to next Tx descriptor to be processed (send) by HW */
5551static struct mvpp2_tx_desc *
5552mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
5553{
5554 int tx_desc = txq->next_desc_to_proc;
5555
5556 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
5557 return txq->descs + tx_desc;
5558}
5559
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005560/* Update HW with number of aggregated Tx descriptors to be sent
5561 *
5562 * Called only from mvpp2_tx(), so migration is disabled, using
5563 * smp_processor_id() is OK.
5564 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03005565static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
5566{
5567 /* aggregated access - relevant TXQ number is written in TX desc */
Thomas Petazzonia7868412017-03-07 16:53:13 +01005568 mvpp2_percpu_write(port->priv, smp_processor_id(),
5569 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005570}
5571
5573/* Check if there are enough free descriptors in aggregated txq.
5574 * If not, update the number of occupied descriptors and repeat the check.
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005575 *
5576 * Called only from mvpp2_tx(), so migration is disabled, using
5577 * smp_processor_id() is OK.
Marcin Wojtas3f518502014-07-10 16:52:13 -03005578 */
5579static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
5580 struct mvpp2_tx_queue *aggr_txq, int num)
5581{
Antoine Tenart02856a32017-10-30 11:23:32 +01005582 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005583 /* Update number of occupied aggregated Tx descriptors */
5584 int cpu = smp_processor_id();
5585 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
5586
5587 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
5588 }
5589
Antoine Tenart02856a32017-10-30 11:23:32 +01005590 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005591 return -ENOMEM;
5592
5593 return 0;
5594}
5595
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005596/* Reserved Tx descriptors allocation request
5597 *
5598 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
5599 * only by mvpp2_tx(), so migration is disabled, using
5600 * smp_processor_id() is OK.
5601 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03005602static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
5603 struct mvpp2_tx_queue *txq, int num)
5604{
5605 u32 val;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005606 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005607
5608 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005609 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005610
Thomas Petazzonia7868412017-03-07 16:53:13 +01005611 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005612
5613 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
5614}
5615
5616/* Check if there are enough reserved descriptors for transmission.
5617 * If not, request chunk of reserved descriptors and check again.
5618 */
5619static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
5620 struct mvpp2_tx_queue *txq,
5621 struct mvpp2_txq_pcpu *txq_pcpu,
5622 int num)
5623{
5624 int req, cpu, desc_count;
5625
5626 if (txq_pcpu->reserved_num >= num)
5627 return 0;
5628
5629 /* Not enough descriptors reserved! Update the reserved descriptor
5630 * count and check again.
5631 */
5632
5633 desc_count = 0;
5634 /* Compute total of used descriptors */
5635 for_each_present_cpu(cpu) {
5636 struct mvpp2_txq_pcpu *txq_pcpu_aux;
5637
5638 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
5639 desc_count += txq_pcpu_aux->count;
5640 desc_count += txq_pcpu_aux->reserved_num;
5641 }
5642
5643 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
5644 desc_count += req;
5645
5646 if (desc_count >
5647 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
5648 return -ENOMEM;
5649
5650 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
5651
5652	/* OK, the descriptor count has been updated: check again. */
5653 if (txq_pcpu->reserved_num < num)
5654 return -ENOMEM;
5655 return 0;
5656}
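
/* Worked example with illustrative numbers: for a 2048-descriptor txq,
 * 4 present CPUs and a 64-descriptor MVPP2_CPU_DESC_CHUNK, the check above
 * caps the total of per-CPU used + reserved descriptors at
 * 2048 - 4 * 64 = 1792, leaving one chunk of headroom per CPU.
 */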
5657
5658/* Release the last allocated Tx descriptor. Useful to handle DMA
5659 * mapping failures in the Tx path.
5660 */
5661static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
5662{
5663 if (txq->next_desc_to_proc == 0)
5664 txq->next_desc_to_proc = txq->last_desc - 1;
5665 else
5666 txq->next_desc_to_proc--;
5667}
5668
5669/* Set Tx descriptors fields relevant for CSUM calculation */
5670static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
5671 int ip_hdr_len, int l4_proto)
5672{
5673 u32 command;
5674
5675 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
5676 * G_L4_chk, L4_type required only for checksum calculation
5677 */
5678 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
5679 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
5680 command |= MVPP2_TXD_IP_CSUM_DISABLE;
5681
5682 if (l3_proto == swab16(ETH_P_IP)) {
5683 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
5684 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
5685 } else {
5686 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
5687 }
5688
5689 if (l4_proto == IPPROTO_TCP) {
5690 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
5691 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5692 } else if (l4_proto == IPPROTO_UDP) {
5693 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
5694 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5695 } else {
5696 command |= MVPP2_TXD_L4_CSUM_NOT;
5697 }
5698
5699 return command;
5700}
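
/* Worked example (illustrative): for an IPv4/TCP frame with the L3 header
 * at byte 14 and a 5-word IP header, mvpp2_txq_desc_csum(14,
 * swab16(ETH_P_IP), 5, IPPROTO_TCP) encodes L3_offset and IP_hdrlen, clears
 * MVPP2_TXD_IP_CSUM_DISABLE and MVPP2_TXD_L3_IP6 to enable IPv4
 * checksumming, and clears MVPP2_TXD_L4_UDP and MVPP2_TXD_L4_CSUM_FRAG so
 * the HW generates the TCP checksum.
 */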
5701
5702/* Get number of sent descriptors and decrement counter.
5703 * The number of sent descriptors is returned.
5704 * Per-CPU access
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005705 *
5706 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
5707 * (migration disabled) and from the TX completion tasklet (migration
5708 * disabled) so using smp_processor_id() is OK.
Marcin Wojtas3f518502014-07-10 16:52:13 -03005709 */
5710static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
5711 struct mvpp2_tx_queue *txq)
5712{
5713 u32 val;
5714
5715 /* Reading status reg resets transmitted descriptor counter */
Thomas Petazzonia7868412017-03-07 16:53:13 +01005716 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
5717 MVPP2_TXQ_SENT_REG(txq->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005718
5719 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
5720 MVPP2_TRANSMITTED_COUNT_OFFSET;
5721}
5722
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005723/* Called through on_each_cpu(), so runs on all CPUs, with migration
5724 * disabled, therefore using smp_processor_id() is OK.
5725 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03005726static void mvpp2_txq_sent_counter_clear(void *arg)
5727{
5728 struct mvpp2_port *port = arg;
5729 int queue;
5730
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005731 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005732 int id = port->txqs[queue]->id;
5733
Thomas Petazzonia7868412017-03-07 16:53:13 +01005734 mvpp2_percpu_read(port->priv, smp_processor_id(),
5735 MVPP2_TXQ_SENT_REG(id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005736 }
5737}
5738
5739/* Set max sizes for Tx queues */
5740static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
5741{
5742 u32 val, size, mtu;
5743 int txq, tx_port_num;
5744
5745 mtu = port->pkt_size * 8;
5746 if (mtu > MVPP2_TXP_MTU_MAX)
5747 mtu = MVPP2_TXP_MTU_MAX;
5748
5749 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
5750 mtu = 3 * mtu;
5751
5752 /* Indirect access to registers */
5753 tx_port_num = mvpp2_egress_port(port);
5754 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5755
5756 /* Set MTU */
5757 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
5758 val &= ~MVPP2_TXP_MTU_MAX;
5759 val |= mtu;
5760 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
5761
5762	/* TXP token size and all TXQs token size must be larger than MTU */
5763 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
5764 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
5765 if (size < mtu) {
5766 size = mtu;
5767 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
5768 val |= size;
5769 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5770 }
5771
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005772 for (txq = 0; txq < port->ntxqs; txq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005773 val = mvpp2_read(port->priv,
5774 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
5775 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
5776
5777 if (size < mtu) {
5778 size = mtu;
5779 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
5780 val |= size;
5781 mvpp2_write(port->priv,
5782 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
5783 val);
5784 }
5785 }
5786}
5787
5788/* Set the number of packets that will be received before an Rx interrupt
5789 * is generated by HW.
5790 */
5791static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01005792 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005793{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005794 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005795
Thomas Petazzonif8b0d5f2017-02-21 11:28:03 +01005796 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
5797 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005798
Thomas Petazzonia7868412017-03-07 16:53:13 +01005799 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5800 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
5801 rxq->pkts_coal);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005802
5803 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005804}
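
/* Editor's note: the get_cpu()/put_cpu() pair above disables preemption so
 * that the two indirect accesses (queue selector via MVPP2_RXQ_NUM_REG, then
 * the threshold write) cannot be interleaved with another indirect access
 * from a different context using the same CPU's register window.
 */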

/* For some reason in the LSP this is done on each CPU. Why? */
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_tx_queue *txq)
{
	int cpu = get_cpu();
	u32 val;

	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;

	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);

	put_cpu();
}

static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 tmp = (u64)clk_hz * usec;

	do_div(tmp, USEC_PER_SEC);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{
	u64 tmp = (u64)cycles * USEC_PER_SEC;

	do_div(tmp, clk_hz);

	return tmp > U32_MAX ? U32_MAX : tmp;
}
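
/* Editor's note, as a worked example of the conversion above: with a 250 MHz
 * tclk, mvpp2_usec_to_cycles(100, 250000000) = 250000000 * 100 / 1000000 =
 * 25000 clock cycles, and mvpp2_cycles_to_usec() inverts that mapping.
 * do_div() is used because a native 64-by-32-bit division is not available
 * on all 32-bit platforms this driver runs on.
 */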

/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}

static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);

	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
		port->tx_time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
					 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}
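
/* Editor's note: the IS_TSO_HEADER() check above skips dma_unmap_single()
 * for descriptors whose buffer lives inside the per-CPU tso_headers region,
 * since that region is one long-lived dma_alloc_coherent() allocation (see
 * mvpp2_txq_init() below) rather than a per-packet streaming mapping.
 */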

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
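
/* Editor's note: fls(cause) - 1 selects the highest-numbered queue with a
 * pending cause bit; e.g. cause = 0b0110 gives fls() = 3, so queue 2 is
 * handled first. The callers clear that queue's bit and iterate until
 * cause reaches zero.
 */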

/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->cpu != smp_processor_id())
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
			netif_tx_wake_queue(nq);
}

static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
				  int cpu)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
				MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				&aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
		    MVPP2_AGGR_TXQ_SIZE);

	return 0;
}
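
/* Editor's note: on PPv2.2 the aggregated queue's descriptor array may sit
 * above the 32-bit address range, so the DMA address is programmed shifted
 * right by MVPP22_AGGR_TXQ_DESC_ADDR_OFFS, the dropped low bits being
 * implied by the allocation's alignment; PPv2.1 (MVPP21) writes the 32-bit
 * address as-is.
 */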

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;
	int cpu;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_bm_pool_put(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	int cpu;

	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_dma);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
	put_cpu();
}

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			return -ENOMEM;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
		txq_pcpu->tso_headers = NULL;

		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;

		txq_pcpu->tso_headers =
			dma_alloc_coherent(port->dev->dev.parent,
					   txq_pcpu->size * TSO_HEADER_SIZE,
					   &txq_pcpu->tso_headers_dma,
					   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
			return -ENOMEM;
	}

	return 0;
}
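
/* Editor's note on the flow-control thresholds set above: the netdev queue
 * is stopped once a CPU has txq->size - MVPP2_MAX_SKB_DESCS descriptors in
 * flight (so a worst-case skb always fits) and woken again only when the
 * count falls to half that, giving hysteresis that avoids stop/wake
 * ping-pong under load. The prefetch base is a simple linear layout of
 * 16 descriptors per TXQ: desc = (port_id * MVPP2_MAX_TXQ + txq_log_id) * 16.
 */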

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);

		if (txq_pcpu->tso_headers)
			dma_free_coherent(port->dev->dev.parent,
					  txq_pcpu->size * TSO_HEADER_SIZE,
					  txq_pcpu->tso_headers,
					  txq_pcpu->tso_headers_dma);

		txq_pcpu->tso_headers = NULL;
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
	put_cpu();
}

/* Drain and clean up a Tx queue */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_percpu_read(port->priv, cpu,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
	put_cpu();

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
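
/* Editor's note: the drain sequence above sets MVPP2_TXQ_DRAIN_EN_MASK and
 * then polls the pending-descriptor count once per millisecond, for at most
 * MVPP2_TX_PENDING_TIMEOUT_MSEC iterations, before giving up with a warning;
 * only then are the per-CPU buffer lists released back to the stack.
 */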

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < port->nrxqs; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	if (port->has_tx_irqs) {
		mvpp2_tx_time_coal_set(port);
		for (queue = 0; queue < port->ntxqs; queue++) {
			txq = port->txqs[queue];
			mvpp2_tx_pkts_coal_set(port, txq);
		}
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_queue_vector *qv = dev_id;

	mvpp2_qvec_interrupt_disable(qv);

	napi_schedule(&qv->napi);

	return IRQ_HANDLED;
}

/* Per-port interrupt for link status changes */
static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
	struct net_device *dev = port->dev;
	bool event = false, link = false;
	u32 val;

	mvpp22_gop_mask_irq(port);

	if (port->gop_id == 0 &&
	    port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP22_XLG_INT_STAT);
		if (val & MVPP22_XLG_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP22_XLG_STATUS);
			if (val & MVPP22_XLG_STATUS_LINK_UP)
				link = true;
		}
	} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
		   port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_STAT);
		if (val & MVPP22_GMAC_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP2_GMAC_STATUS0);
			if (val & MVPP2_GMAC_STATUS0_LINK_UP)
				link = true;
		}
	}

	if (!netif_running(dev) || !event)
		goto handled;

	if (link) {
		mvpp2_interrupts_enable(port);

		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		mvpp2_ingress_disable(port);
		mvpp2_egress_disable(port);

		mvpp2_interrupts_disable(port);
	}

handled:
	mvpp22_gop_unmask_irq(port);
	return IRQ_HANDLED;
}

static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
				   struct phy_device *phydev)
{
	u32 val;

	if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
	    port->phy_interface != PHY_INTERFACE_MODE_SGMII)
		return;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
		 MVPP2_GMAC_CONFIG_GMII_SPEED |
		 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		 MVPP2_GMAC_AN_SPEED_EN |
		 MVPP2_GMAC_AN_DUPLEX_EN);

	if (phydev->duplex)
		val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

	if (phydev->speed == SPEED_1000)
		val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
	else if (phydev->speed == SPEED_100)
		val |= MVPP2_GMAC_CONFIG_MII_SPEED;

	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Adjust link */
static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool link_reconfigured = false;
	u32 val;

	if (phydev->link) {
		if (port->phy_interface != phydev->interface && port->comphy) {
			/* disable current port for reconfiguration */
			mvpp2_interrupts_disable(port);
			netif_carrier_off(port->dev);
			mvpp2_port_disable(port);
			phy_power_off(port->comphy);

			/* comphy reconfiguration */
			port->phy_interface = phydev->interface;
			mvpp22_comphy_init(port);

			/* gop/mac reconfiguration */
			mvpp22_gop_init(port);
			mvpp2_port_mii_set(port);

			link_reconfigured = true;
		}

		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			mvpp2_gmac_set_autoneg(port, phydev);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link || link_reconfigured) {
		port->link = phydev->link;

		if (phydev->link) {
			if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
			    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
				val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
				val |= (MVPP2_GMAC_FORCE_LINK_PASS |
					MVPP2_GMAC_FORCE_LINK_DOWN);
				writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			}

			mvpp2_interrupts_enable(port);
			mvpp2_port_enable(port);

			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
			netif_carrier_on(dev);
			netif_tx_wake_all_queues(dev);
		} else {
			port->duplex = -1;
			port->speed = 0;

			netif_tx_stop_all_queues(dev);
			netif_carrier_off(dev);
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);

			mvpp2_port_disable(port);
			mvpp2_interrupts_disable(port);
		}

		phy_print_status(phydev);
	}
}

static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}
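
/* Editor's note: when the port has no dedicated TX-done interrupts
 * (!port->has_tx_irqs), completed transmits are reclaimed by this deferred
 * chain instead: mvpp2_tx() arms the pinned hrtimer, whose callback only
 * schedules the tasklet, and mvpp2_tx_proc_cb() then walks every TXQ and
 * re-arms the timer while work remains.
 */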

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}

/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
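
/* Editor's note: CHECKSUM_UNNECESSARY is reported only when the descriptor
 * flags a valid L3 (IPv4 without header error, or IPv6) together with a
 * TCP/UDP L4 whose hardware checksum check passed; every other combination
 * falls through to CHECKSUM_NONE so the stack verifies the packet itself.
 */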

/* Allocate a new buffer and add it to the BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool, int pool)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	/* No recycle or too many buffers are in use, so allocate a new one */
	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
			      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

	return 0;
}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
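
/* Editor's note: for IPv4, ip4h->ihl is passed as-is, i.e. the header length
 * in 32-bit words (5 for a minimal 20-byte header); the IPv6 branch divides
 * the byte length by four (>> 2) to hand the hardware the same unit.
 */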

/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * carried in the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
			continue;
		}

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_todo;
}

static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);

	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
				 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, frag->size);

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      frag->size,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;
cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}

static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
					    MVPP2_TXD_F_DESC |
					    MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}

static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
	return 0;
}

static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct tso_t tso;
	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i, len, descs = 0;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
				      tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	tso_start(skb, &tso);
	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}
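
/* Editor's sketch of the TSO descriptor layout produced above, with assumed
 * example numbers: for a 7200-byte TCP payload, gso_size = 1400 and a
 * 66-byte header, six segments are emitted (five of 1400 bytes plus a
 * 200-byte tail), each as one header descriptor (built by tso_build_hdr()
 * into the per-CPU tso_headers slot) followed by one or more data
 * descriptors mapped straight from the skb; descs counts both kinds and is
 * returned to the caller for accounting.
 */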
6946
Marcin Wojtas3f518502014-07-10 16:52:13 -03006947/* Main tx processing */
6948static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
6949{
6950 struct mvpp2_port *port = netdev_priv(dev);
6951 struct mvpp2_tx_queue *txq, *aggr_txq;
6952 struct mvpp2_txq_pcpu *txq_pcpu;
6953 struct mvpp2_tx_desc *tx_desc;
Thomas Petazzoni20396132017-03-07 16:53:00 +01006954 dma_addr_t buf_dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006955 int frags = 0;
6956 u16 txq_id;
6957 u32 tx_cmd;
6958
6959 txq_id = skb_get_queue_mapping(skb);
6960 txq = port->txqs[txq_id];
6961 txq_pcpu = this_cpu_ptr(txq->pcpu);
6962 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
6963
Antoine Ténart186cd4d2017-08-23 09:46:56 +02006964 if (skb_is_gso(skb)) {
6965 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
6966 goto out;
6967 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006968 frags = skb_shinfo(skb)->nr_frags + 1;
6969
6970 /* Check number of available descriptors */
6971 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
6972 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
6973 txq_pcpu, frags)) {
6974 frags = 0;
6975 goto out;
6976 }
6977
6978 /* Get a descriptor for the first part of the packet */
6979 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006980 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6981 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
Marcin Wojtas3f518502014-07-10 16:52:13 -03006982
Thomas Petazzoni20396132017-03-07 16:53:00 +01006983 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006984 skb_headlen(skb), DMA_TO_DEVICE);
Thomas Petazzoni20396132017-03-07 16:53:00 +01006985 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006986 mvpp2_txq_desc_put(txq);
6987 frags = 0;
6988 goto out;
6989 }
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006990
Antoine Tenart6eb5d372017-10-30 11:23:33 +01006991 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006992
6993 tx_cmd = mvpp2_skb_tx_csum(port, skb);
6994
6995 if (frags == 1) {
6996 /* First and Last descriptor */
6997 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006998 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
6999 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007000 } else {
7001 /* First but not Last */
7002 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01007003 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
7004 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007005
7006 /* Continue with other skb fragments */
7007 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01007008 tx_desc_unmap_put(port, txq, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007009 frags = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007010 }
7011 }
7012
Marcin Wojtas3f518502014-07-10 16:52:13 -03007013out:
7014 if (frags > 0) {
7015 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
Antoine Ténart186cd4d2017-08-23 09:46:56 +02007016 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
7017
7018 txq_pcpu->reserved_num -= frags;
7019 txq_pcpu->count += frags;
7020 aggr_txq->count += frags;
7021
7022 /* Enable transmit */
7023 wmb();
7024 mvpp2_aggr_txq_pend_desc_add(port, frags);
7025
Antoine Tenart1d17db02017-10-30 11:23:31 +01007026 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
Antoine Ténart186cd4d2017-08-23 09:46:56 +02007027 netif_tx_stop_queue(nq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007028
7029 u64_stats_update_begin(&stats->syncp);
7030 stats->tx_packets++;
7031 stats->tx_bytes += skb->len;
7032 u64_stats_update_end(&stats->syncp);
7033 } else {
7034 dev->stats.tx_dropped++;
7035 dev_kfree_skb_any(skb);
7036 }
7037
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007038 /* Finalize TX processing */
Antoine Tenart082297e2017-10-23 15:24:31 +02007039 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007040 mvpp2_txq_done(port, txq, txq_pcpu);
7041
7042 /* Set the timer in case not all frags were processed */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007043 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
7044 txq_pcpu->count > 0) {
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007045 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
7046
7047 mvpp2_timer_set(port_pcpu);
7048 }
7049
Marcin Wojtas3f518502014-07-10 16:52:13 -03007050 return NETDEV_TX_OK;
7051}
7052
7053static inline void mvpp2_cause_error(struct net_device *dev, int cause)
7054{
7055 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
7056 netdev_err(dev, "FCS error\n");
7057 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
7058 netdev_err(dev, "rx fifo overrun error\n");
7059 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
7060 netdev_err(dev, "tx fifo underrun error\n");
7061}
7062
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007063static int mvpp2_poll(struct napi_struct *napi, int budget)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007064{
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007065 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007066 int rx_done = 0;
7067 struct mvpp2_port *port = netdev_priv(napi->dev);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007068 struct mvpp2_queue_vector *qv;
Thomas Petazzonia7868412017-03-07 16:53:13 +01007069 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03007070
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007071 qv = container_of(napi, struct mvpp2_queue_vector, napi);
7072
Marcin Wojtas3f518502014-07-10 16:52:13 -03007073 /* Rx/Tx cause register
7074 *
7075 * Bits 0-15: each bit indicates received packets on the Rx queue
7076 * (bit 0 is for Rx queue 0).
7077 *
7078 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
7079 * (bit 16 is for Tx queue 0).
7080 *
7081 * Each CPU has its own Rx/Tx cause register
7082 */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007083 cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
Thomas Petazzonia7868412017-03-07 16:53:13 +01007084 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03007085
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007086 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007087 if (cause_misc) {
7088 mvpp2_cause_error(port->dev, cause_misc);
7089
7090 /* Clear the cause register */
7091 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01007092 mvpp2_percpu_write(port->priv, cpu,
7093 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
7094 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007095 }
7096
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007097 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
7098 if (cause_tx) {
7099 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
7100 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
7101 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007102
7103 /* Process RX packets */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007104 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
7105 cause_rx <<= qv->first_rxq;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007106 cause_rx |= qv->pending_cause_rx;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007107 while (cause_rx && budget > 0) {
7108 int count;
7109 struct mvpp2_rx_queue *rxq;
7110
7111 rxq = mvpp2_get_rx_queue(port, cause_rx);
7112 if (!rxq)
7113 break;
7114
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007115 count = mvpp2_rx(port, napi, budget, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007116 rx_done += count;
7117 budget -= count;
7118 if (budget > 0) {
7119 /* Clear the bit associated to this Rx queue
7120 * so that next iteration will continue from
7121 * the next Rx queue.
7122 */
7123 cause_rx &= ~(1 << rxq->logic_rxq);
7124 }
7125 }
7126
7127 if (budget > 0) {
7128 cause_rx = 0;
Eric Dumazet6ad20162017-01-30 08:22:01 -08007129 napi_complete_done(napi, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007130
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007131 mvpp2_qvec_interrupt_enable(qv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007132 }
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007133 qv->pending_cause_rx = cause_rx;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007134 return rx_done;
7135}
7136
7137/* Set hw internals when starting port */
7138static void mvpp2_start_dev(struct mvpp2_port *port)
7139{
Philippe Reynes8e072692016-06-28 00:08:11 +02007140 struct net_device *ndev = port->dev;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007141 int i;
Philippe Reynes8e072692016-06-28 00:08:11 +02007142
Stefan Chulski76eb1b12017-08-22 19:08:26 +02007143 if (port->gop_id == 0 &&
7144 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
7145 port->phy_interface == PHY_INTERFACE_MODE_10GKR))
7146 mvpp2_xlg_max_rx_size_set(port);
7147 else
7148 mvpp2_gmac_max_rx_size_set(port);
7149
Marcin Wojtas3f518502014-07-10 16:52:13 -03007150 mvpp2_txp_max_tx_size_set(port);
7151
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007152 for (i = 0; i < port->nqvecs; i++)
7153 napi_enable(&port->qvecs[i].napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007154
7155 /* Enable interrupts on all CPUs */
7156 mvpp2_interrupts_enable(port);
7157
Antoine Tenart542897d2017-08-30 10:29:15 +02007158 if (port->priv->hw_version == MVPP22) {
7159 mvpp22_comphy_init(port);
Antoine Ténartf84bf382017-08-22 19:08:27 +02007160 mvpp22_gop_init(port);
Antoine Tenart542897d2017-08-30 10:29:15 +02007161 }
Antoine Ténartf84bf382017-08-22 19:08:27 +02007162
Antoine Ténart2055d622017-08-22 19:08:23 +02007163 mvpp2_port_mii_set(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007164 mvpp2_port_enable(port);
Antoine Tenart5997c862017-09-01 11:04:53 +02007165 if (ndev->phydev)
7166 phy_start(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007167 netif_tx_start_all_queues(port->dev);
7168}
7169
7170/* Set hw internals when stopping port */
7171static void mvpp2_stop_dev(struct mvpp2_port *port)
7172{
Philippe Reynes8e072692016-06-28 00:08:11 +02007173 struct net_device *ndev = port->dev;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007174 int i;
Philippe Reynes8e072692016-06-28 00:08:11 +02007175
Marcin Wojtas3f518502014-07-10 16:52:13 -03007176 /* Stop new packets from arriving to RXQs */
7177 mvpp2_ingress_disable(port);
7178
7179 mdelay(10);
7180
7181 /* Disable interrupts on all CPUs */
7182 mvpp2_interrupts_disable(port);
7183
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007184 for (i = 0; i < port->nqvecs; i++)
7185 napi_disable(&port->qvecs[i].napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007186
7187 netif_carrier_off(port->dev);
7188 netif_tx_stop_all_queues(port->dev);
7189
7190 mvpp2_egress_disable(port);
7191 mvpp2_port_disable(port);
Antoine Tenart5997c862017-09-01 11:04:53 +02007192 if (ndev->phydev)
7193 phy_stop(ndev->phydev);
Antoine Tenart542897d2017-08-30 10:29:15 +02007194 phy_power_off(port->comphy);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007195}
7196
Marcin Wojtas3f518502014-07-10 16:52:13 -03007197static int mvpp2_check_ringparam_valid(struct net_device *dev,
7198 struct ethtool_ringparam *ring)
7199{
7200 u16 new_rx_pending = ring->rx_pending;
7201 u16 new_tx_pending = ring->tx_pending;
7202
7203 if (ring->rx_pending == 0 || ring->tx_pending == 0)
7204 return -EINVAL;
7205
Yan Markman7cf87e42017-12-11 09:13:26 +01007206 if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
7207 new_rx_pending = MVPP2_MAX_RXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007208 else if (!IS_ALIGNED(ring->rx_pending, 16))
7209 new_rx_pending = ALIGN(ring->rx_pending, 16);
7210
Yan Markman7cf87e42017-12-11 09:13:26 +01007211 if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
7212 new_tx_pending = MVPP2_MAX_TXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007213 else if (!IS_ALIGNED(ring->tx_pending, 32))
7214 new_tx_pending = ALIGN(ring->tx_pending, 32);
7215
Antoine Tenart76e583c2017-11-28 14:19:51 +01007216 /* The Tx ring size cannot be smaller than the minimum number of
7217 * descriptors needed for TSO.
7218 */
7219 if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
7220 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
7221
Marcin Wojtas3f518502014-07-10 16:52:13 -03007222 if (ring->rx_pending != new_rx_pending) {
7223		netdev_info(dev, "illegal Rx ring size value %d, rounding to %d\n",
7224 ring->rx_pending, new_rx_pending);
7225 ring->rx_pending = new_rx_pending;
7226 }
7227
7228 if (ring->tx_pending != new_tx_pending) {
7229		netdev_info(dev, "illegal Tx ring size value %d, rounding to %d\n",
7230 ring->tx_pending, new_tx_pending);
7231 ring->tx_pending = new_tx_pending;
7232 }
7233
7234 return 0;
7235}
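/* Worked example of the rounding above (illustrative only): a request
 * of rx_pending = 100 is not 16-aligned and becomes ALIGN(100, 16) =
 * 112, while tx_pending = 40 becomes ALIGN(40, 32) = 64; any Tx
 * request below MVPP2_MAX_SKB_DESCS is bumped up to the 32-aligned
 * TSO minimum.
 */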
7236
Thomas Petazzoni26975822017-03-07 16:53:14 +01007237static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007238{
7239 u32 mac_addr_l, mac_addr_m, mac_addr_h;
7240
7241 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
7242 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
7243 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
7244 addr[0] = (mac_addr_h >> 24) & 0xFF;
7245 addr[1] = (mac_addr_h >> 16) & 0xFF;
7246 addr[2] = (mac_addr_h >> 8) & 0xFF;
7247 addr[3] = mac_addr_h & 0xFF;
7248 addr[4] = mac_addr_m & 0xFF;
7249 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
7250}
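/* The station address is split across three registers: bytes 0-3 come
 * from the "high" word, byte 4 from the "middle" word and byte 5 from
 * the per-port GMAC CTRL_1 register; they are reassembled here in
 * network (big-endian) order.
 */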
7251
7252static int mvpp2_phy_connect(struct mvpp2_port *port)
7253{
7254 struct phy_device *phy_dev;
7255
Antoine Tenart5997c862017-09-01 11:04:53 +02007256 /* No PHY is attached */
7257 if (!port->phy_node)
7258 return 0;
7259
Marcin Wojtas3f518502014-07-10 16:52:13 -03007260 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
7261 port->phy_interface);
7262 if (!phy_dev) {
7263 netdev_err(port->dev, "cannot connect to phy\n");
7264 return -ENODEV;
7265 }
7266 phy_dev->supported &= PHY_GBIT_FEATURES;
7267 phy_dev->advertising = phy_dev->supported;
7268
Marcin Wojtas3f518502014-07-10 16:52:13 -03007269 port->link = 0;
7270 port->duplex = 0;
7271 port->speed = 0;
7272
7273 return 0;
7274}
7275
7276static void mvpp2_phy_disconnect(struct mvpp2_port *port)
7277{
Philippe Reynes8e072692016-06-28 00:08:11 +02007278 struct net_device *ndev = port->dev;
7279
Antoine Tenart5997c862017-09-01 11:04:53 +02007280 if (!ndev->phydev)
7281 return;
7282
Philippe Reynes8e072692016-06-28 00:08:11 +02007283 phy_disconnect(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007284}
7285
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007286static int mvpp2_irqs_init(struct mvpp2_port *port)
7287{
7288 int err, i;
7289
7290 for (i = 0; i < port->nqvecs; i++) {
7291 struct mvpp2_queue_vector *qv = port->qvecs + i;
7292
Marc Zyngier13c249a2017-11-04 12:33:47 +00007293 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
7294 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
7295
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007296 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
7297 if (err)
7298 goto err;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007299
7300 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
7301 irq_set_affinity_hint(qv->irq,
7302 cpumask_of(qv->sw_thread_id));
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007303 }
7304
7305 return 0;
7306err:
7307 for (i = 0; i < port->nqvecs; i++) {
7308 struct mvpp2_queue_vector *qv = port->qvecs + i;
7309
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007310 irq_set_affinity_hint(qv->irq, NULL);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007311 free_irq(qv->irq, qv);
7312 }
7313
7314 return err;
7315}
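/* Private (per-CPU) vectors are pinned: IRQ_NO_BALANCING keeps
 * irqbalance from migrating their interrupts, and the affinity hint
 * ties each vector's interrupt to the CPU matching its sw_thread_id.
 */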
7316
7317static void mvpp2_irqs_deinit(struct mvpp2_port *port)
7318{
7319 int i;
7320
7321 for (i = 0; i < port->nqvecs; i++) {
7322 struct mvpp2_queue_vector *qv = port->qvecs + i;
7323
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007324 irq_set_affinity_hint(qv->irq, NULL);
Marc Zyngier13c249a2017-11-04 12:33:47 +00007325 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007326 free_irq(qv->irq, qv);
7327 }
7328}
7329
Antoine Tenart1d7d15d2017-10-30 11:23:30 +01007330static void mvpp22_init_rss(struct mvpp2_port *port)
7331{
7332 struct mvpp2 *priv = port->priv;
7333 int i;
7334
7335 /* Set the table width: replace the whole classifier Rx queue number
7336	 * with the ones configured in the RSS table entries.
7337 */
7338 mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
7339 mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
7340
7341 /* Loop through the classifier Rx Queues and map them to a RSS table.
7342 * Map them all to the first table (0) by default.
7343 */
7344 for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
7345 mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
7346 mvpp2_write(priv, MVPP22_RSS_TABLE,
7347 MVPP22_RSS_TABLE_POINTER(0));
7348 }
7349
7350 /* Configure the first table to evenly distribute the packets across
7351	 * real Rx Queues. The table entries map a hash to a port Rx Queue.
7352 */
7353 for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
7354 u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
7355 MVPP22_RSS_INDEX_TABLE_ENTRY(i);
7356 mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
7357
7358 mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
7359 }
7361}
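/* Illustrative sketch (not part of the driver): the RSS hash selects a
 * table entry i and, with the identity table programmed above, the
 * packet is steered to Rx queue (i % port->nrxqs). For nrxqs = 4 the
 * entries read 0, 1, 2, 3, 0, 1, ... so flows spread evenly, i.e.
 *
 *	queue = entry_index % port->nrxqs;
 */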
7362
Marcin Wojtas3f518502014-07-10 16:52:13 -03007363static int mvpp2_open(struct net_device *dev)
7364{
7365 struct mvpp2_port *port = netdev_priv(dev);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007366 struct mvpp2 *priv = port->priv;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007367 unsigned char mac_bcast[ETH_ALEN] = {
7368 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
7369 int err;
7370
Maxime Chevallierce2a27c2018-03-07 15:18:03 +01007371 err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007372 if (err) {
7373 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
7374 return err;
7375 }
Maxime Chevallierce2a27c2018-03-07 15:18:03 +01007376 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007377 if (err) {
Maxime Chevallierce2a27c2018-03-07 15:18:03 +01007378 netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03007379 return err;
7380 }
7381 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
7382 if (err) {
7383 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
7384 return err;
7385 }
7386 err = mvpp2_prs_def_flow(port);
7387 if (err) {
7388 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
7389 return err;
7390 }
7391
7392 /* Allocate the Rx/Tx queues */
7393 err = mvpp2_setup_rxqs(port);
7394 if (err) {
7395 netdev_err(port->dev, "cannot allocate Rx queues\n");
7396 return err;
7397 }
7398
7399 err = mvpp2_setup_txqs(port);
7400 if (err) {
7401 netdev_err(port->dev, "cannot allocate Tx queues\n");
7402 goto err_cleanup_rxqs;
7403 }
7404
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007405 err = mvpp2_irqs_init(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007406 if (err) {
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007407 netdev_err(port->dev, "cannot init IRQs\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03007408 goto err_cleanup_txqs;
7409 }
7410
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007411 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) {
7412 err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
7413 dev->name, port);
7414 if (err) {
7415 netdev_err(port->dev, "cannot request link IRQ %d\n",
7416 port->link_irq);
7417 goto err_free_irq;
7418 }
7419
7420 mvpp22_gop_setup_irq(port);
7421 }
7422
Marcin Wojtas3f518502014-07-10 16:52:13 -03007423	/* The link is down by default */
7424 netif_carrier_off(port->dev);
7425
7426 err = mvpp2_phy_connect(port);
7427 if (err < 0)
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007428 goto err_free_link_irq;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007429
7430 /* Unmask interrupts on all CPUs */
7431 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007432 mvpp2_shared_interrupt_mask_unmask(port, false);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007433
7434 mvpp2_start_dev(port);
7435
Antoine Tenart1d7d15d2017-10-30 11:23:30 +01007436 if (priv->hw_version == MVPP22)
7437 mvpp22_init_rss(port);
7438
Miquel Raynal118d6292017-11-06 22:56:53 +01007439 /* Start hardware statistics gathering */
Miquel Raynale5c500e2017-11-08 08:59:40 +01007440 queue_delayed_work(priv->stats_queue, &port->stats_work,
Miquel Raynal118d6292017-11-06 22:56:53 +01007441 MVPP2_MIB_COUNTERS_STATS_DELAY);
7442
Marcin Wojtas3f518502014-07-10 16:52:13 -03007443 return 0;
7444
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007445err_free_link_irq:
7446 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
7447 free_irq(port->link_irq, port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007448err_free_irq:
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007449 mvpp2_irqs_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007450err_cleanup_txqs:
7451 mvpp2_cleanup_txqs(port);
7452err_cleanup_rxqs:
7453 mvpp2_cleanup_rxqs(port);
7454 return err;
7455}
7456
7457static int mvpp2_stop(struct net_device *dev)
7458{
7459 struct mvpp2_port *port = netdev_priv(dev);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007460 struct mvpp2_port_pcpu *port_pcpu;
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007461 struct mvpp2 *priv = port->priv;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007462 int cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007463
7464 mvpp2_stop_dev(port);
7465 mvpp2_phy_disconnect(port);
7466
7467 /* Mask interrupts on all CPUs */
7468 on_each_cpu(mvpp2_interrupts_mask, port, 1);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007469 mvpp2_shared_interrupt_mask_unmask(port, true);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007470
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007471 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
7472 free_irq(port->link_irq, port);
7473
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007474 mvpp2_irqs_deinit(port);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007475 if (!port->has_tx_irqs) {
7476 for_each_present_cpu(cpu) {
7477 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007478
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007479 hrtimer_cancel(&port_pcpu->tx_done_timer);
7480 port_pcpu->timer_scheduled = false;
7481 tasklet_kill(&port_pcpu->tx_done_tasklet);
7482 }
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007483 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007484 mvpp2_cleanup_rxqs(port);
7485 mvpp2_cleanup_txqs(port);
7486
Miquel Raynale5c500e2017-11-08 08:59:40 +01007487 cancel_delayed_work_sync(&port->stats_work);
Miquel Raynal118d6292017-11-06 22:56:53 +01007488
Marcin Wojtas3f518502014-07-10 16:52:13 -03007489 return 0;
7490}
7491
Maxime Chevallier10fea262018-03-07 15:18:04 +01007492static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
7493 struct netdev_hw_addr_list *list)
7494{
7495 struct netdev_hw_addr *ha;
7496 int ret;
7497
7498 netdev_hw_addr_list_for_each(ha, list) {
7499 ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
7500 if (ret)
7501 return ret;
7502 }
7503
7504 return 0;
7505}
7506
7507static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
7508{
7509 if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
7510 mvpp2_prs_vid_enable_filtering(port);
7511 else
7512 mvpp2_prs_vid_disable_filtering(port);
7513
7514 mvpp2_prs_mac_promisc_set(port->priv, port->id,
7515 MVPP2_PRS_L2_UNI_CAST, enable);
7516
7517 mvpp2_prs_mac_promisc_set(port->priv, port->id,
7518 MVPP2_PRS_L2_MULTI_CAST, enable);
7519}
7520
Marcin Wojtas3f518502014-07-10 16:52:13 -03007521static void mvpp2_set_rx_mode(struct net_device *dev)
7522{
7523 struct mvpp2_port *port = netdev_priv(dev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007524
Maxime Chevallier10fea262018-03-07 15:18:04 +01007525	/* Clear the UC and MC address lists */
7526 mvpp2_prs_mac_del_all(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007527
Maxime Chevallier10fea262018-03-07 15:18:04 +01007528 if (dev->flags & IFF_PROMISC) {
7529 mvpp2_set_rx_promisc(port, true);
7530 return;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007531 }
Maxime Chevallier56beda32018-02-28 10:14:13 +01007532
Maxime Chevallier10fea262018-03-07 15:18:04 +01007533 mvpp2_set_rx_promisc(port, false);
7534
7535 if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
7536 mvpp2_prs_mac_da_accept_list(port, &dev->uc))
7537 mvpp2_prs_mac_promisc_set(port->priv, port->id,
7538 MVPP2_PRS_L2_UNI_CAST, true);
7539
7540 if (dev->flags & IFF_ALLMULTI) {
7541 mvpp2_prs_mac_promisc_set(port->priv, port->id,
7542 MVPP2_PRS_L2_MULTI_CAST, true);
7543 return;
7544 }
7545
7546 if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
7547 mvpp2_prs_mac_da_accept_list(port, &dev->mc))
7548 mvpp2_prs_mac_promisc_set(port->priv, port->id,
7549 MVPP2_PRS_L2_MULTI_CAST, true);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007550}
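/* The fallback to promiscuity above is per cast type: if the UC or MC
 * list exceeds what the parser can filter (MVPP2_PRS_MAC_UC_FILT_MAX
 * or MVPP2_PRS_MAC_MC_FILT_MAX entries), or an entry cannot be
 * accepted, only that cast type is made promiscuous, not the whole
 * port.
 */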
7551
7552static int mvpp2_set_mac_address(struct net_device *dev, void *p)
7553{
7554 struct mvpp2_port *port = netdev_priv(dev);
7555 const struct sockaddr *addr = p;
7556 int err;
7557
7558 if (!is_valid_ether_addr(addr->sa_data)) {
7559 err = -EADDRNOTAVAIL;
Markus Elfringc1175542017-04-17 11:10:47 +02007560 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007561 }
7562
7563 if (!netif_running(dev)) {
7564 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
7565 if (!err)
7566 return 0;
7567 /* Reconfigure parser to accept the original MAC address */
7568 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
7569 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02007570 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007571 }
7572
7573 mvpp2_stop_dev(port);
7574
7575 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
7576 if (!err)
7577 goto out_start;
7578
7579	/* Reconfigure parser to accept the original MAC address */
7580 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
7581 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02007582 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007583out_start:
7584 mvpp2_start_dev(port);
7585 mvpp2_egress_enable(port);
7586 mvpp2_ingress_enable(port);
7587 return 0;
Markus Elfringc1175542017-04-17 11:10:47 +02007588log_error:
Markus Elfringdfd42402017-04-17 11:20:41 +02007589 netdev_err(dev, "failed to change MAC address\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03007590 return err;
7591}
7592
7593static int mvpp2_change_mtu(struct net_device *dev, int mtu)
7594{
7595 struct mvpp2_port *port = netdev_priv(dev);
7596 int err;
7597
Jarod Wilson57779872016-10-17 15:54:06 -04007598 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
7599		netdev_info(dev, "illegal MTU value %d, rounding to %d\n", mtu,
7600 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
7601 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007602 }
7603
7604 if (!netif_running(dev)) {
7605 err = mvpp2_bm_update_mtu(dev, mtu);
7606 if (!err) {
7607 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
7608 return 0;
7609 }
7610
7611 /* Reconfigure BM to the original MTU */
7612 err = mvpp2_bm_update_mtu(dev, dev->mtu);
7613 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02007614 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007615 }
7616
7617 mvpp2_stop_dev(port);
7618
7619 err = mvpp2_bm_update_mtu(dev, mtu);
7620 if (!err) {
7621 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
7622 goto out_start;
7623 }
7624
7625 /* Reconfigure BM to the original MTU */
7626 err = mvpp2_bm_update_mtu(dev, dev->mtu);
7627 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02007628 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007629
7630out_start:
7631 mvpp2_start_dev(port);
7632 mvpp2_egress_enable(port);
7633 mvpp2_ingress_enable(port);
7634
7635 return 0;
Markus Elfringc1175542017-04-17 11:10:47 +02007636log_error:
Markus Elfringdfd42402017-04-17 11:20:41 +02007637 netdev_err(dev, "failed to change MTU\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03007638 return err;
7639}
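/* Like mvpp2_set_mac_address() above, the MTU update is transactional:
 * the buffer manager pools are resized for the new MTU and, on
 * failure, a second mvpp2_bm_update_mtu() call rolls back to the MTU
 * the device was already using before the port is restarted.
 */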
7640
stephen hemmingerbc1f4472017-01-06 19:12:52 -08007641static void
Marcin Wojtas3f518502014-07-10 16:52:13 -03007642mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7643{
7644 struct mvpp2_port *port = netdev_priv(dev);
7645 unsigned int start;
7646 int cpu;
7647
7648 for_each_possible_cpu(cpu) {
7649 struct mvpp2_pcpu_stats *cpu_stats;
7650 u64 rx_packets;
7651 u64 rx_bytes;
7652 u64 tx_packets;
7653 u64 tx_bytes;
7654
7655 cpu_stats = per_cpu_ptr(port->stats, cpu);
7656 do {
7657 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
7658 rx_packets = cpu_stats->rx_packets;
7659 rx_bytes = cpu_stats->rx_bytes;
7660 tx_packets = cpu_stats->tx_packets;
7661 tx_bytes = cpu_stats->tx_bytes;
7662 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
7663
7664 stats->rx_packets += rx_packets;
7665 stats->rx_bytes += rx_bytes;
7666 stats->tx_packets += tx_packets;
7667 stats->tx_bytes += tx_bytes;
7668 }
7669
7670 stats->rx_errors = dev->stats.rx_errors;
7671 stats->rx_dropped = dev->stats.rx_dropped;
7672 stats->tx_dropped = dev->stats.tx_dropped;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007673}
7674
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007675static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7676{
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007677 int ret;
7678
Philippe Reynes8e072692016-06-28 00:08:11 +02007679 if (!dev->phydev)
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007680 return -ENOTSUPP;
7681
Philippe Reynes8e072692016-06-28 00:08:11 +02007682 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007683 if (!ret)
7684 mvpp2_link_event(dev);
7685
7686 return ret;
7687}
7688
Maxime Chevallier56beda32018-02-28 10:14:13 +01007689static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
7690{
7691 struct mvpp2_port *port = netdev_priv(dev);
7692 int ret;
7693
7694 ret = mvpp2_prs_vid_entry_add(port, vid);
7695 if (ret)
7696 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
7697 MVPP2_PRS_VLAN_FILT_MAX - 1);
7698 return ret;
7699}
7700
7701static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
7702{
7703 struct mvpp2_port *port = netdev_priv(dev);
7704
7705 mvpp2_prs_vid_entry_remove(port, vid);
7706 return 0;
7707}
7708
7709static int mvpp2_set_features(struct net_device *dev,
7710 netdev_features_t features)
7711{
7712 netdev_features_t changed = dev->features ^ features;
7713 struct mvpp2_port *port = netdev_priv(dev);
7714
7715 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
7716 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
7717 mvpp2_prs_vid_enable_filtering(port);
7718 } else {
7719 /* Invalidate all registered VID filters for this
7720 * port
7721 */
7722 mvpp2_prs_vid_remove_all(port);
7723
7724 mvpp2_prs_vid_disable_filtering(port);
7725 }
7726 }
7727
7728 return 0;
7729}
7730
Marcin Wojtas3f518502014-07-10 16:52:13 -03007731/* Ethtool methods */
7732
Marcin Wojtas3f518502014-07-10 16:52:13 -03007733 /* Set interrupt coalescing for ethtool */
7734static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
7735 struct ethtool_coalesce *c)
7736{
7737 struct mvpp2_port *port = netdev_priv(dev);
7738 int queue;
7739
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007740 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007741 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
7742
7743 rxq->time_coal = c->rx_coalesce_usecs;
7744 rxq->pkts_coal = c->rx_max_coalesced_frames;
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01007745 mvpp2_rx_pkts_coal_set(port, rxq);
7746 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007747 }
7748
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007749 if (port->has_tx_irqs) {
7750 port->tx_time_coal = c->tx_coalesce_usecs;
7751 mvpp2_tx_time_coal_set(port);
7752 }
7753
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007754 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007755 struct mvpp2_tx_queue *txq = port->txqs[queue];
7756
7757 txq->done_pkts_coal = c->tx_max_coalesced_frames;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007758
7759 if (port->has_tx_irqs)
7760 mvpp2_tx_pkts_coal_set(port, txq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007761 }
7762
Marcin Wojtas3f518502014-07-10 16:52:13 -03007763 return 0;
7764}
7765
7766/* Get interrupt coalescing for ethtool */
7767static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
7768 struct ethtool_coalesce *c)
7769{
7770 struct mvpp2_port *port = netdev_priv(dev);
7771
Antoine Tenart385c2842017-12-11 09:13:27 +01007772 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
7773 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
7774 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
Antoine Tenart24b28cc2017-12-11 09:13:28 +01007775 c->tx_coalesce_usecs = port->tx_time_coal;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007776 return 0;
7777}
7778
7779static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
7780 struct ethtool_drvinfo *drvinfo)
7781{
7782 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
7783 sizeof(drvinfo->driver));
7784 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
7785 sizeof(drvinfo->version));
7786 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
7787 sizeof(drvinfo->bus_info));
7788}
7789
7790static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
7791 struct ethtool_ringparam *ring)
7792{
7793 struct mvpp2_port *port = netdev_priv(dev);
7794
Yan Markman7cf87e42017-12-11 09:13:26 +01007795 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
7796 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007797 ring->rx_pending = port->rx_ring_size;
7798 ring->tx_pending = port->tx_ring_size;
7799}
7800
7801static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
7802 struct ethtool_ringparam *ring)
7803{
7804 struct mvpp2_port *port = netdev_priv(dev);
7805 u16 prev_rx_ring_size = port->rx_ring_size;
7806 u16 prev_tx_ring_size = port->tx_ring_size;
7807 int err;
7808
7809 err = mvpp2_check_ringparam_valid(dev, ring);
7810 if (err)
7811 return err;
7812
7813 if (!netif_running(dev)) {
7814 port->rx_ring_size = ring->rx_pending;
7815 port->tx_ring_size = ring->tx_pending;
7816 return 0;
7817 }
7818
7819 /* The interface is running, so we have to force a
7820 * reallocation of the queues
7821 */
7822 mvpp2_stop_dev(port);
7823 mvpp2_cleanup_rxqs(port);
7824 mvpp2_cleanup_txqs(port);
7825
7826 port->rx_ring_size = ring->rx_pending;
7827 port->tx_ring_size = ring->tx_pending;
7828
7829 err = mvpp2_setup_rxqs(port);
7830 if (err) {
7831 /* Reallocate Rx queues with the original ring size */
7832 port->rx_ring_size = prev_rx_ring_size;
7833 ring->rx_pending = prev_rx_ring_size;
7834 err = mvpp2_setup_rxqs(port);
7835 if (err)
7836 goto err_out;
7837 }
7838 err = mvpp2_setup_txqs(port);
7839 if (err) {
7840 /* Reallocate Tx queues with the original ring size */
7841 port->tx_ring_size = prev_tx_ring_size;
7842 ring->tx_pending = prev_tx_ring_size;
7843 err = mvpp2_setup_txqs(port);
7844 if (err)
7845 goto err_clean_rxqs;
7846 }
7847
7848 mvpp2_start_dev(port);
7849 mvpp2_egress_enable(port);
7850 mvpp2_ingress_enable(port);
7851
7852 return 0;
7853
7854err_clean_rxqs:
7855 mvpp2_cleanup_rxqs(port);
7856err_out:
Markus Elfringdfd42402017-04-17 11:20:41 +02007857	netdev_err(dev, "failed to change ring parameters\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03007858 return err;
7859}
7860
7861/* Device ops */
7862
7863static const struct net_device_ops mvpp2_netdev_ops = {
7864 .ndo_open = mvpp2_open,
7865 .ndo_stop = mvpp2_stop,
7866 .ndo_start_xmit = mvpp2_tx,
7867 .ndo_set_rx_mode = mvpp2_set_rx_mode,
7868 .ndo_set_mac_address = mvpp2_set_mac_address,
7869 .ndo_change_mtu = mvpp2_change_mtu,
7870 .ndo_get_stats64 = mvpp2_get_stats64,
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007871 .ndo_do_ioctl = mvpp2_ioctl,
Maxime Chevallier56beda32018-02-28 10:14:13 +01007872 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
7873 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
7874 .ndo_set_features = mvpp2_set_features,
Marcin Wojtas3f518502014-07-10 16:52:13 -03007875};
7876
7877static const struct ethtool_ops mvpp2_eth_tool_ops = {
Florian Fainelli00606c42016-11-15 11:19:48 -08007878 .nway_reset = phy_ethtool_nway_reset,
Marcin Wojtas3f518502014-07-10 16:52:13 -03007879 .get_link = ethtool_op_get_link,
Marcin Wojtas3f518502014-07-10 16:52:13 -03007880 .set_coalesce = mvpp2_ethtool_set_coalesce,
7881 .get_coalesce = mvpp2_ethtool_get_coalesce,
7882 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
7883 .get_ringparam = mvpp2_ethtool_get_ringparam,
7884 .set_ringparam = mvpp2_ethtool_set_ringparam,
Miquel Raynal118d6292017-11-06 22:56:53 +01007885 .get_strings = mvpp2_ethtool_get_strings,
7886 .get_ethtool_stats = mvpp2_ethtool_get_stats,
7887 .get_sset_count = mvpp2_ethtool_get_sset_count,
Philippe Reynesfb773e92016-06-28 00:08:12 +02007888 .get_link_ksettings = phy_ethtool_get_link_ksettings,
7889 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Marcin Wojtas3f518502014-07-10 16:52:13 -03007890};
7891
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007892/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
7893 * had a single IRQ defined per-port.
7894 */
7895static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
7896 struct device_node *port_node)
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007897{
7898 struct mvpp2_queue_vector *v = &port->qvecs[0];
7899
7900 v->first_rxq = 0;
7901 v->nrxqs = port->nrxqs;
7902 v->type = MVPP2_QUEUE_VECTOR_SHARED;
7903 v->sw_thread_id = 0;
7904 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
7905 v->port = port;
7906 v->irq = irq_of_parse_and_map(port_node, 0);
7907 if (v->irq <= 0)
7908 return -EINVAL;
7909 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7910 NAPI_POLL_WEIGHT);
7911
7912 port->nqvecs = 1;
7913
7914 return 0;
7915}
7916
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007917static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
7918 struct device_node *port_node)
7919{
7920 struct mvpp2_queue_vector *v;
7921 int i, ret;
7922
7923 port->nqvecs = num_possible_cpus();
7924 if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
7925 port->nqvecs += 1;
7926
7927 for (i = 0; i < port->nqvecs; i++) {
7928 char irqname[16];
7929
7930 v = port->qvecs + i;
7931
7932 v->port = port;
7933 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
7934 v->sw_thread_id = i;
7935 v->sw_thread_mask = BIT(i);
7936
7937 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
7938
7939 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
7940 v->first_rxq = i * MVPP2_DEFAULT_RXQ;
7941 v->nrxqs = MVPP2_DEFAULT_RXQ;
7942 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
7943 i == (port->nqvecs - 1)) {
7944 v->first_rxq = 0;
7945 v->nrxqs = port->nrxqs;
7946 v->type = MVPP2_QUEUE_VECTOR_SHARED;
7947 strncpy(irqname, "rx-shared", sizeof(irqname));
7948 }
7949
Marcin Wojtasa75edc72018-01-18 13:31:44 +01007950 if (port_node)
7951 v->irq = of_irq_get_byname(port_node, irqname);
7952 else
7953 v->irq = fwnode_irq_get(port->fwnode, i);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007954 if (v->irq <= 0) {
7955 ret = -EINVAL;
7956 goto err;
7957 }
7958
7959 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7960 NAPI_POLL_WEIGHT);
7961 }
7962
7963 return 0;
7964
7965err:
7966 for (i = 0; i < port->nqvecs; i++)
7967 irq_dispose_mapping(port->qvecs[i].irq);
7968 return ret;
7969}
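/* Resulting layout: one private "tx-cpuN" vector per possible CPU,
 * each owning MVPP2_DEFAULT_RXQ Rx queues in multi-queue mode; in
 * single-queue mode an extra shared "rx-shared" vector is appended as
 * the last entry and owns all of the port's Rx queues instead.
 */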
7970
7971static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
7972 struct device_node *port_node)
7973{
7974 if (port->has_tx_irqs)
7975 return mvpp2_multi_queue_vectors_init(port, port_node);
7976 else
7977 return mvpp2_simple_queue_vectors_init(port, port_node);
7978}
7979
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007980static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
7981{
7982 int i;
7983
7984 for (i = 0; i < port->nqvecs; i++)
7985 irq_dispose_mapping(port->qvecs[i].irq);
7986}
7987
7988/* Configure Rx queue group interrupt for this port */
7989static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
7990{
7991 struct mvpp2 *priv = port->priv;
7992 u32 val;
7993 int i;
7994
7995 if (priv->hw_version == MVPP21) {
7996 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
7997 port->nrxqs);
7998 return;
7999 }
8000
8001 /* Handle the more complicated PPv2.2 case */
8002 for (i = 0; i < port->nqvecs; i++) {
8003 struct mvpp2_queue_vector *qv = port->qvecs + i;
8004
8005 if (!qv->nrxqs)
8006 continue;
8007
8008 val = qv->sw_thread_id;
8009 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
8010 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
8011
8012 val = qv->first_rxq;
8013 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
8014 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
8015 }
8016}
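/* On PPv2.2 the mapping above goes through an index/data pair:
 * MVPP22_ISR_RXQ_GROUP_INDEX_REG selects the slot (port number plus
 * the sw_thread used as sub-group index), then the sub-group config
 * register takes that vector's first Rx queue and queue count.
 */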
8017
Marcin Wojtas3f518502014-07-10 16:52:13 -03008018/* Initialize port HW */
8019static int mvpp2_port_init(struct mvpp2_port *port)
8020{
8021 struct device *dev = port->dev->dev.parent;
8022 struct mvpp2 *priv = port->priv;
8023 struct mvpp2_txq_pcpu *txq_pcpu;
8024 int queue, cpu, err;
8025
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008026 /* Checks for hardware constraints */
8027 if (port->first_rxq + port->nrxqs >
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01008028 MVPP2_MAX_PORTS * priv->max_port_rxqs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008029 return -EINVAL;
8030
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008031 if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
8032 (port->ntxqs > MVPP2_MAX_TXQ))
8033 return -EINVAL;
8034
Marcin Wojtas3f518502014-07-10 16:52:13 -03008035 /* Disable port */
8036 mvpp2_egress_disable(port);
8037 mvpp2_port_disable(port);
8038
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008039 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
8040
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008041 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03008042 GFP_KERNEL);
8043 if (!port->txqs)
8044 return -ENOMEM;
8045
8046	/* Associate physical Tx queues with this port and initialize them.
8047 * The mapping is predefined.
8048 */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008049 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008050 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
8051 struct mvpp2_tx_queue *txq;
8052
8053 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
Christophe Jaillet177c8d12017-02-19 10:19:57 +01008054 if (!txq) {
8055 err = -ENOMEM;
8056 goto err_free_percpu;
8057 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008058
8059 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
8060 if (!txq->pcpu) {
8061 err = -ENOMEM;
8062 goto err_free_percpu;
8063 }
8064
8065 txq->id = queue_phy_id;
8066 txq->log_id = queue;
8067 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
8068 for_each_present_cpu(cpu) {
8069 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
8070 txq_pcpu->cpu = cpu;
8071 }
8072
8073 port->txqs[queue] = txq;
8074 }
8075
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008076 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03008077 GFP_KERNEL);
8078 if (!port->rxqs) {
8079 err = -ENOMEM;
8080 goto err_free_percpu;
8081 }
8082
8083	/* Allocate and initialize Rx queues for this port */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008084 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008085 struct mvpp2_rx_queue *rxq;
8086
8087 /* Map physical Rx queue to port's logical Rx queue */
8088 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08008089 if (!rxq) {
8090 err = -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008091 goto err_free_percpu;
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08008092 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008093 /* Map this Rx queue to a physical queue */
8094 rxq->id = port->first_rxq + queue;
8095 rxq->port = port->id;
8096 rxq->logic_rxq = queue;
8097
8098 port->rxqs[queue] = rxq;
8099 }
8100
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008101 mvpp2_rx_irqs_setup(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008102
8103 /* Create Rx descriptor rings */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008104 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008105 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
8106
8107 rxq->size = port->rx_ring_size;
8108 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
8109 rxq->time_coal = MVPP2_RX_COAL_USEC;
8110 }
8111
8112 mvpp2_ingress_disable(port);
8113
8114 /* Port default configuration */
8115 mvpp2_defaults_set(port);
8116
8117 /* Port's classifier configuration */
8118 mvpp2_cls_oversize_rxq_set(port);
8119 mvpp2_cls_port_config(port);
8120
8121 /* Provide an initial Rx packet size */
8122 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
8123
8124	/* Initialize pools for software forwarding (swf) */
8125 err = mvpp2_swf_bm_pool_init(port);
8126 if (err)
8127 goto err_free_percpu;
8128
8129 return 0;
8130
8131err_free_percpu:
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008132 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008133 if (!port->txqs[queue])
8134 continue;
8135 free_percpu(port->txqs[queue]->pcpu);
8136 }
8137 return err;
8138}
8139
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008140/* Check whether the port DT description has the TX interrupts
8141 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
8142 * they are available, but we need to keep support for old DTs.
8143 */
8144static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
8145 struct device_node *port_node)
8146{
8147 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
8148 "tx-cpu2", "tx-cpu3" };
8149 int ret, i;
8150
8151 if (priv->hw_version == MVPP21)
8152 return false;
8153
8154 for (i = 0; i < 5; i++) {
8155 ret = of_property_match_string(port_node, "interrupt-names",
8156 irqs[i]);
8157 if (ret < 0)
8158 return false;
8159 }
8160
8161 return true;
8162}
8163
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008164static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
Marcin Wojtas24812222018-01-18 13:31:43 +01008165 struct fwnode_handle *fwnode,
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008166 char **mac_from)
8167{
8168 struct mvpp2_port *port = netdev_priv(dev);
8169 char hw_mac_addr[ETH_ALEN] = {0};
Marcin Wojtas24812222018-01-18 13:31:43 +01008170 char fw_mac_addr[ETH_ALEN];
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008171
Marcin Wojtas24812222018-01-18 13:31:43 +01008172 if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
8173 *mac_from = "firmware node";
8174 ether_addr_copy(dev->dev_addr, fw_mac_addr);
Antoine Tenart688cbaf2017-09-02 11:06:49 +02008175 return;
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008176 }
Antoine Tenart688cbaf2017-09-02 11:06:49 +02008177
8178 if (priv->hw_version == MVPP21) {
8179 mvpp21_get_mac_address(port, hw_mac_addr);
8180 if (is_valid_ether_addr(hw_mac_addr)) {
8181 *mac_from = "hardware";
8182 ether_addr_copy(dev->dev_addr, hw_mac_addr);
8183 return;
8184 }
8185 }
8186
8187 *mac_from = "random";
8188 eth_hw_addr_random(dev);
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008189}
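/* MAC address selection order: an address provided by the firmware
 * node wins, then (on PPv2.1 only) a valid address already programmed
 * into the hardware, and finally a random address is generated as a
 * last resort.
 */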
8190
Marcin Wojtas3f518502014-07-10 16:52:13 -03008191/* Ports initialization */
8192static int mvpp2_port_probe(struct platform_device *pdev,
Marcin Wojtas24812222018-01-18 13:31:43 +01008193 struct fwnode_handle *port_fwnode,
Marcin Wojtasbf147152018-01-18 13:31:42 +01008194 struct mvpp2 *priv)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008195{
8196 struct device_node *phy_node;
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008197 struct phy *comphy = NULL;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008198 struct mvpp2_port *port;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008199 struct mvpp2_port_pcpu *port_pcpu;
Marcin Wojtas24812222018-01-18 13:31:43 +01008200 struct device_node *port_node = to_of_node(port_fwnode);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008201 struct net_device *dev;
8202 struct resource *res;
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008203 char *mac_from = "";
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008204 unsigned int ntxqs, nrxqs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008205 bool has_tx_irqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008206 u32 id;
8207 int features;
8208 int phy_mode;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008209 int err, i, cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008210
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008211 if (port_node) {
8212 has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
8213 } else {
8214 has_tx_irqs = true;
8215 queue_mode = MVPP2_QDIST_MULTI_MODE;
8216 }
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008217
8218 if (!has_tx_irqs)
8219 queue_mode = MVPP2_QDIST_SINGLE_MODE;
8220
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008221 ntxqs = MVPP2_MAX_TXQ;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008222 if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
8223 nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
8224 else
8225 nrxqs = MVPP2_DEFAULT_RXQ;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008226
8227 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008228 if (!dev)
8229 return -ENOMEM;
8230
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008231 if (port_node)
8232 phy_node = of_parse_phandle(port_node, "phy", 0);
8233 else
8234 phy_node = NULL;
8235
Marcin Wojtas24812222018-01-18 13:31:43 +01008236 phy_mode = fwnode_get_phy_mode(port_fwnode);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008237 if (phy_mode < 0) {
8238 dev_err(&pdev->dev, "incorrect phy mode\n");
8239 err = phy_mode;
8240 goto err_free_netdev;
8241 }
8242
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008243 if (port_node) {
8244 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
8245 if (IS_ERR(comphy)) {
8246 if (PTR_ERR(comphy) == -EPROBE_DEFER) {
8247 err = -EPROBE_DEFER;
8248 goto err_free_netdev;
8249 }
8250 comphy = NULL;
Antoine Tenart542897d2017-08-30 10:29:15 +02008251 }
Antoine Tenart542897d2017-08-30 10:29:15 +02008252 }
8253
Marcin Wojtas24812222018-01-18 13:31:43 +01008254 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008255 err = -EINVAL;
8256 dev_err(&pdev->dev, "missing port-id value\n");
8257 goto err_free_netdev;
8258 }
8259
Yan Markman7cf87e42017-12-11 09:13:26 +01008260 dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008261 dev->watchdog_timeo = 5 * HZ;
8262 dev->netdev_ops = &mvpp2_netdev_ops;
8263 dev->ethtool_ops = &mvpp2_eth_tool_ops;
8264
8265 port = netdev_priv(dev);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008266 port->dev = dev;
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008267 port->fwnode = port_fwnode;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008268 port->ntxqs = ntxqs;
8269 port->nrxqs = nrxqs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008270 port->priv = priv;
8271 port->has_tx_irqs = has_tx_irqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008272
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008273 err = mvpp2_queue_vectors_init(port, port_node);
8274 if (err)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008275 goto err_free_netdev;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008276
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008277 if (port_node)
8278 port->link_irq = of_irq_get_byname(port_node, "link");
8279 else
8280 port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008281 if (port->link_irq == -EPROBE_DEFER) {
8282 err = -EPROBE_DEFER;
8283 goto err_deinit_qvecs;
8284 }
8285 if (port->link_irq <= 0)
8286 /* the link irq is optional */
8287 port->link_irq = 0;
8288
Marcin Wojtas24812222018-01-18 13:31:43 +01008289 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
Marcin Wojtas3f518502014-07-10 16:52:13 -03008290 port->flags |= MVPP2_F_LOOPBACK;
8291
Marcin Wojtas3f518502014-07-10 16:52:13 -03008292 port->id = id;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01008293 if (priv->hw_version == MVPP21)
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008294 port->first_rxq = port->id * port->nrxqs;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01008295 else
8296 port->first_rxq = port->id * priv->max_port_rxqs;
8297
Marcin Wojtas3f518502014-07-10 16:52:13 -03008298 port->phy_node = phy_node;
8299 port->phy_interface = phy_mode;
Antoine Tenart542897d2017-08-30 10:29:15 +02008300 port->comphy = comphy;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008301
Thomas Petazzonia7868412017-03-07 16:53:13 +01008302 if (priv->hw_version == MVPP21) {
8303 res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
8304 port->base = devm_ioremap_resource(&pdev->dev, res);
8305 if (IS_ERR(port->base)) {
8306 err = PTR_ERR(port->base);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008307 goto err_free_irq;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008308 }
Miquel Raynal118d6292017-11-06 22:56:53 +01008309
8310 port->stats_base = port->priv->lms_base +
8311 MVPP21_MIB_COUNTERS_OFFSET +
8312 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008313 } else {
Marcin Wojtas24812222018-01-18 13:31:43 +01008314 if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
8315 &port->gop_id)) {
Thomas Petazzonia7868412017-03-07 16:53:13 +01008316 err = -EINVAL;
8317 dev_err(&pdev->dev, "missing gop-port-id value\n");
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008318 goto err_deinit_qvecs;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008319 }
8320
8321 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
Miquel Raynal118d6292017-11-06 22:56:53 +01008322 port->stats_base = port->priv->iface_base +
8323 MVPP22_MIB_COUNTERS_OFFSET +
8324 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008325 }
8326
Miquel Raynal118d6292017-11-06 22:56:53 +01008327 /* Alloc per-cpu and ethtool stats */
Marcin Wojtas3f518502014-07-10 16:52:13 -03008328 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
8329 if (!port->stats) {
8330 err = -ENOMEM;
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008331 goto err_free_irq;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008332 }
8333
Miquel Raynal118d6292017-11-06 22:56:53 +01008334 port->ethtool_stats = devm_kcalloc(&pdev->dev,
8335 ARRAY_SIZE(mvpp2_ethtool_regs),
8336 sizeof(u64), GFP_KERNEL);
8337 if (!port->ethtool_stats) {
8338 err = -ENOMEM;
8339 goto err_free_stats;
8340 }
8341
Miquel Raynale5c500e2017-11-08 08:59:40 +01008342 mutex_init(&port->gather_stats_lock);
8343 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
8344
Marcin Wojtas24812222018-01-18 13:31:43 +01008345 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008346
Yan Markman7cf87e42017-12-11 09:13:26 +01008347 port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
8348 port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008349 SET_NETDEV_DEV(dev, &pdev->dev);
8350
8351 err = mvpp2_port_init(port);
8352 if (err < 0) {
8353 dev_err(&pdev->dev, "failed to init port %d\n", id);
8354 goto err_free_stats;
8355 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01008356
Thomas Petazzoni26975822017-03-07 16:53:14 +01008357 mvpp2_port_periodic_xon_disable(port);
8358
8359 if (priv->hw_version == MVPP21)
8360 mvpp2_port_fc_adv_enable(port);
8361
8362 mvpp2_port_reset(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008363
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008364 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
8365 if (!port->pcpu) {
8366 err = -ENOMEM;
8367 goto err_free_txq_pcpu;
8368 }
8369
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008370 if (!port->has_tx_irqs) {
8371 for_each_present_cpu(cpu) {
8372 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008373
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008374 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
8375 HRTIMER_MODE_REL_PINNED);
8376 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
8377 port_pcpu->timer_scheduled = false;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008378
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008379 tasklet_init(&port_pcpu->tx_done_tasklet,
8380 mvpp2_tx_proc_cb,
8381 (unsigned long)dev);
8382 }
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008383 }
8384
Antoine Tenart381c5672018-03-05 15:16:53 +01008385 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
8386 NETIF_F_TSO;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008387 dev->features = features | NETIF_F_RXCSUM;
Maxime Chevallier56beda32018-02-28 10:14:13 +01008388 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
8389 NETIF_F_HW_VLAN_CTAG_FILTER;
Stefan Chulski576193f2018-03-05 15:16:54 +01008390
8391 if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
8392 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
8393 dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
8394 }
8395
Marcin Wojtas3f518502014-07-10 16:52:13 -03008396 dev->vlan_features |= features;
Antoine Tenart1d17db02017-10-30 11:23:31 +01008397 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
Maxime Chevallier10fea262018-03-07 15:18:04 +01008398 dev->priv_flags |= IFF_UNICAST_FLT;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008399
Stefan Chulski576193f2018-03-05 15:16:54 +01008400 /* MTU range: 68 - 9704 */
Jarod Wilson57779872016-10-17 15:54:06 -04008401 dev->min_mtu = ETH_MIN_MTU;
Stefan Chulski576193f2018-03-05 15:16:54 +01008402 /* 9704 == 9728 - 20 and rounding to 8 */
8403 dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
Jarod Wilson57779872016-10-17 15:54:06 -04008404
Marcin Wojtas3f518502014-07-10 16:52:13 -03008405 err = register_netdev(dev);
8406 if (err < 0) {
8407 dev_err(&pdev->dev, "failed to register netdev\n");
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008408 goto err_free_port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008409 }
8410 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
8411
Marcin Wojtasbf147152018-01-18 13:31:42 +01008412 priv->port_list[priv->port_count++] = port;
8413
Marcin Wojtas3f518502014-07-10 16:52:13 -03008414 return 0;
8415
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008416err_free_port_pcpu:
8417 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008418err_free_txq_pcpu:
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008419 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008420 free_percpu(port->txqs[i]->pcpu);
8421err_free_stats:
8422 free_percpu(port->stats);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008423err_free_irq:
8424 if (port->link_irq)
8425 irq_dispose_mapping(port->link_irq);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008426err_deinit_qvecs:
8427 mvpp2_queue_vectors_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008428err_free_netdev:
Peter Chenccb80392016-08-01 15:02:37 +08008429 of_node_put(phy_node);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008430 free_netdev(dev);
8431 return err;
8432}
8433
8434/* Ports removal routine */
8435static void mvpp2_port_remove(struct mvpp2_port *port)
8436{
8437 int i;
8438
8439 unregister_netdev(port->dev);
Peter Chenccb80392016-08-01 15:02:37 +08008440 of_node_put(port->phy_node);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008441 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008442 free_percpu(port->stats);
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008443 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008444 free_percpu(port->txqs[i]->pcpu);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008445 mvpp2_queue_vectors_deinit(port);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008446 if (port->link_irq)
8447 irq_dispose_mapping(port->link_irq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008448 free_netdev(port->dev);
8449}
8450
8451/* Initialize decoding windows */
8452static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
8453 struct mvpp2 *priv)
8454{
8455 u32 win_enable;
8456 int i;
8457
8458 for (i = 0; i < 6; i++) {
8459 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
8460 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
8461
8462 if (i < 4)
8463 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
8464 }
8465
8466 win_enable = 0;
8467
8468 for (i = 0; i < dram->num_cs; i++) {
8469 const struct mbus_dram_window *cs = dram->cs + i;
8470
8471 mvpp2_write(priv, MVPP2_WIN_BASE(i),
8472 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
8473 dram->mbus_dram_target_id);
8474
8475 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
8476 (cs->size - 1) & 0xffff0000);
8477
8478 win_enable |= (1 << i);
8479 }
8480
8481 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
8482}
8483
8484/* Initialize Rx FIFOs */
8485static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
8486{
8487 int port;
8488
8489 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
8490 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
Antoine Tenart2d1d7df2017-10-30 11:23:28 +01008491 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008492 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
Antoine Tenart2d1d7df2017-10-30 11:23:28 +01008493 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
8494 }
8495
8496 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
8497 MVPP2_RX_FIFO_PORT_MIN_PKT);
8498 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
8499}
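/* On PPv2.1 every port gets the same 4kB data and 4kB attribute Rx
 * FIFO allocation; the speed-dependent sizing only exists on PPv2.2
 * (see mvpp22_rx_fifo_init() below).
 */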
8500
8501static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
8502{
8503 int port;
8504
8505 /* The FIFO size parameters are set depending on the maximum speed a
8506 * given port can handle:
8507 * - Port 0: 10Gbps
8508 * - Port 1: 2.5Gbps
8509 * - Ports 2 and 3: 1Gbps
8510 */
8511
8512 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
8513 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
8514 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
8515 MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
8516
8517 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
8518 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
8519 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
8520 MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
8521
8522 for (port = 2; port < MVPP2_MAX_PORTS; port++) {
8523 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
8524 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
8525 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
8526 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008527 }
8528
8529 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
8530 MVPP2_RX_FIFO_PORT_MIN_PKT);
8531 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
8532}
8533
Yan Markman93ff1302018-03-05 15:16:52 +01008534/* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2 and 10G
8535 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
8536 * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size to 3kB.
8537 */
Antoine Tenart7c10f972017-10-30 11:23:29 +01008538static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
8539{
Yan Markman93ff1302018-03-05 15:16:52 +01008540 int port, size, thrs;
Antoine Tenart7c10f972017-10-30 11:23:29 +01008541
Yan Markman93ff1302018-03-05 15:16:52 +01008542 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
8543 if (port == 0) {
8544 size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
8545 thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
8546 } else {
8547 size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
8548 thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
8549 }
8550 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
8551 mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
8552 }
Antoine Tenart7c10f972017-10-30 11:23:29 +01008553}
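/* Size check for the loop above: 10kB for port 0 plus 3kB for each of
 * the three other ports gives 10 + 3 * 3 = 19kB, matching the total
 * Tx FIFO available on PPv2.2.
 */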
8554
Thomas Petazzoni6763ce32017-03-07 16:53:15 +01008555static void mvpp2_axi_init(struct mvpp2 *priv)
8556{
8557 u32 val, rdval, wrval;
8558
8559 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
8560
8561 /* AXI Bridge Configuration */
8562
8563 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
8564 << MVPP22_AXI_ATTR_CACHE_OFFS;
8565 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8566 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
8567
8568 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
8569 << MVPP22_AXI_ATTR_CACHE_OFFS;
8570 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8571 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
8572
8573 /* BM */
8574 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
8575 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
8576
8577 /* Descriptors */
8578 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
8579 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
8580 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
8581 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
8582
8583 /* Buffer Data */
8584 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
8585 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
8586
8587 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
8588 << MVPP22_AXI_CODE_CACHE_OFFS;
8589 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
8590 << MVPP22_AXI_CODE_DOMAIN_OFFS;
8591 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
8592 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
8593
8594 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
8595 << MVPP22_AXI_CODE_CACHE_OFFS;
8596 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8597 << MVPP22_AXI_CODE_DOMAIN_OFFS;
8598
8599 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
8600
8601 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
8602 << MVPP22_AXI_CODE_CACHE_OFFS;
8603 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8604 << MVPP22_AXI_CODE_DOMAIN_OFFS;
8605
8606 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
8607}
8608
Marcin Wojtas3f518502014-07-10 16:52:13 -03008609/* Initialize network controller common part HW */
8610static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
8611{
8612 const struct mbus_dram_target_info *dram_target_info;
8613 int err, i;
Marcin Wojtas08a23752014-07-21 13:48:12 -03008614 u32 val;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008615
Marcin Wojtas3f518502014-07-10 16:52:13 -03008616 /* MBUS windows configuration */
8617 dram_target_info = mv_mbus_dram_info();
8618 if (dram_target_info)
8619 mvpp2_conf_mbus_windows(dram_target_info, priv);
8620
Thomas Petazzoni6763ce32017-03-07 16:53:15 +01008621 if (priv->hw_version == MVPP22)
8622 mvpp2_axi_init(priv);
8623
Marcin Wojtas08a23752014-07-21 13:48:12 -03008624 /* Disable HW PHY polling */
Thomas Petazzoni26975822017-03-07 16:53:14 +01008625 if (priv->hw_version == MVPP21) {
8626 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
8627 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
8628 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
8629 } else {
8630 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
8631 val &= ~MVPP22_SMI_POLLING_EN;
8632 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
8633 }
Marcin Wojtas08a23752014-07-21 13:48:12 -03008634
Marcin Wojtas3f518502014-07-10 16:52:13 -03008635 /* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Fifo Init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}

static int mvpp2_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *acpi_id;
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (has_acpi_companion(&pdev->dev)) {
		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		priv->hw_version = (unsigned long)acpi_id->driver_data;
	} else {
		priv->hw_version =
			(unsigned long)of_device_get_match_data(&pdev->dev);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (has_acpi_companion(&pdev->dev)) {
			/* If the MDIO memory region is declared in the
			 * ACPI tables, it may already be marked 'in-use'
			 * by the OS. Since it is overlapped by the second
			 * region of the network controller, release it
			 * before requesting it again. The mvpp2 driver
			 * takes care to avoid concurrent access to this
			 * memory region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	mvpp2_setup_bm_pool();

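	/* The controller maps one register window per software thread
	 * (MVPP2_MAX_THREADS); record each window's base address so that
	 * per-CPU code can reach its own copy of the per-thread registers.
	 */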
	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version == MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;
		}

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_gop_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_gop_clk;
		}

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
					    &priv->tclk)) {
		dev_err(&pdev->dev, "missing clock-frequency value\n");
		return -EINVAL;
	}
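
	/* On the ACPI path there are no clock handles to query, so the tclk
	 * rate is expected as a device property instead. A sketch of the
	 * corresponding _DSD entry, with a hypothetical rate:
	 *	Package () { "clock-frequency", 250000000 }
	 */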

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
		if (err)
			goto err_mg_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_mg_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_mg_clk;
	}

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_mg_clk;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * packet counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, on a 10Gb link used at full bandwidth with
	 * the smallest (64B) packets, a 32-bit byte counter wraps within a
	 * few seconds and a 32-bit packet counter within a few minutes.
	 * So use a workqueue to accumulate them into 64-bit counters.
	 */
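	/* Worked numbers behind the estimate above, assuming 64B frames plus
	 * 20B of preamble/IFG (84B per frame on the wire): 10e9 / (84 * 8)
	 * ~= 14.88 Mpps, so a 32-bit packet counter wraps after
	 * 2^32 / 14.88e6 ~= 289 s, while a 32-bit byte counter filling at up
	 * to 1.25 GB/s wraps within a few seconds.
	 */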
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_mg_clk:
	clk_disable_unprepare(priv->axi_clk);
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}

static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	int i = 0;

	flush_workqueue(priv->stats_queue);
	destroy_workqueue(priv->stats_queue);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for_each_present_cpu(i) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}

static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");