blob: d6cce0fcc55c66eaa110e4be5da539791686146f [file] [log] [blame]
Marcin Wojtas3f518502014-07-10 16:52:13 -03001/*
2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Marcin Wojtas <mw@semihalf.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
Marcin Wojtasa75edc72018-01-18 13:31:44 +010013#include <linux/acpi.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030014#include <linux/kernel.h>
15#include <linux/netdevice.h>
16#include <linux/etherdevice.h>
17#include <linux/platform_device.h>
18#include <linux/skbuff.h>
19#include <linux/inetdevice.h>
20#include <linux/mbus.h>
21#include <linux/module.h>
Antoine Ténartf84bf382017-08-22 19:08:27 +020022#include <linux/mfd/syscon.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030023#include <linux/interrupt.h>
24#include <linux/cpumask.h>
25#include <linux/of.h>
26#include <linux/of_irq.h>
27#include <linux/of_mdio.h>
28#include <linux/of_net.h>
29#include <linux/of_address.h>
Thomas Petazzonifaca9242017-03-07 16:53:06 +010030#include <linux/of_device.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030031#include <linux/phy.h>
Antoine Tenart542897d2017-08-30 10:29:15 +020032#include <linux/phy/phy.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030033#include <linux/clk.h>
Marcin Wojtasedc660f2015-08-06 19:00:30 +020034#include <linux/hrtimer.h>
35#include <linux/ktime.h>
Antoine Ténartf84bf382017-08-22 19:08:27 +020036#include <linux/regmap.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030037#include <uapi/linux/ppp_defs.h>
38#include <net/ip.h>
39#include <net/ipv6.h>
Antoine Ténart186cd4d2017-08-23 09:46:56 +020040#include <net/tso.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030041
Antoine Tenart7c10f972017-10-30 11:23:29 +010042/* Fifo Registers */
Marcin Wojtas3f518502014-07-10 16:52:13 -030043#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
44#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
45#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
46#define MVPP2_RX_FIFO_INIT_REG 0x64
Antoine Tenart7c10f972017-10-30 11:23:29 +010047#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port))
Marcin Wojtas3f518502014-07-10 16:52:13 -030048
49/* RX DMA Top Registers */
50#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
51#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
52#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
53#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
54#define MVPP2_POOL_BUF_SIZE_OFFSET 5
55#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
56#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
57#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
58#define MVPP2_RXQ_POOL_SHORT_OFFS 20
Thomas Petazzoni5eac8922017-03-07 16:53:10 +010059#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
60#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
Marcin Wojtas3f518502014-07-10 16:52:13 -030061#define MVPP2_RXQ_POOL_LONG_OFFS 24
Thomas Petazzoni5eac8922017-03-07 16:53:10 +010062#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
63#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
Marcin Wojtas3f518502014-07-10 16:52:13 -030064#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
65#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
66#define MVPP2_RXQ_DISABLE_MASK BIT(31)
67
Maxime Chevallier56beda32018-02-28 10:14:13 +010068/* Top Registers */
69#define MVPP2_MH_REG(port) (0x5040 + 4 * (port))
70#define MVPP2_DSA_EXTENDED BIT(5)
71
Marcin Wojtas3f518502014-07-10 16:52:13 -030072/* Parser Registers */
73#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
74#define MVPP2_PRS_PORT_LU_MAX 0xf
75#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
76#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
77#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
78#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
79#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
80#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
81#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
82#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
83#define MVPP2_PRS_TCAM_IDX_REG 0x1100
84#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
85#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
86#define MVPP2_PRS_SRAM_IDX_REG 0x1200
87#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
88#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
89#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
90
Antoine Tenart1d7d15d2017-10-30 11:23:30 +010091/* RSS Registers */
92#define MVPP22_RSS_INDEX 0x1500
Antoine Tenart8a7b7412017-12-08 10:24:20 +010093#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) (idx)
Antoine Tenart1d7d15d2017-10-30 11:23:30 +010094#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8)
95#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16)
96#define MVPP22_RSS_TABLE_ENTRY 0x1508
97#define MVPP22_RSS_TABLE 0x1510
98#define MVPP22_RSS_TABLE_POINTER(p) (p)
99#define MVPP22_RSS_WIDTH 0x150c
100
Marcin Wojtas3f518502014-07-10 16:52:13 -0300101/* Classifier Registers */
102#define MVPP2_CLS_MODE_REG 0x1800
103#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
104#define MVPP2_CLS_PORT_WAY_REG 0x1810
105#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
106#define MVPP2_CLS_LKP_INDEX_REG 0x1814
107#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
108#define MVPP2_CLS_LKP_TBL_REG 0x1818
109#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
110#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
111#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
112#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
113#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
114#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
115#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
116#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
117#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
118#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
119#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
120#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
121
122/* Descriptor Manager Top Registers */
123#define MVPP2_RXQ_NUM_REG 0x2040
124#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
Thomas Petazzonib02f31f2017-03-07 16:53:12 +0100125#define MVPP22_DESC_ADDR_OFFS 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300126#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
127#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
128#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
129#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
130#define MVPP2_RXQ_NUM_NEW_OFFSET 16
131#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
132#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
133#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
134#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
135#define MVPP2_RXQ_THRESH_REG 0x204c
136#define MVPP2_OCCUPIED_THRESH_OFFSET 0
137#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
138#define MVPP2_RXQ_INDEX_REG 0x2050
139#define MVPP2_TXQ_NUM_REG 0x2080
140#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
141#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
142#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200143#define MVPP2_TXQ_THRESH_REG 0x2094
144#define MVPP2_TXQ_THRESH_OFFSET 16
145#define MVPP2_TXQ_THRESH_MASK 0x3fff
Marcin Wojtas3f518502014-07-10 16:52:13 -0300146#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
Marcin Wojtas3f518502014-07-10 16:52:13 -0300147#define MVPP2_TXQ_INDEX_REG 0x2098
148#define MVPP2_TXQ_PREF_BUF_REG 0x209c
149#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
150#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
151#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
152#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
153#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
154#define MVPP2_TXQ_PENDING_REG 0x20a0
155#define MVPP2_TXQ_PENDING_MASK 0x3fff
156#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
157#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
158#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
159#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
160#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
161#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
162#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
163#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
164#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
165#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
166#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
Thomas Petazzonib02f31f2017-03-07 16:53:12 +0100167#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300168#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
169#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
170#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
171#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
172#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
173
174/* MBUS bridge registers */
175#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
176#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
177#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
178#define MVPP2_BASE_ADDR_ENABLE 0x4060
179
Thomas Petazzoni6763ce32017-03-07 16:53:15 +0100180/* AXI Bridge Registers */
181#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
182#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
183#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
184#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
185#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
186#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
187#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
188#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
189#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
190#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
191#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
192#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
193
194/* Values for AXI Bridge registers */
195#define MVPP22_AXI_ATTR_CACHE_OFFS 0
196#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
197
198#define MVPP22_AXI_CODE_CACHE_OFFS 0
199#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
200
201#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
202#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
203#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
204
205#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
206#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
207
Marcin Wojtas3f518502014-07-10 16:52:13 -0300208/* Interrupt Cause and Mask registers */
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200209#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
210#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0
211
Marcin Wojtas3f518502014-07-10 16:52:13 -0300212#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
Thomas Petazzoniab426762017-02-21 11:28:04 +0100213#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
Thomas Petazzonieb1e93a2017-08-03 10:41:55 +0200214#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port))
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100215
Antoine Ténart81b66302017-08-22 19:08:21 +0200216#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100217#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
Antoine Ténart81b66302017-08-22 19:08:21 +0200218#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
219#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100220
221#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
Antoine Ténart81b66302017-08-22 19:08:21 +0200222#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100223
Antoine Ténart81b66302017-08-22 19:08:21 +0200224#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
225#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
226#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
227#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100228
Marcin Wojtas3f518502014-07-10 16:52:13 -0300229#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
230#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
231#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
232#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
233#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
234#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200235#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
Marcin Wojtas3f518502014-07-10 16:52:13 -0300236#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
237#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
238#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
239#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
240#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
241#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
242#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
243#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
244#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
245#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
246#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
247#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
248
249/* Buffer Manager registers */
250#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
251#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
252#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
253#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
254#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
255#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
256#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
257#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
258#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
259#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
260#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
261#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
262#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
263#define MVPP2_BM_START_MASK BIT(0)
264#define MVPP2_BM_STOP_MASK BIT(1)
265#define MVPP2_BM_STATE_MASK BIT(4)
266#define MVPP2_BM_LOW_THRESH_OFFS 8
267#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
268#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
269 MVPP2_BM_LOW_THRESH_OFFS)
270#define MVPP2_BM_HIGH_THRESH_OFFS 16
271#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
272#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
273 MVPP2_BM_HIGH_THRESH_OFFS)
274#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
275#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
276#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
277#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
278#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
279#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
280#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
281#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
282#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
283#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100284#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
285#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
286#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
287#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300288#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
289#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
290#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
291#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
292#define MVPP2_BM_VIRT_RLS_REG 0x64c0
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100293#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
294#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
Antoine Ténart81b66302017-08-22 19:08:21 +0200295#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100296#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300297
298/* TX Scheduler registers */
299#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
300#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
301#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
302#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
303#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
304#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
305#define MVPP2_TXP_SCHED_MTU_REG 0x801c
306#define MVPP2_TXP_MTU_MAX 0x7FFFF
307#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
308#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
309#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
310#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
311#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
312#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
313#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
314#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
315#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
316#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
317#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
318#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
319#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
320#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
321
322/* TX general registers */
323#define MVPP2_TX_SNOOP_REG 0x8800
324#define MVPP2_TX_PORT_FLUSH_REG 0x8810
325#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
326
327/* LMS registers */
328#define MVPP2_SRC_ADDR_MIDDLE 0x24
329#define MVPP2_SRC_ADDR_HIGH 0x28
Marcin Wojtas08a23752014-07-21 13:48:12 -0300330#define MVPP2_PHY_AN_CFG0_REG 0x34
331#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300332#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
Thomas Petazzoni31d76772017-02-21 11:28:10 +0100333#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
Marcin Wojtas3f518502014-07-10 16:52:13 -0300334
335/* Per-port registers */
336#define MVPP2_GMAC_CTRL_0_REG 0x0
Antoine Ténart81b66302017-08-22 19:08:21 +0200337#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
Antoine Ténart39193572017-08-22 19:08:24 +0200338#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1)
Antoine Ténart81b66302017-08-22 19:08:21 +0200339#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
340#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
341#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300342#define MVPP2_GMAC_CTRL_1_REG 0x4
Antoine Ténart81b66302017-08-22 19:08:21 +0200343#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
344#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
345#define MVPP2_GMAC_PCS_LB_EN_BIT 6
346#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
347#define MVPP2_GMAC_SA_LOW_OFFS 7
Marcin Wojtas3f518502014-07-10 16:52:13 -0300348#define MVPP2_GMAC_CTRL_2_REG 0x8
Antoine Ténart81b66302017-08-22 19:08:21 +0200349#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
Antoine Ténart39193572017-08-22 19:08:24 +0200350#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
Antoine Ténart81b66302017-08-22 19:08:21 +0200351#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
Antoine Tenartc7dfc8c2017-09-25 14:59:48 +0200352#define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4)
Antoine Ténart39193572017-08-22 19:08:24 +0200353#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
Antoine Ténart81b66302017-08-22 19:08:21 +0200354#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300355#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
Antoine Ténart81b66302017-08-22 19:08:21 +0200356#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
357#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
Antoine Ténart39193572017-08-22 19:08:24 +0200358#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
359#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
Antoine Ténart81b66302017-08-22 19:08:21 +0200360#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
361#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
362#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
363#define MVPP2_GMAC_FC_ADV_EN BIT(9)
Antoine Ténart39193572017-08-22 19:08:24 +0200364#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11)
Antoine Ténart81b66302017-08-22 19:08:21 +0200365#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
366#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
Antoine Tenartfd3651b2017-09-01 11:04:54 +0200367#define MVPP2_GMAC_STATUS0 0x10
368#define MVPP2_GMAC_STATUS0_LINK_UP BIT(0)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300369#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
Antoine Ténart81b66302017-08-22 19:08:21 +0200370#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
371#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
372#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
Marcin Wojtas3f518502014-07-10 16:52:13 -0300373 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
Antoine Tenartfd3651b2017-09-01 11:04:54 +0200374#define MVPP22_GMAC_INT_STAT 0x20
375#define MVPP22_GMAC_INT_STAT_LINK BIT(1)
376#define MVPP22_GMAC_INT_MASK 0x24
377#define MVPP22_GMAC_INT_MASK_LINK_STAT BIT(1)
Thomas Petazzoni26975822017-03-07 16:53:14 +0100378#define MVPP22_GMAC_CTRL_4_REG 0x90
Antoine Ténart81b66302017-08-22 19:08:21 +0200379#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
380#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
Antoine Ténart1068ec72017-08-22 19:08:22 +0200381#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
Antoine Ténart81b66302017-08-22 19:08:21 +0200382#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
Antoine Tenartfd3651b2017-09-01 11:04:54 +0200383#define MVPP22_GMAC_INT_SUM_MASK 0xa4
384#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
Thomas Petazzoni26975822017-03-07 16:53:14 +0100385
386/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
387 * relative to port->base.
388 */
Antoine Ténart725757a2017-06-12 16:01:39 +0200389#define MVPP22_XLG_CTRL0_REG 0x100
Antoine Ténart81b66302017-08-22 19:08:21 +0200390#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
391#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
Antoine Ténart77321952017-08-22 19:08:25 +0200392#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7)
Antoine Ténart81b66302017-08-22 19:08:21 +0200393#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
Stefan Chulski76eb1b12017-08-22 19:08:26 +0200394#define MVPP22_XLG_CTRL1_REG 0x104
Antoine Ténartec15ecd2017-08-25 15:24:46 +0200395#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS 0
Stefan Chulski76eb1b12017-08-22 19:08:26 +0200396#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff
Antoine Tenartfd3651b2017-09-01 11:04:54 +0200397#define MVPP22_XLG_STATUS 0x10c
398#define MVPP22_XLG_STATUS_LINK_UP BIT(0)
399#define MVPP22_XLG_INT_STAT 0x114
400#define MVPP22_XLG_INT_STAT_LINK BIT(1)
401#define MVPP22_XLG_INT_MASK 0x118
402#define MVPP22_XLG_INT_MASK_LINK BIT(1)
Thomas Petazzoni26975822017-03-07 16:53:14 +0100403#define MVPP22_XLG_CTRL3_REG 0x11c
Antoine Ténart81b66302017-08-22 19:08:21 +0200404#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
405#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
406#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
Antoine Tenartfd3651b2017-09-01 11:04:54 +0200407#define MVPP22_XLG_EXT_INT_MASK 0x15c
408#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1)
409#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2)
Antoine Ténart77321952017-08-22 19:08:25 +0200410#define MVPP22_XLG_CTRL4_REG 0x184
411#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
412#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
413#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
414
Thomas Petazzoni26975822017-03-07 16:53:14 +0100415/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
416#define MVPP22_SMI_MISC_CFG_REG 0x1204
Antoine Ténart81b66302017-08-22 19:08:21 +0200417#define MVPP22_SMI_POLLING_EN BIT(10)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300418
Thomas Petazzonia7868412017-03-07 16:53:13 +0100419#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
420
Marcin Wojtas3f518502014-07-10 16:52:13 -0300421#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
422
423/* Descriptor ring Macros */
424#define MVPP2_QUEUE_NEXT_DESC(q, index) \
425 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
426
Antoine Ténartf84bf382017-08-22 19:08:27 +0200427/* XPCS registers. PPv2.2 only */
428#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
429#define MVPP22_MPCS_CTRL 0x14
430#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
431#define MVPP22_MPCS_CLK_RESET 0x14c
432#define MAC_CLK_RESET_SD_TX BIT(0)
433#define MAC_CLK_RESET_SD_RX BIT(1)
434#define MAC_CLK_RESET_MAC BIT(2)
435#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
436#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)
437
438/* XPCS registers. PPv2.2 only */
439#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
440#define MVPP22_XPCS_CFG0 0x0
441#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
442#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)
443
444/* System controller registers. Accessed through a regmap. */
445#define GENCONF_SOFT_RESET1 0x1108
446#define GENCONF_SOFT_RESET1_GOP BIT(6)
447#define GENCONF_PORT_CTRL0 0x1110
448#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1)
449#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29)
450#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31)
451#define GENCONF_PORT_CTRL1 0x1114
452#define GENCONF_PORT_CTRL1_EN(p) BIT(p)
453#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28)
454#define GENCONF_CTRL0 0x1120
455#define GENCONF_CTRL0_PORT0_RGMII BIT(0)
456#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1)
457#define GENCONF_CTRL0_PORT1_RGMII BIT(2)
458
Marcin Wojtas3f518502014-07-10 16:52:13 -0300459/* Various constants */
460
461/* Coalescing */
Antoine Tenart86162282017-12-11 09:13:29 +0100462#define MVPP2_TXDONE_COAL_PKTS_THRESH 64
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200463#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200464#define MVPP2_TXDONE_COAL_USEC 1000
Marcin Wojtas3f518502014-07-10 16:52:13 -0300465#define MVPP2_RX_COAL_PKTS 32
Antoine Tenart86162282017-12-11 09:13:29 +0100466#define MVPP2_RX_COAL_USEC 64
Marcin Wojtas3f518502014-07-10 16:52:13 -0300467
468/* The two bytes Marvell header. Either contains a special value used
469 * by Marvell switches when a specific hardware mode is enabled (not
470 * supported by this driver) or is filled automatically by zeroes on
471 * the RX side. Those two bytes being at the front of the Ethernet
472 * header, they allow to have the IP header aligned on a 4 bytes
473 * boundary automatically: the hardware skips those two bytes on its
474 * own.
475 */
476#define MVPP2_MH_SIZE 2
477#define MVPP2_ETH_TYPE_LEN 2
478#define MVPP2_PPPOE_HDR_SIZE 8
479#define MVPP2_VLAN_TAG_LEN 4
Maxime Chevallier56beda32018-02-28 10:14:13 +0100480#define MVPP2_VLAN_TAG_EDSA_LEN 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300481
482/* Lbtd 802.3 type */
483#define MVPP2_IP_LBDT_TYPE 0xfffa
484
Marcin Wojtas3f518502014-07-10 16:52:13 -0300485#define MVPP2_TX_CSUM_MAX_SIZE 9800
486
487/* Timeout constants */
488#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
489#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
490
491#define MVPP2_TX_MTU_MAX 0x7ffff
492
493/* Maximum number of T-CONTs of PON port */
494#define MVPP2_MAX_TCONT 16
495
496/* Maximum number of supported ports */
497#define MVPP2_MAX_PORTS 4
498
499/* Maximum number of TXQs used by single port */
500#define MVPP2_MAX_TXQ 8
501
Antoine Tenart1d17db02017-10-30 11:23:31 +0100502/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
503 * skb. As we need a maxium of two descriptors per fragments (1 header, 1 data),
504 * multiply this value by two to count the maximum number of skb descs needed.
505 */
506#define MVPP2_MAX_TSO_SEGS 300
507#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
508
Marcin Wojtas3f518502014-07-10 16:52:13 -0300509/* Dfault number of RXQs in use */
510#define MVPP2_DEFAULT_RXQ 4
511
Marcin Wojtas3f518502014-07-10 16:52:13 -0300512/* Max number of Rx descriptors */
Yan Markman7cf87e42017-12-11 09:13:26 +0100513#define MVPP2_MAX_RXD_MAX 1024
514#define MVPP2_MAX_RXD_DFLT 128
Marcin Wojtas3f518502014-07-10 16:52:13 -0300515
516/* Max number of Tx descriptors */
Yan Markman7cf87e42017-12-11 09:13:26 +0100517#define MVPP2_MAX_TXD_MAX 2048
518#define MVPP2_MAX_TXD_DFLT 1024
Marcin Wojtas3f518502014-07-10 16:52:13 -0300519
520/* Amount of Tx descriptors that can be reserved at once by CPU */
521#define MVPP2_CPU_DESC_CHUNK 64
522
523/* Max number of Tx descriptors in each aggregated queue */
524#define MVPP2_AGGR_TXQ_SIZE 256
525
526/* Descriptor aligned size */
527#define MVPP2_DESC_ALIGNED_SIZE 32
528
529/* Descriptor alignment mask */
530#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
531
532/* RX FIFO constants */
Antoine Tenart2d1d7df2017-10-30 11:23:28 +0100533#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000
534#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000
535#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000
536#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200
537#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80
538#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40
539#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
Marcin Wojtas3f518502014-07-10 16:52:13 -0300540
Antoine Tenart7c10f972017-10-30 11:23:29 +0100541/* TX FIFO constants */
542#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
543#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
544
Marcin Wojtas3f518502014-07-10 16:52:13 -0300545/* RX buffer constants */
546#define MVPP2_SKB_SHINFO_SIZE \
547 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
548
549#define MVPP2_RX_PKT_SIZE(mtu) \
550 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
Jisheng Zhang4a0a12d2016-04-01 17:11:05 +0800551 ETH_HLEN + ETH_FCS_LEN, cache_line_size())
Marcin Wojtas3f518502014-07-10 16:52:13 -0300552
553#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
554#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
555#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
556 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
557
558#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
559
560/* IPv6 max L3 address size */
561#define MVPP2_MAX_L3_ADDR_SIZE 16
562
563/* Port flags */
564#define MVPP2_F_LOOPBACK BIT(0)
565
566/* Marvell tag types */
567enum mvpp2_tag_type {
568 MVPP2_TAG_TYPE_NONE = 0,
569 MVPP2_TAG_TYPE_MH = 1,
570 MVPP2_TAG_TYPE_DSA = 2,
571 MVPP2_TAG_TYPE_EDSA = 3,
572 MVPP2_TAG_TYPE_VLAN = 4,
573 MVPP2_TAG_TYPE_LAST = 5
574};
575
576/* Parser constants */
577#define MVPP2_PRS_TCAM_SRAM_SIZE 256
578#define MVPP2_PRS_TCAM_WORDS 6
579#define MVPP2_PRS_SRAM_WORDS 4
580#define MVPP2_PRS_FLOW_ID_SIZE 64
581#define MVPP2_PRS_FLOW_ID_MASK 0x3f
582#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
583#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
584#define MVPP2_PRS_IPV4_HEAD 0x40
585#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
586#define MVPP2_PRS_IPV4_MC 0xe0
587#define MVPP2_PRS_IPV4_MC_MASK 0xf0
588#define MVPP2_PRS_IPV4_BC_MASK 0xff
589#define MVPP2_PRS_IPV4_IHL 0x5
590#define MVPP2_PRS_IPV4_IHL_MASK 0xf
591#define MVPP2_PRS_IPV6_MC 0xff
592#define MVPP2_PRS_IPV6_MC_MASK 0xff
593#define MVPP2_PRS_IPV6_HOP_MASK 0xff
594#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
595#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
596#define MVPP2_PRS_DBL_VLANS_MAX 100
597
598/* Tcam structure:
599 * - lookup ID - 4 bits
600 * - port ID - 1 byte
601 * - additional information - 1 byte
602 * - header data - 8 bytes
603 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
604 */
605#define MVPP2_PRS_AI_BITS 8
606#define MVPP2_PRS_PORT_MASK 0xff
607#define MVPP2_PRS_LU_MASK 0xf
608#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
609 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
610#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
611 (((offs) * 2) - ((offs) % 2) + 2)
612#define MVPP2_PRS_TCAM_AI_BYTE 16
613#define MVPP2_PRS_TCAM_PORT_BYTE 17
614#define MVPP2_PRS_TCAM_LU_BYTE 20
615#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
616#define MVPP2_PRS_TCAM_INV_WORD 5
Maxime Chevallier56beda32018-02-28 10:14:13 +0100617
618#define MVPP2_PRS_VID_TCAM_BYTE 2
619
620/* There is a TCAM range reserved for VLAN filtering entries, range size is 33
621 * 10 VLAN ID filter entries per port
622 * 1 default VLAN filter entry per port
623 * It is assumed that there are 3 ports for filter, not including loopback port
624 */
625#define MVPP2_PRS_VLAN_FILT_MAX 11
626#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33
627
628#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2)
629#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1)
630
Marcin Wojtas3f518502014-07-10 16:52:13 -0300631/* Tcam entries ID */
632#define MVPP2_PE_DROP_ALL 0
633#define MVPP2_PE_FIRST_FREE_TID 1
Maxime Chevallier56beda32018-02-28 10:14:13 +0100634
635/* VLAN filtering range */
636#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
637#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
638 MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
639#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300640#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
641#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
642#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
643#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
644#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
Maxime Chevallier56beda32018-02-28 10:14:13 +0100645#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 21)
646#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20)
647#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
648#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
649#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
650#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
651#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
652#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
653#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
654#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
655#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
656#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
657#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
658#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
659#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
660#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300661#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
662#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
663#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
664#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
665#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
666
Maxime Chevallier56beda32018-02-28 10:14:13 +0100667#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \
668 ((port) * MVPP2_PRS_VLAN_FILT_MAX))
669#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
670 + MVPP2_PRS_VLAN_FILT_MAX_ENTRY)
671/* Index of default vid filter for given port */
672#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
673 + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY)
674
Marcin Wojtas3f518502014-07-10 16:52:13 -0300675/* Sram structure
676 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
677 */
678#define MVPP2_PRS_SRAM_RI_OFFS 0
679#define MVPP2_PRS_SRAM_RI_WORD 0
680#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
681#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
682#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
683#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
684#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
685#define MVPP2_PRS_SRAM_UDF_OFFS 73
686#define MVPP2_PRS_SRAM_UDF_BITS 8
687#define MVPP2_PRS_SRAM_UDF_MASK 0xff
688#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
689#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
690#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
691#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
692#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
693#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
694#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
695#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
696#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
697#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
698#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
699#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
700#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
701#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
702#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
703#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
704#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
705#define MVPP2_PRS_SRAM_AI_OFFS 90
706#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
707#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
708#define MVPP2_PRS_SRAM_AI_MASK 0xff
709#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
710#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
711#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
712#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
713
714/* Sram result info bits assignment */
715#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
716#define MVPP2_PRS_RI_DSA_MASK 0x2
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100717#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
718#define MVPP2_PRS_RI_VLAN_NONE 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300719#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
720#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
721#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
722#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
723#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100724#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
725#define MVPP2_PRS_RI_L2_UCAST 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300726#define MVPP2_PRS_RI_L2_MCAST BIT(9)
727#define MVPP2_PRS_RI_L2_BCAST BIT(10)
728#define MVPP2_PRS_RI_PPPOE_MASK 0x800
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100729#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
730#define MVPP2_PRS_RI_L3_UN 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300731#define MVPP2_PRS_RI_L3_IP4 BIT(12)
732#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
733#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
734#define MVPP2_PRS_RI_L3_IP6 BIT(14)
735#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
736#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100737#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
738#define MVPP2_PRS_RI_L3_UCAST 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300739#define MVPP2_PRS_RI_L3_MCAST BIT(15)
740#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
741#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
Stefan Chulskiaff3da32017-09-25 14:59:46 +0200742#define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300743#define MVPP2_PRS_RI_UDF3_MASK 0x300000
744#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
745#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
746#define MVPP2_PRS_RI_L4_TCP BIT(22)
747#define MVPP2_PRS_RI_L4_UDP BIT(23)
748#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
749#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
750#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
751#define MVPP2_PRS_RI_DROP_MASK 0x80000000
752
753/* Sram additional info bits assignment */
754#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
755#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
756#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
757#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
758#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
759#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
760#define MVPP2_PRS_SINGLE_VLAN_AI 0
761#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
Maxime Chevallier56beda32018-02-28 10:14:13 +0100762#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300763
764/* DSA/EDSA type */
765#define MVPP2_PRS_TAGGED true
766#define MVPP2_PRS_UNTAGGED false
767#define MVPP2_PRS_EDSA true
768#define MVPP2_PRS_DSA false
769
770/* MAC entries, shadow udf */
771enum mvpp2_prs_udf {
772 MVPP2_PRS_UDF_MAC_DEF,
773 MVPP2_PRS_UDF_MAC_RANGE,
774 MVPP2_PRS_UDF_L2_DEF,
775 MVPP2_PRS_UDF_L2_DEF_COPY,
776 MVPP2_PRS_UDF_L2_USER,
777};
778
779/* Lookup ID */
780enum mvpp2_prs_lookup {
781 MVPP2_PRS_LU_MH,
782 MVPP2_PRS_LU_MAC,
783 MVPP2_PRS_LU_DSA,
784 MVPP2_PRS_LU_VLAN,
Maxime Chevallier56beda32018-02-28 10:14:13 +0100785 MVPP2_PRS_LU_VID,
Marcin Wojtas3f518502014-07-10 16:52:13 -0300786 MVPP2_PRS_LU_L2,
787 MVPP2_PRS_LU_PPPOE,
788 MVPP2_PRS_LU_IP4,
789 MVPP2_PRS_LU_IP6,
790 MVPP2_PRS_LU_FLOWS,
791 MVPP2_PRS_LU_LAST,
792};
793
794/* L3 cast enum */
795enum mvpp2_prs_l3_cast {
796 MVPP2_PRS_L3_UNI_CAST,
797 MVPP2_PRS_L3_MULTI_CAST,
798 MVPP2_PRS_L3_BROAD_CAST
799};
800
801/* Classifier constants */
802#define MVPP2_CLS_FLOWS_TBL_SIZE 512
803#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
804#define MVPP2_CLS_LKP_TBL_SIZE 64
Antoine Tenart1d7d15d2017-10-30 11:23:30 +0100805#define MVPP2_CLS_RX_QUEUES 256
806
807/* RSS constants */
808#define MVPP22_RSS_TABLE_ENTRIES 32
Marcin Wojtas3f518502014-07-10 16:52:13 -0300809
810/* BM constants */
Marcin Wojtas3f518502014-07-10 16:52:13 -0300811#define MVPP2_BM_LONG_BUF_NUM 1024
812#define MVPP2_BM_SHORT_BUF_NUM 2048
813#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
814#define MVPP2_BM_POOL_PTR_ALIGN 128
Marcin Wojtas3f518502014-07-10 16:52:13 -0300815
816/* BM cookie (32 bits) definition */
817#define MVPP2_BM_COOKIE_POOL_OFFS 8
818#define MVPP2_BM_COOKIE_CPU_OFFS 24
819
Stefan Chulski01d04932018-03-05 15:16:50 +0100820#define MVPP2_BM_SHORT_FRAME_SIZE 512
821#define MVPP2_BM_LONG_FRAME_SIZE 2048
Marcin Wojtas3f518502014-07-10 16:52:13 -0300822/* BM short pool packet size
823 * These value assure that for SWF the total number
824 * of bytes allocated for each buffer will be 512
825 */
Stefan Chulski01d04932018-03-05 15:16:50 +0100826#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
827#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300828
Thomas Petazzonia7868412017-03-07 16:53:13 +0100829#define MVPP21_ADDR_SPACE_SZ 0
830#define MVPP22_ADDR_SPACE_SZ SZ_64K
831
Thomas Petazzonidf089aa2017-08-03 10:41:58 +0200832#define MVPP2_MAX_THREADS 8
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +0200833#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS
Thomas Petazzonia7868412017-03-07 16:53:13 +0100834
Stefan Chulski01d04932018-03-05 15:16:50 +0100835enum mvpp2_bm_pool_log_num {
836 MVPP2_BM_SHORT,
837 MVPP2_BM_LONG,
838 MVPP2_BM_POOLS_NUM
Marcin Wojtas3f518502014-07-10 16:52:13 -0300839};
840
Stefan Chulski01d04932018-03-05 15:16:50 +0100841static struct {
842 int pkt_size;
843 int buf_num;
844} mvpp2_pools[MVPP2_BM_POOLS_NUM];
845
Miquel Raynal118d6292017-11-06 22:56:53 +0100846/* GMAC MIB Counters register definitions */
847#define MVPP21_MIB_COUNTERS_OFFSET 0x1000
848#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400
849#define MVPP22_MIB_COUNTERS_OFFSET 0x0
850#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100
851
852#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0
853#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8
854#define MVPP2_MIB_CRC_ERRORS_SENT 0xc
855#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10
856#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18
857#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c
858#define MVPP2_MIB_FRAMES_64_OCTETS 0x20
859#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24
860#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28
861#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c
862#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30
863#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34
864#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38
865#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40
866#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48
867#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c
868#define MVPP2_MIB_FC_SENT 0x54
869#define MVPP2_MIB_FC_RCVD 0x58
870#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c
871#define MVPP2_MIB_UNDERSIZE_RCVD 0x60
872#define MVPP2_MIB_FRAGMENTS_RCVD 0x64
873#define MVPP2_MIB_OVERSIZE_RCVD 0x68
874#define MVPP2_MIB_JABBER_RCVD 0x6c
875#define MVPP2_MIB_MAC_RCV_ERROR 0x70
876#define MVPP2_MIB_BAD_CRC_EVENT 0x74
877#define MVPP2_MIB_COLLISION 0x78
878#define MVPP2_MIB_LATE_COLLISION 0x7c
879
880#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ)
881
Marcin Wojtas3f518502014-07-10 16:52:13 -0300882/* Definitions */
883
884/* Shared Packet Processor resources */
885struct mvpp2 {
886 /* Shared registers' base addresses */
Marcin Wojtas3f518502014-07-10 16:52:13 -0300887 void __iomem *lms_base;
Thomas Petazzonia7868412017-03-07 16:53:13 +0100888 void __iomem *iface_base;
889
Thomas Petazzonidf089aa2017-08-03 10:41:58 +0200890 /* On PPv2.2, each "software thread" can access the base
891 * register through a separate address space, each 64 KB apart
892 * from each other. Typically, such address spaces will be
893 * used per CPU.
Thomas Petazzonia7868412017-03-07 16:53:13 +0100894 */
Thomas Petazzonidf089aa2017-08-03 10:41:58 +0200895 void __iomem *swth_base[MVPP2_MAX_THREADS];
Marcin Wojtas3f518502014-07-10 16:52:13 -0300896
Antoine Ténartf84bf382017-08-22 19:08:27 +0200897 /* On PPv2.2, some port control registers are located into the system
898 * controller space. These registers are accessible through a regmap.
899 */
900 struct regmap *sysctrl_base;
901
Marcin Wojtas3f518502014-07-10 16:52:13 -0300902 /* Common clocks */
903 struct clk *pp_clk;
904 struct clk *gop_clk;
Thomas Petazzonifceb55d2017-03-07 16:53:18 +0100905 struct clk *mg_clk;
Gregory CLEMENT4792ea02017-09-29 14:27:39 +0200906 struct clk *axi_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300907
908 /* List of pointers to port structures */
Miquel Raynal118d6292017-11-06 22:56:53 +0100909 int port_count;
Marcin Wojtasbf147152018-01-18 13:31:42 +0100910 struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
Marcin Wojtas3f518502014-07-10 16:52:13 -0300911
912 /* Aggregated TXQs */
913 struct mvpp2_tx_queue *aggr_txqs;
914
915 /* BM pools */
916 struct mvpp2_bm_pool *bm_pools;
917
918 /* PRS shadow table */
919 struct mvpp2_prs_shadow *prs_shadow;
920 /* PRS auxiliary table for double vlan entries control */
921 bool *prs_double_vlans;
922
923 /* Tclk value */
924 u32 tclk;
Thomas Petazzonifaca9242017-03-07 16:53:06 +0100925
926 /* HW version */
927 enum { MVPP21, MVPP22 } hw_version;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +0100928
929 /* Maximum number of RXQs per port */
930 unsigned int max_port_rxqs;
Miquel Raynal118d6292017-11-06 22:56:53 +0100931
Miquel Raynale5c500e2017-11-08 08:59:40 +0100932 /* Workqueue to gather hardware statistics */
Miquel Raynal118d6292017-11-06 22:56:53 +0100933 char queue_name[30];
934 struct workqueue_struct *stats_queue;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300935};
936
937struct mvpp2_pcpu_stats {
938 struct u64_stats_sync syncp;
939 u64 rx_packets;
940 u64 rx_bytes;
941 u64 tx_packets;
942 u64 tx_bytes;
943};
944
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200945/* Per-CPU port control */
946struct mvpp2_port_pcpu {
947 struct hrtimer tx_done_timer;
948 bool timer_scheduled;
949 /* Tasklet for egress finalization */
950 struct tasklet_struct tx_done_tasklet;
951};
952
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +0200953struct mvpp2_queue_vector {
954 int irq;
955 struct napi_struct napi;
956 enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
957 int sw_thread_id;
958 u16 sw_thread_mask;
959 int first_rxq;
960 int nrxqs;
961 u32 pending_cause_rx;
962 struct mvpp2_port *port;
963};
964
Marcin Wojtas3f518502014-07-10 16:52:13 -0300965struct mvpp2_port {
966 u8 id;
967
Thomas Petazzonia7868412017-03-07 16:53:13 +0100968 /* Index of the port from the "group of ports" complex point
969 * of view
970 */
971 int gop_id;
972
Antoine Tenartfd3651b2017-09-01 11:04:54 +0200973 int link_irq;
974
Marcin Wojtas3f518502014-07-10 16:52:13 -0300975 struct mvpp2 *priv;
976
Marcin Wojtas24812222018-01-18 13:31:43 +0100977 /* Firmware node associated to the port */
978 struct fwnode_handle *fwnode;
979
Marcin Wojtas3f518502014-07-10 16:52:13 -0300980 /* Per-port registers' base address */
981 void __iomem *base;
Miquel Raynal118d6292017-11-06 22:56:53 +0100982 void __iomem *stats_base;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300983
984 struct mvpp2_rx_queue **rxqs;
Thomas Petazzoni09f83972017-08-03 10:41:57 +0200985 unsigned int nrxqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300986 struct mvpp2_tx_queue **txqs;
Thomas Petazzoni09f83972017-08-03 10:41:57 +0200987 unsigned int ntxqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300988 struct net_device *dev;
989
990 int pkt_size;
991
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200992 /* Per-CPU port control */
993 struct mvpp2_port_pcpu __percpu *pcpu;
994
Marcin Wojtas3f518502014-07-10 16:52:13 -0300995 /* Flags */
996 unsigned long flags;
997
998 u16 tx_ring_size;
999 u16 rx_ring_size;
1000 struct mvpp2_pcpu_stats __percpu *stats;
Miquel Raynal118d6292017-11-06 22:56:53 +01001001 u64 *ethtool_stats;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001002
Miquel Raynale5c500e2017-11-08 08:59:40 +01001003 /* Per-port work and its lock to gather hardware statistics */
1004 struct mutex gather_stats_lock;
1005 struct delayed_work stats_work;
1006
Marcin Wojtas3f518502014-07-10 16:52:13 -03001007 phy_interface_t phy_interface;
1008 struct device_node *phy_node;
Antoine Tenart542897d2017-08-30 10:29:15 +02001009 struct phy *comphy;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001010 unsigned int link;
1011 unsigned int duplex;
1012 unsigned int speed;
1013
1014 struct mvpp2_bm_pool *pool_long;
1015 struct mvpp2_bm_pool *pool_short;
1016
1017 /* Index of first port's physical RXQ */
1018 u8 first_rxq;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02001019
1020 struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
1021 unsigned int nqvecs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02001022 bool has_tx_irqs;
1023
1024 u32 tx_time_coal;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001025};
1026
1027/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
1028 * layout of the transmit and reception DMA descriptors, and their
1029 * layout is therefore defined by the hardware design
1030 */
1031
1032#define MVPP2_TXD_L3_OFF_SHIFT 0
1033#define MVPP2_TXD_IP_HLEN_SHIFT 8
1034#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
1035#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
1036#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
1037#define MVPP2_TXD_PADDING_DISABLE BIT(23)
1038#define MVPP2_TXD_L4_UDP BIT(24)
1039#define MVPP2_TXD_L3_IP6 BIT(26)
1040#define MVPP2_TXD_L_DESC BIT(28)
1041#define MVPP2_TXD_F_DESC BIT(29)
1042
1043#define MVPP2_RXD_ERR_SUMMARY BIT(15)
1044#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
1045#define MVPP2_RXD_ERR_CRC 0x0
1046#define MVPP2_RXD_ERR_OVERRUN BIT(13)
1047#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
1048#define MVPP2_RXD_BM_POOL_ID_OFFS 16
1049#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
1050#define MVPP2_RXD_HWF_SYNC BIT(21)
1051#define MVPP2_RXD_L4_CSUM_OK BIT(22)
1052#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
1053#define MVPP2_RXD_L4_TCP BIT(25)
1054#define MVPP2_RXD_L4_UDP BIT(26)
1055#define MVPP2_RXD_L3_IP4 BIT(28)
1056#define MVPP2_RXD_L3_IP6 BIT(30)
1057#define MVPP2_RXD_BUF_HDR BIT(31)
1058
Thomas Petazzoni054f6372017-03-07 16:53:07 +01001059/* HW TX descriptor for PPv2.1 */
1060struct mvpp21_tx_desc {
Marcin Wojtas3f518502014-07-10 16:52:13 -03001061 u32 command; /* Options used by HW for packet transmitting.*/
1062 u8 packet_offset; /* the offset from the buffer beginning */
1063 u8 phys_txq; /* destination queue ID */
1064 u16 data_size; /* data size of transmitted packet in bytes */
Thomas Petazzoni20396132017-03-07 16:53:00 +01001065 u32 buf_dma_addr; /* physical addr of transmitted buffer */
Marcin Wojtas3f518502014-07-10 16:52:13 -03001066 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
1067 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
1068 u32 reserved2; /* reserved (for future use) */
1069};
1070
Thomas Petazzoni054f6372017-03-07 16:53:07 +01001071/* HW RX descriptor for PPv2.1 */
1072struct mvpp21_rx_desc {
Marcin Wojtas3f518502014-07-10 16:52:13 -03001073 u32 status; /* info about received packet */
1074 u16 reserved1; /* parser_info (for future use, PnC) */
1075 u16 data_size; /* size of received packet in bytes */
Thomas Petazzoni20396132017-03-07 16:53:00 +01001076 u32 buf_dma_addr; /* physical address of the buffer */
Marcin Wojtas3f518502014-07-10 16:52:13 -03001077 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
1078 u16 reserved2; /* gem_port_id (for future use, PON) */
1079 u16 reserved3; /* csum_l4 (for future use, PnC) */
1080 u8 reserved4; /* bm_qset (for future use, BM) */
1081 u8 reserved5;
1082 u16 reserved6; /* classify_info (for future use, PnC) */
1083 u32 reserved7; /* flow_id (for future use, PnC) */
1084 u32 reserved8;
1085};
1086
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001087/* HW TX descriptor for PPv2.2 */
1088struct mvpp22_tx_desc {
1089 u32 command;
1090 u8 packet_offset;
1091 u8 phys_txq;
1092 u16 data_size;
1093 u64 reserved1;
1094 u64 buf_dma_addr_ptp;
1095 u64 buf_cookie_misc;
1096};
1097
1098/* HW RX descriptor for PPv2.2 */
1099struct mvpp22_rx_desc {
1100 u32 status;
1101 u16 reserved1;
1102 u16 data_size;
1103 u32 reserved2;
1104 u32 reserved3;
1105 u64 buf_dma_addr_key_hash;
1106 u64 buf_cookie_misc;
1107};
1108
Thomas Petazzoni054f6372017-03-07 16:53:07 +01001109/* Opaque type used by the driver to manipulate the HW TX and RX
1110 * descriptors
1111 */
1112struct mvpp2_tx_desc {
1113 union {
1114 struct mvpp21_tx_desc pp21;
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001115 struct mvpp22_tx_desc pp22;
Thomas Petazzoni054f6372017-03-07 16:53:07 +01001116 };
1117};
1118
1119struct mvpp2_rx_desc {
1120 union {
1121 struct mvpp21_rx_desc pp21;
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001122 struct mvpp22_rx_desc pp22;
Thomas Petazzoni054f6372017-03-07 16:53:07 +01001123 };
1124};
1125
Thomas Petazzoni83544912016-12-21 11:28:49 +01001126struct mvpp2_txq_pcpu_buf {
1127 /* Transmitted SKB */
1128 struct sk_buff *skb;
1129
1130 /* Physical address of transmitted buffer */
Thomas Petazzoni20396132017-03-07 16:53:00 +01001131 dma_addr_t dma;
Thomas Petazzoni83544912016-12-21 11:28:49 +01001132
1133 /* Size transmitted */
1134 size_t size;
1135};
1136
Marcin Wojtas3f518502014-07-10 16:52:13 -03001137/* Per-CPU Tx queue control */
1138struct mvpp2_txq_pcpu {
1139 int cpu;
1140
1141 /* Number of Tx DMA descriptors in the descriptor ring */
1142 int size;
1143
1144 /* Number of currently used Tx DMA descriptor in the
1145 * descriptor ring
1146 */
1147 int count;
1148
Antoine Tenart1d17db02017-10-30 11:23:31 +01001149 int wake_threshold;
1150 int stop_threshold;
1151
Marcin Wojtas3f518502014-07-10 16:52:13 -03001152 /* Number of Tx DMA descriptors reserved for each CPU */
1153 int reserved_num;
1154
Thomas Petazzoni83544912016-12-21 11:28:49 +01001155 /* Infos about transmitted buffers */
1156 struct mvpp2_txq_pcpu_buf *buffs;
Marcin Wojtas71ce3912015-08-06 19:00:29 +02001157
Marcin Wojtas3f518502014-07-10 16:52:13 -03001158 /* Index of last TX DMA descriptor that was inserted */
1159 int txq_put_index;
1160
1161 /* Index of the TX DMA descriptor to be cleaned up */
1162 int txq_get_index;
Antoine Ténart186cd4d2017-08-23 09:46:56 +02001163
1164 /* DMA buffer for TSO headers */
1165 char *tso_headers;
1166 dma_addr_t tso_headers_dma;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001167};
1168
1169struct mvpp2_tx_queue {
1170 /* Physical number of this Tx queue */
1171 u8 id;
1172
1173 /* Logical number of this Tx queue */
1174 u8 log_id;
1175
1176 /* Number of Tx DMA descriptors in the descriptor ring */
1177 int size;
1178
1179 /* Number of currently used Tx DMA descriptor in the descriptor ring */
1180 int count;
1181
1182 /* Per-CPU control of physical Tx queues */
1183 struct mvpp2_txq_pcpu __percpu *pcpu;
1184
Marcin Wojtas3f518502014-07-10 16:52:13 -03001185 u32 done_pkts_coal;
1186
1187 /* Virtual address of thex Tx DMA descriptors array */
1188 struct mvpp2_tx_desc *descs;
1189
1190 /* DMA address of the Tx DMA descriptors array */
Thomas Petazzoni20396132017-03-07 16:53:00 +01001191 dma_addr_t descs_dma;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001192
1193 /* Index of the last Tx DMA descriptor */
1194 int last_desc;
1195
1196 /* Index of the next Tx DMA descriptor to process */
1197 int next_desc_to_proc;
1198};
1199
1200struct mvpp2_rx_queue {
1201 /* RX queue number, in the range 0-31 for physical RXQs */
1202 u8 id;
1203
1204 /* Num of rx descriptors in the rx descriptor ring */
1205 int size;
1206
1207 u32 pkts_coal;
1208 u32 time_coal;
1209
1210 /* Virtual address of the RX DMA descriptors array */
1211 struct mvpp2_rx_desc *descs;
1212
1213 /* DMA address of the RX DMA descriptors array */
Thomas Petazzoni20396132017-03-07 16:53:00 +01001214 dma_addr_t descs_dma;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001215
1216 /* Index of the last RX DMA descriptor */
1217 int last_desc;
1218
1219 /* Index of the next RX DMA descriptor to process */
1220 int next_desc_to_proc;
1221
1222 /* ID of port to which physical RXQ is mapped */
1223 int port;
1224
1225 /* Port's logic RXQ number to which physical RXQ is mapped */
1226 int logic_rxq;
1227};
1228
1229union mvpp2_prs_tcam_entry {
1230 u32 word[MVPP2_PRS_TCAM_WORDS];
1231 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
1232};
1233
1234union mvpp2_prs_sram_entry {
1235 u32 word[MVPP2_PRS_SRAM_WORDS];
1236 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
1237};
1238
1239struct mvpp2_prs_entry {
1240 u32 index;
1241 union mvpp2_prs_tcam_entry tcam;
1242 union mvpp2_prs_sram_entry sram;
1243};
1244
1245struct mvpp2_prs_shadow {
1246 bool valid;
1247 bool finish;
1248
1249 /* Lookup ID */
1250 int lu;
1251
1252 /* User defined offset */
1253 int udf;
1254
1255 /* Result info */
1256 u32 ri;
1257 u32 ri_mask;
1258};
1259
1260struct mvpp2_cls_flow_entry {
1261 u32 index;
1262 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
1263};
1264
1265struct mvpp2_cls_lookup_entry {
1266 u32 lkpid;
1267 u32 way;
1268 u32 data;
1269};
1270
1271struct mvpp2_bm_pool {
1272 /* Pool number in the range 0-7 */
1273 int id;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001274
1275 /* Buffer Pointers Pool External (BPPE) size */
1276 int size;
Thomas Petazzonid01524d2017-03-07 16:53:09 +01001277 /* BPPE size in bytes */
1278 int size_bytes;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001279 /* Number of buffers for this pool */
1280 int buf_num;
1281 /* Pool buffer size */
1282 int buf_size;
1283 /* Packet size */
1284 int pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01001285 int frag_size;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001286
1287 /* BPPE virtual base address */
1288 u32 *virt_addr;
Thomas Petazzoni20396132017-03-07 16:53:00 +01001289 /* BPPE DMA base address */
1290 dma_addr_t dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001291
1292 /* Ports using BM pool */
1293 u32 port_map;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001294};
1295
Antoine Tenart20920262017-10-23 15:24:30 +02001296#define IS_TSO_HEADER(txq_pcpu, addr) \
1297 ((addr) >= (txq_pcpu)->tso_headers_dma && \
1298 (addr) < (txq_pcpu)->tso_headers_dma + \
1299 (txq_pcpu)->size * TSO_HEADER_SIZE)
1300
Thomas Petazzoni213f4282017-08-03 10:42:00 +02001301/* Queue modes */
1302#define MVPP2_QDIST_SINGLE_MODE 0
1303#define MVPP2_QDIST_MULTI_MODE 1
1304
1305static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
1306
1307module_param(queue_mode, int, 0444);
1308MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
1309
Marcin Wojtas3f518502014-07-10 16:52:13 -03001310#define MVPP2_DRIVER_NAME "mvpp2"
1311#define MVPP2_DRIVER_VERSION "1.0"
1312
1313/* Utility/helper methods */
1314
1315static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1316{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001317 writel(data, priv->swth_base[0] + offset);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001318}
1319
1320static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1321{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001322 return readl(priv->swth_base[0] + offset);
Thomas Petazzonia7868412017-03-07 16:53:13 +01001323}
1324
1325/* These accessors should be used to access:
1326 *
1327 * - per-CPU registers, where each CPU has its own copy of the
1328 * register.
1329 *
1330 * MVPP2_BM_VIRT_ALLOC_REG
1331 * MVPP2_BM_ADDR_HIGH_ALLOC
1332 * MVPP22_BM_ADDR_HIGH_RLS_REG
1333 * MVPP2_BM_VIRT_RLS_REG
1334 * MVPP2_ISR_RX_TX_CAUSE_REG
1335 * MVPP2_ISR_RX_TX_MASK_REG
1336 * MVPP2_TXQ_NUM_REG
1337 * MVPP2_AGGR_TXQ_UPDATE_REG
1338 * MVPP2_TXQ_RSVD_REQ_REG
1339 * MVPP2_TXQ_RSVD_RSLT_REG
1340 * MVPP2_TXQ_SENT_REG
1341 * MVPP2_RXQ_NUM_REG
1342 *
1343 * - global registers that must be accessed through a specific CPU
1344 * window, because they are related to an access to a per-CPU
1345 * register
1346 *
1347 * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
1348 * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
1349 * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
1350 * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
1351 * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
1352 * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
1353 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1354 * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
1355 * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
1356 * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
1357 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1358 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1359 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1360 */
1361static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
1362 u32 offset, u32 data)
1363{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001364 writel(data, priv->swth_base[cpu] + offset);
Thomas Petazzonia7868412017-03-07 16:53:13 +01001365}
1366
1367static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
1368 u32 offset)
1369{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001370 return readl(priv->swth_base[cpu] + offset);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001371}
1372
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001373static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1374 struct mvpp2_tx_desc *tx_desc)
1375{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001376 if (port->priv->hw_version == MVPP21)
1377 return tx_desc->pp21.buf_dma_addr;
1378 else
1379 return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001380}
1381
1382static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1383 struct mvpp2_tx_desc *tx_desc,
1384 dma_addr_t dma_addr)
1385{
Antoine Tenart6eb5d372017-10-30 11:23:33 +01001386 dma_addr_t addr, offset;
1387
1388 addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
1389 offset = dma_addr & MVPP2_TX_DESC_ALIGN;
1390
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001391 if (port->priv->hw_version == MVPP21) {
Antoine Tenart6eb5d372017-10-30 11:23:33 +01001392 tx_desc->pp21.buf_dma_addr = addr;
1393 tx_desc->pp21.packet_offset = offset;
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001394 } else {
Antoine Tenart6eb5d372017-10-30 11:23:33 +01001395 u64 val = (u64)addr;
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001396
1397 tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
1398 tx_desc->pp22.buf_dma_addr_ptp |= val;
Antoine Tenart6eb5d372017-10-30 11:23:33 +01001399 tx_desc->pp22.packet_offset = offset;
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001400 }
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001401}
1402
1403static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
1404 struct mvpp2_tx_desc *tx_desc)
1405{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001406 if (port->priv->hw_version == MVPP21)
1407 return tx_desc->pp21.data_size;
1408 else
1409 return tx_desc->pp22.data_size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001410}
1411
1412static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1413 struct mvpp2_tx_desc *tx_desc,
1414 size_t size)
1415{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001416 if (port->priv->hw_version == MVPP21)
1417 tx_desc->pp21.data_size = size;
1418 else
1419 tx_desc->pp22.data_size = size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001420}
1421
1422static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1423 struct mvpp2_tx_desc *tx_desc,
1424 unsigned int txq)
1425{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001426 if (port->priv->hw_version == MVPP21)
1427 tx_desc->pp21.phys_txq = txq;
1428 else
1429 tx_desc->pp22.phys_txq = txq;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001430}
1431
1432static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1433 struct mvpp2_tx_desc *tx_desc,
1434 unsigned int command)
1435{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001436 if (port->priv->hw_version == MVPP21)
1437 tx_desc->pp21.command = command;
1438 else
1439 tx_desc->pp22.command = command;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001440}
1441
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001442static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
1443 struct mvpp2_tx_desc *tx_desc)
1444{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001445 if (port->priv->hw_version == MVPP21)
1446 return tx_desc->pp21.packet_offset;
1447 else
1448 return tx_desc->pp22.packet_offset;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001449}
1450
1451static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1452 struct mvpp2_rx_desc *rx_desc)
1453{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001454 if (port->priv->hw_version == MVPP21)
1455 return rx_desc->pp21.buf_dma_addr;
1456 else
1457 return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001458}
1459
1460static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1461 struct mvpp2_rx_desc *rx_desc)
1462{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001463 if (port->priv->hw_version == MVPP21)
1464 return rx_desc->pp21.buf_cookie;
1465 else
1466 return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001467}
1468
1469static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1470 struct mvpp2_rx_desc *rx_desc)
1471{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001472 if (port->priv->hw_version == MVPP21)
1473 return rx_desc->pp21.data_size;
1474 else
1475 return rx_desc->pp22.data_size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001476}
1477
1478static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1479 struct mvpp2_rx_desc *rx_desc)
1480{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001481 if (port->priv->hw_version == MVPP21)
1482 return rx_desc->pp21.status;
1483 else
1484 return rx_desc->pp22.status;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001485}
1486
Marcin Wojtas3f518502014-07-10 16:52:13 -03001487static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1488{
1489 txq_pcpu->txq_get_index++;
1490 if (txq_pcpu->txq_get_index == txq_pcpu->size)
1491 txq_pcpu->txq_get_index = 0;
1492}
1493
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001494static void mvpp2_txq_inc_put(struct mvpp2_port *port,
1495 struct mvpp2_txq_pcpu *txq_pcpu,
Marcin Wojtas71ce3912015-08-06 19:00:29 +02001496 struct sk_buff *skb,
1497 struct mvpp2_tx_desc *tx_desc)
Marcin Wojtas3f518502014-07-10 16:52:13 -03001498{
Thomas Petazzoni83544912016-12-21 11:28:49 +01001499 struct mvpp2_txq_pcpu_buf *tx_buf =
1500 txq_pcpu->buffs + txq_pcpu->txq_put_index;
1501 tx_buf->skb = skb;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001502 tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
1503 tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
1504 mvpp2_txdesc_offset_get(port, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001505 txq_pcpu->txq_put_index++;
1506 if (txq_pcpu->txq_put_index == txq_pcpu->size)
1507 txq_pcpu->txq_put_index = 0;
1508}
1509
1510/* Get number of physical egress port */
1511static inline int mvpp2_egress_port(struct mvpp2_port *port)
1512{
1513 return MVPP2_MAX_TCONT + port->id;
1514}
1515
1516/* Get number of physical TXQ */
1517static inline int mvpp2_txq_phys(int port, int txq)
1518{
1519 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1520}
1521
1522/* Parser configuration routines */
1523
1524/* Update parser tcam and sram hw entries */
1525static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1526{
1527 int i;
1528
1529 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1530 return -EINVAL;
1531
1532 /* Clear entry invalidation bit */
1533 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1534
1535 /* Write tcam index - indirect access */
1536 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1537 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1538 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1539
1540 /* Write sram index - indirect access */
1541 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1542 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1543 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1544
1545 return 0;
1546}
1547
1548/* Read tcam entry from hw */
1549static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1550{
1551 int i;
1552
1553 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1554 return -EINVAL;
1555
1556 /* Write tcam index - indirect access */
1557 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1558
1559 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1560 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1561 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1562 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1563
1564 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1565 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1566
1567 /* Write sram index - indirect access */
1568 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1569 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1570 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1571
1572 return 0;
1573}
1574
1575/* Invalidate tcam hw entry */
1576static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1577{
1578 /* Write index - indirect access */
1579 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1580 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1581 MVPP2_PRS_TCAM_INV_MASK);
1582}
1583
1584/* Enable shadow table entry and set its lookup ID */
1585static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1586{
1587 priv->prs_shadow[index].valid = true;
1588 priv->prs_shadow[index].lu = lu;
1589}
1590
1591/* Update ri fields in shadow table entry */
1592static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1593 unsigned int ri, unsigned int ri_mask)
1594{
1595 priv->prs_shadow[index].ri_mask = ri_mask;
1596 priv->prs_shadow[index].ri = ri;
1597}
1598
1599/* Update lookup field in tcam sw entry */
1600static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1601{
1602 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1603
1604 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1605 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1606}
1607
1608/* Update mask for single port in tcam sw entry */
1609static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1610 unsigned int port, bool add)
1611{
1612 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1613
1614 if (add)
1615 pe->tcam.byte[enable_off] &= ~(1 << port);
1616 else
1617 pe->tcam.byte[enable_off] |= 1 << port;
1618}
1619
1620/* Update port map in tcam sw entry */
1621static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1622 unsigned int ports)
1623{
1624 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1625 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1626
1627 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1628 pe->tcam.byte[enable_off] &= ~port_mask;
1629 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1630}
1631
1632/* Obtain port map from tcam sw entry */
1633static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1634{
1635 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1636
1637 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1638}
1639
1640/* Set byte of data and its enable bits in tcam sw entry */
1641static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1642 unsigned int offs, unsigned char byte,
1643 unsigned char enable)
1644{
1645 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1646 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1647}
1648
1649/* Get byte of data and its enable bits from tcam sw entry */
1650static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1651 unsigned int offs, unsigned char *byte,
1652 unsigned char *enable)
1653{
1654 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1655 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1656}
1657
1658/* Compare tcam data bytes with a pattern */
1659static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1660 u16 data)
1661{
1662 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1663 u16 tcam_data;
1664
Antoine Tenartef4816f2017-10-24 11:41:26 +02001665 tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
Marcin Wojtas3f518502014-07-10 16:52:13 -03001666 if (tcam_data != data)
1667 return false;
1668 return true;
1669}
1670
1671/* Update ai bits in tcam sw entry */
1672static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1673 unsigned int bits, unsigned int enable)
1674{
1675 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1676
1677 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1678
1679 if (!(enable & BIT(i)))
1680 continue;
1681
1682 if (bits & BIT(i))
1683 pe->tcam.byte[ai_idx] |= 1 << i;
1684 else
1685 pe->tcam.byte[ai_idx] &= ~(1 << i);
1686 }
1687
1688 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1689}
1690
1691/* Get ai bits from tcam sw entry */
1692static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1693{
1694 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1695}
1696
1697/* Set ethertype in tcam sw entry */
1698static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1699 unsigned short ethertype)
1700{
1701 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1702 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1703}
1704
Maxime Chevallier56beda32018-02-28 10:14:13 +01001705/* Set vid in tcam sw entry */
1706static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
1707 unsigned short vid)
1708{
1709 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
1710 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
1711}
1712
Marcin Wojtas3f518502014-07-10 16:52:13 -03001713/* Set bits in sram sw entry */
1714static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1715 int val)
1716{
1717 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1718}
1719
1720/* Clear bits in sram sw entry */
1721static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1722 int val)
1723{
1724 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1725}
1726
1727/* Update ri bits in sram sw entry */
1728static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1729 unsigned int bits, unsigned int mask)
1730{
1731 unsigned int i;
1732
1733 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1734 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1735
1736 if (!(mask & BIT(i)))
1737 continue;
1738
1739 if (bits & BIT(i))
1740 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1741 else
1742 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1743
1744 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1745 }
1746}
1747
1748/* Obtain ri bits from sram sw entry */
1749static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1750{
1751 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1752}
1753
1754/* Update ai bits in sram sw entry */
1755static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1756 unsigned int bits, unsigned int mask)
1757{
1758 unsigned int i;
1759 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1760
1761 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1762
1763 if (!(mask & BIT(i)))
1764 continue;
1765
1766 if (bits & BIT(i))
1767 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1768 else
1769 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1770
1771 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1772 }
1773}
1774
1775/* Read ai bits from sram sw entry */
1776static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1777{
1778 u8 bits;
1779 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1780 int ai_en_off = ai_off + 1;
1781 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1782
1783 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1784 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1785
1786 return bits;
1787}
1788
1789/* In sram sw entry set lookup ID field of the tcam key to be used in the next
1790 * lookup interation
1791 */
1792static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1793 unsigned int lu)
1794{
1795 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1796
1797 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1798 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1799 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1800}
1801
1802/* In the sram sw entry set sign and value of the next lookup offset
1803 * and the offset value generated to the classifier
1804 */
1805static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1806 unsigned int op)
1807{
1808 /* Set sign */
1809 if (shift < 0) {
1810 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1811 shift = 0 - shift;
1812 } else {
1813 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1814 }
1815
1816 /* Set value */
1817 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1818 (unsigned char)shift;
1819
1820 /* Reset and set operation */
1821 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1822 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1823 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1824
1825 /* Set base offset as current */
1826 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1827}
1828
1829/* In the sram sw entry set sign and value of the user defined offset
1830 * generated to the classifier
1831 */
1832static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1833 unsigned int type, int offset,
1834 unsigned int op)
1835{
1836 /* Set sign */
1837 if (offset < 0) {
1838 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1839 offset = 0 - offset;
1840 } else {
1841 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1842 }
1843
1844 /* Set value */
1845 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1846 MVPP2_PRS_SRAM_UDF_MASK);
1847 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1848 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1849 MVPP2_PRS_SRAM_UDF_BITS)] &=
1850 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1851 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1852 MVPP2_PRS_SRAM_UDF_BITS)] |=
1853 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1854
1855 /* Set offset type */
1856 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1857 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1858 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1859
1860 /* Set offset operation */
1861 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1862 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1863 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1864
1865 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1866 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1867 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1868 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1869
1870 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1871 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1872 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1873
1874 /* Set base offset as current */
1875 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1876}
1877
1878/* Find parser flow entry */
1879static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1880{
1881 struct mvpp2_prs_entry *pe;
1882 int tid;
1883
1884 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1885 if (!pe)
1886 return NULL;
1887 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1888
1889 /* Go through the all entires with MVPP2_PRS_LU_FLOWS */
1890 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1891 u8 bits;
1892
1893 if (!priv->prs_shadow[tid].valid ||
1894 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1895 continue;
1896
1897 pe->index = tid;
1898 mvpp2_prs_hw_read(priv, pe);
1899 bits = mvpp2_prs_sram_ai_get(pe);
1900
1901 /* Sram store classification lookup ID in AI bits [5:0] */
1902 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1903 return pe;
1904 }
1905 kfree(pe);
1906
1907 return NULL;
1908}
1909
1910/* Return first free tcam index, seeking from start to end */
1911static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1912 unsigned char end)
1913{
1914 int tid;
1915
1916 if (start > end)
1917 swap(start, end);
1918
1919 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1920 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1921
1922 for (tid = start; tid <= end; tid++) {
1923 if (!priv->prs_shadow[tid].valid)
1924 return tid;
1925 }
1926
1927 return -EINVAL;
1928}
1929
1930/* Enable/disable dropping all mac da's */
1931static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1932{
1933 struct mvpp2_prs_entry pe;
1934
1935 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1936 /* Entry exist - update port only */
1937 pe.index = MVPP2_PE_DROP_ALL;
1938 mvpp2_prs_hw_read(priv, &pe);
1939 } else {
1940 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001941 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001942 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1943 pe.index = MVPP2_PE_DROP_ALL;
1944
1945 /* Non-promiscuous mode for all ports - DROP unknown packets */
1946 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1947 MVPP2_PRS_RI_DROP_MASK);
1948
1949 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1950 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1951
1952 /* Update shadow table */
1953 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1954
1955 /* Mask all ports */
1956 mvpp2_prs_tcam_port_map_set(&pe, 0);
1957 }
1958
1959 /* Update port mask */
1960 mvpp2_prs_tcam_port_set(&pe, port, add);
1961
1962 mvpp2_prs_hw_write(priv, &pe);
1963}
1964
1965/* Set port to promiscuous mode */
1966static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1967{
1968 struct mvpp2_prs_entry pe;
1969
Joe Perchesdbedd442015-03-06 20:49:12 -08001970 /* Promiscuous mode - Accept unknown packets */
Marcin Wojtas3f518502014-07-10 16:52:13 -03001971
1972 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1973 /* Entry exist - update port only */
1974 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1975 mvpp2_prs_hw_read(priv, &pe);
1976 } else {
1977 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001978 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001979 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1980 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1981
1982 /* Continue - set next lookup */
1983 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1984
1985 /* Set result info bits */
1986 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1987 MVPP2_PRS_RI_L2_CAST_MASK);
1988
1989 /* Shift to ethertype */
1990 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1991 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1992
1993 /* Mask all ports */
1994 mvpp2_prs_tcam_port_map_set(&pe, 0);
1995
1996 /* Update shadow table */
1997 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1998 }
1999
2000 /* Update port mask */
2001 mvpp2_prs_tcam_port_set(&pe, port, add);
2002
2003 mvpp2_prs_hw_write(priv, &pe);
2004}
2005
2006/* Accept multicast */
2007static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
2008 bool add)
2009{
2010 struct mvpp2_prs_entry pe;
2011 unsigned char da_mc;
2012
2013 /* Ethernet multicast address first byte is
2014 * 0x01 for IPv4 and 0x33 for IPv6
2015 */
2016 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
2017
2018 if (priv->prs_shadow[index].valid) {
2019 /* Entry exist - update port only */
2020 pe.index = index;
2021 mvpp2_prs_hw_read(priv, &pe);
2022 } else {
2023 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002024 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002025 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2026 pe.index = index;
2027
2028 /* Continue - set next lookup */
2029 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
2030
2031 /* Set result info bits */
2032 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
2033 MVPP2_PRS_RI_L2_CAST_MASK);
2034
2035 /* Update tcam entry data first byte */
2036 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
2037
2038 /* Shift to ethertype */
2039 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
2040 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2041
2042 /* Mask all ports */
2043 mvpp2_prs_tcam_port_map_set(&pe, 0);
2044
2045 /* Update shadow table */
2046 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2047 }
2048
2049 /* Update port mask */
2050 mvpp2_prs_tcam_port_set(&pe, port, add);
2051
2052 mvpp2_prs_hw_write(priv, &pe);
2053}
2054
2055/* Set entry for dsa packets */
2056static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
2057 bool tagged, bool extend)
2058{
2059 struct mvpp2_prs_entry pe;
2060 int tid, shift;
2061
2062 if (extend) {
2063 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
2064 shift = 8;
2065 } else {
2066 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
2067 shift = 4;
2068 }
2069
2070 if (priv->prs_shadow[tid].valid) {
2071 /* Entry exist - update port only */
2072 pe.index = tid;
2073 mvpp2_prs_hw_read(priv, &pe);
2074 } else {
2075 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002076 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002077 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2078 pe.index = tid;
2079
Marcin Wojtas3f518502014-07-10 16:52:13 -03002080 /* Update shadow table */
2081 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2082
2083 if (tagged) {
2084 /* Set tagged bit in DSA tag */
2085 mvpp2_prs_tcam_data_byte_set(&pe, 0,
Maxime Chevallier56beda32018-02-28 10:14:13 +01002086 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
2087 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
2088
2089 /* Set ai bits for next iteration */
2090 if (extend)
2091 mvpp2_prs_sram_ai_update(&pe, 1,
2092 MVPP2_PRS_SRAM_AI_MASK);
2093 else
2094 mvpp2_prs_sram_ai_update(&pe, 0,
2095 MVPP2_PRS_SRAM_AI_MASK);
2096
2097 /* If packet is tagged continue check vid filtering */
2098 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002099 } else {
Maxime Chevallier56beda32018-02-28 10:14:13 +01002100 /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/
2101 mvpp2_prs_sram_shift_set(&pe, shift,
2102 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2103
Marcin Wojtas3f518502014-07-10 16:52:13 -03002104 /* Set result info bits to 'no vlans' */
2105 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2106 MVPP2_PRS_RI_VLAN_MASK);
2107 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2108 }
2109
2110 /* Mask all ports */
2111 mvpp2_prs_tcam_port_map_set(&pe, 0);
2112 }
2113
2114 /* Update port mask */
2115 mvpp2_prs_tcam_port_set(&pe, port, add);
2116
2117 mvpp2_prs_hw_write(priv, &pe);
2118}
2119
2120/* Set entry for dsa ethertype */
2121static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
2122 bool add, bool tagged, bool extend)
2123{
2124 struct mvpp2_prs_entry pe;
2125 int tid, shift, port_mask;
2126
2127 if (extend) {
2128 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
2129 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
2130 port_mask = 0;
2131 shift = 8;
2132 } else {
2133 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
2134 MVPP2_PE_ETYPE_DSA_UNTAGGED;
2135 port_mask = MVPP2_PRS_PORT_MASK;
2136 shift = 4;
2137 }
2138
2139 if (priv->prs_shadow[tid].valid) {
2140 /* Entry exist - update port only */
2141 pe.index = tid;
2142 mvpp2_prs_hw_read(priv, &pe);
2143 } else {
2144 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002145 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002146 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2147 pe.index = tid;
2148
2149 /* Set ethertype */
2150 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
2151 mvpp2_prs_match_etype(&pe, 2, 0);
2152
2153 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
2154 MVPP2_PRS_RI_DSA_MASK);
2155 /* Shift ethertype + 2 byte reserved + tag*/
2156 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
2157 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2158
2159 /* Update shadow table */
2160 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
2161
2162 if (tagged) {
2163 /* Set tagged bit in DSA tag */
2164 mvpp2_prs_tcam_data_byte_set(&pe,
2165 MVPP2_ETH_TYPE_LEN + 2 + 3,
2166 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
2167 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
2168 /* Clear all ai bits for next iteration */
2169 mvpp2_prs_sram_ai_update(&pe, 0,
2170 MVPP2_PRS_SRAM_AI_MASK);
2171 /* If packet is tagged continue check vlans */
2172 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2173 } else {
2174 /* Set result info bits to 'no vlans' */
2175 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2176 MVPP2_PRS_RI_VLAN_MASK);
2177 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2178 }
2179 /* Mask/unmask all ports, depending on dsa type */
2180 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
2181 }
2182
2183 /* Update port mask */
2184 mvpp2_prs_tcam_port_set(&pe, port, add);
2185
2186 mvpp2_prs_hw_write(priv, &pe);
2187}
2188
2189/* Search for existing single/triple vlan entry */
2190static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
2191 unsigned short tpid, int ai)
2192{
2193 struct mvpp2_prs_entry *pe;
2194 int tid;
2195
2196 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2197 if (!pe)
2198 return NULL;
2199 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2200
2201 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
2202 for (tid = MVPP2_PE_FIRST_FREE_TID;
2203 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2204 unsigned int ri_bits, ai_bits;
2205 bool match;
2206
2207 if (!priv->prs_shadow[tid].valid ||
2208 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2209 continue;
2210
2211 pe->index = tid;
2212
2213 mvpp2_prs_hw_read(priv, pe);
2214 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
2215 if (!match)
2216 continue;
2217
2218 /* Get vlan type */
2219 ri_bits = mvpp2_prs_sram_ri_get(pe);
2220 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2221
2222 /* Get current ai value from tcam */
2223 ai_bits = mvpp2_prs_tcam_ai_get(pe);
2224 /* Clear double vlan bit */
2225 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
2226
2227 if (ai != ai_bits)
2228 continue;
2229
2230 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2231 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2232 return pe;
2233 }
2234 kfree(pe);
2235
2236 return NULL;
2237}
2238
2239/* Add/update single/triple vlan entry */
2240static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
2241 unsigned int port_map)
2242{
2243 struct mvpp2_prs_entry *pe;
2244 int tid_aux, tid;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302245 int ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002246
2247 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
2248
2249 if (!pe) {
2250 /* Create new tcam entry */
2251 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
2252 MVPP2_PE_FIRST_FREE_TID);
2253 if (tid < 0)
2254 return tid;
2255
2256 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2257 if (!pe)
2258 return -ENOMEM;
2259
2260 /* Get last double vlan tid */
2261 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
2262 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
2263 unsigned int ri_bits;
2264
2265 if (!priv->prs_shadow[tid_aux].valid ||
2266 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2267 continue;
2268
2269 pe->index = tid_aux;
2270 mvpp2_prs_hw_read(priv, pe);
2271 ri_bits = mvpp2_prs_sram_ri_get(pe);
2272 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
2273 MVPP2_PRS_RI_VLAN_DOUBLE)
2274 break;
2275 }
2276
Sudip Mukherjee43737472014-11-01 16:59:34 +05302277 if (tid <= tid_aux) {
2278 ret = -EINVAL;
Markus Elfringf9fd0e32017-04-17 13:50:35 +02002279 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302280 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002281
Markus Elfringbd6aaf52017-04-17 10:40:32 +02002282 memset(pe, 0, sizeof(*pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002283 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2284 pe->index = tid;
2285
2286 mvpp2_prs_match_etype(pe, 0, tpid);
2287
Maxime Chevallier56beda32018-02-28 10:14:13 +01002288 /* VLAN tag detected, proceed with VID filtering */
2289 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VID);
2290
Marcin Wojtas3f518502014-07-10 16:52:13 -03002291 /* Clear all ai bits for next iteration */
2292 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2293
2294 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2295 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
2296 MVPP2_PRS_RI_VLAN_MASK);
2297 } else {
2298 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2299 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2300 MVPP2_PRS_RI_VLAN_MASK);
2301 }
2302 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2303
2304 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2305 }
2306 /* Update ports' mask */
2307 mvpp2_prs_tcam_port_map_set(pe, port_map);
2308
2309 mvpp2_prs_hw_write(priv, pe);
Markus Elfringf9fd0e32017-04-17 13:50:35 +02002310free_pe:
Marcin Wojtas3f518502014-07-10 16:52:13 -03002311 kfree(pe);
2312
Sudip Mukherjee43737472014-11-01 16:59:34 +05302313 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002314}
2315
2316/* Get first free double vlan ai number */
2317static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2318{
2319 int i;
2320
2321 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2322 if (!priv->prs_double_vlans[i])
2323 return i;
2324 }
2325
2326 return -EINVAL;
2327}
2328
2329/* Search for existing double vlan entry */
2330static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
2331 unsigned short tpid1,
2332 unsigned short tpid2)
2333{
2334 struct mvpp2_prs_entry *pe;
2335 int tid;
2336
2337 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2338 if (!pe)
2339 return NULL;
2340 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2341
2342 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
2343 for (tid = MVPP2_PE_FIRST_FREE_TID;
2344 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2345 unsigned int ri_mask;
2346 bool match;
2347
2348 if (!priv->prs_shadow[tid].valid ||
2349 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2350 continue;
2351
2352 pe->index = tid;
2353 mvpp2_prs_hw_read(priv, pe);
2354
2355 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
2356 && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
2357
2358 if (!match)
2359 continue;
2360
2361 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
2362 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2363 return pe;
2364 }
2365 kfree(pe);
2366
2367 return NULL;
2368}
2369
2370/* Add or update double vlan entry */
2371static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2372 unsigned short tpid2,
2373 unsigned int port_map)
2374{
2375 struct mvpp2_prs_entry *pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302376 int tid_aux, tid, ai, ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002377
2378 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2379
2380 if (!pe) {
2381 /* Create new tcam entry */
2382 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2383 MVPP2_PE_LAST_FREE_TID);
2384 if (tid < 0)
2385 return tid;
2386
2387 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2388 if (!pe)
2389 return -ENOMEM;
2390
2391 /* Set ai value for new double vlan entry */
2392 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
Sudip Mukherjee43737472014-11-01 16:59:34 +05302393 if (ai < 0) {
2394 ret = ai;
Markus Elfringc9a7e122017-04-17 13:03:49 +02002395 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302396 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002397
2398 /* Get first single/triple vlan tid */
2399 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2400 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2401 unsigned int ri_bits;
2402
2403 if (!priv->prs_shadow[tid_aux].valid ||
2404 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2405 continue;
2406
2407 pe->index = tid_aux;
2408 mvpp2_prs_hw_read(priv, pe);
2409 ri_bits = mvpp2_prs_sram_ri_get(pe);
2410 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2411 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2412 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2413 break;
2414 }
2415
Sudip Mukherjee43737472014-11-01 16:59:34 +05302416 if (tid >= tid_aux) {
2417 ret = -ERANGE;
Markus Elfringc9a7e122017-04-17 13:03:49 +02002418 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302419 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002420
Markus Elfringbd6aaf52017-04-17 10:40:32 +02002421 memset(pe, 0, sizeof(*pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002422 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2423 pe->index = tid;
2424
2425 priv->prs_double_vlans[ai] = true;
2426
2427 mvpp2_prs_match_etype(pe, 0, tpid1);
2428 mvpp2_prs_match_etype(pe, 4, tpid2);
2429
2430 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
Maxime Chevallier56beda32018-02-28 10:14:13 +01002431 /* Shift 4 bytes - skip outer vlan tag */
2432 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
Marcin Wojtas3f518502014-07-10 16:52:13 -03002433 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2434 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2435 MVPP2_PRS_RI_VLAN_MASK);
2436 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2437 MVPP2_PRS_SRAM_AI_MASK);
2438
2439 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2440 }
2441
2442 /* Update ports' mask */
2443 mvpp2_prs_tcam_port_map_set(pe, port_map);
2444 mvpp2_prs_hw_write(priv, pe);
Markus Elfringc9a7e122017-04-17 13:03:49 +02002445free_pe:
Marcin Wojtas3f518502014-07-10 16:52:13 -03002446 kfree(pe);
Sudip Mukherjee43737472014-11-01 16:59:34 +05302447 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002448}
2449
2450/* IPv4 header parsing for fragmentation and L4 offset */
2451static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2452 unsigned int ri, unsigned int ri_mask)
2453{
2454 struct mvpp2_prs_entry pe;
2455 int tid;
2456
2457 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2458 (proto != IPPROTO_IGMP))
2459 return -EINVAL;
2460
Stefan Chulskiaff3da32017-09-25 14:59:46 +02002461 /* Not fragmented packet */
Marcin Wojtas3f518502014-07-10 16:52:13 -03002462 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2463 MVPP2_PE_LAST_FREE_TID);
2464 if (tid < 0)
2465 return tid;
2466
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002467 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002468 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2469 pe.index = tid;
2470
2471 /* Set next lu to IPv4 */
2472 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2473 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2474 /* Set L4 offset */
2475 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2476 sizeof(struct iphdr) - 4,
2477 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2478 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2479 MVPP2_PRS_IPV4_DIP_AI_BIT);
Stefan Chulskiaff3da32017-09-25 14:59:46 +02002480 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2481
2482 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
2483 MVPP2_PRS_TCAM_PROTO_MASK_L);
2484 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
2485 MVPP2_PRS_TCAM_PROTO_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002486
2487 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2488 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2489 /* Unmask all ports */
2490 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2491
2492 /* Update shadow table and hw entry */
2493 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2494 mvpp2_prs_hw_write(priv, &pe);
2495
Stefan Chulskiaff3da32017-09-25 14:59:46 +02002496 /* Fragmented packet */
Marcin Wojtas3f518502014-07-10 16:52:13 -03002497 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2498 MVPP2_PE_LAST_FREE_TID);
2499 if (tid < 0)
2500 return tid;
2501
2502 pe.index = tid;
2503 /* Clear ri before updating */
2504 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2505 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2506 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2507
Stefan Chulskiaff3da32017-09-25 14:59:46 +02002508 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
2509 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2510
2511 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
2512 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002513
2514 /* Update shadow table and hw entry */
2515 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2516 mvpp2_prs_hw_write(priv, &pe);
2517
2518 return 0;
2519}
2520
2521/* IPv4 L3 multicast or broadcast */
2522static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2523{
2524 struct mvpp2_prs_entry pe;
2525 int mask, tid;
2526
2527 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2528 MVPP2_PE_LAST_FREE_TID);
2529 if (tid < 0)
2530 return tid;
2531
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002532 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002533 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2534 pe.index = tid;
2535
2536 switch (l3_cast) {
2537 case MVPP2_PRS_L3_MULTI_CAST:
2538 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2539 MVPP2_PRS_IPV4_MC_MASK);
2540 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2541 MVPP2_PRS_RI_L3_ADDR_MASK);
2542 break;
2543 case MVPP2_PRS_L3_BROAD_CAST:
2544 mask = MVPP2_PRS_IPV4_BC_MASK;
2545 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2546 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2547 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2548 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2549 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2550 MVPP2_PRS_RI_L3_ADDR_MASK);
2551 break;
2552 default:
2553 return -EINVAL;
2554 }
2555
2556 /* Finished: go to flowid generation */
2557 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2558 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2559
2560 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2561 MVPP2_PRS_IPV4_DIP_AI_BIT);
2562 /* Unmask all ports */
2563 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2564
2565 /* Update shadow table and hw entry */
2566 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2567 mvpp2_prs_hw_write(priv, &pe);
2568
2569 return 0;
2570}
2571
2572/* Set entries for protocols over IPv6 */
2573static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2574 unsigned int ri, unsigned int ri_mask)
2575{
2576 struct mvpp2_prs_entry pe;
2577 int tid;
2578
2579 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2580 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2581 return -EINVAL;
2582
2583 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2584 MVPP2_PE_LAST_FREE_TID);
2585 if (tid < 0)
2586 return tid;
2587
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002588 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002589 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2590 pe.index = tid;
2591
2592 /* Finished: go to flowid generation */
2593 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2594 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2595 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2596 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2597 sizeof(struct ipv6hdr) - 6,
2598 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2599
2600 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2601 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2602 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2603 /* Unmask all ports */
2604 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2605
2606 /* Write HW */
2607 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2608 mvpp2_prs_hw_write(priv, &pe);
2609
2610 return 0;
2611}
2612
2613/* IPv6 L3 multicast entry */
2614static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2615{
2616 struct mvpp2_prs_entry pe;
2617 int tid;
2618
2619 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2620 return -EINVAL;
2621
2622 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2623 MVPP2_PE_LAST_FREE_TID);
2624 if (tid < 0)
2625 return tid;
2626
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002627 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002628 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2629 pe.index = tid;
2630
2631 /* Finished: go to flowid generation */
2632 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2633 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2634 MVPP2_PRS_RI_L3_ADDR_MASK);
2635 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2636 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2637 /* Shift back to IPv6 NH */
2638 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2639
2640 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2641 MVPP2_PRS_IPV6_MC_MASK);
2642 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2643 /* Unmask all ports */
2644 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2645
2646 /* Update shadow table and hw entry */
2647 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2648 mvpp2_prs_hw_write(priv, &pe);
2649
2650 return 0;
2651}
2652
2653/* Parser per-port initialization */
2654static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2655 int lu_max, int offset)
2656{
2657 u32 val;
2658
2659 /* Set lookup ID */
2660 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2661 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2662 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2663 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2664
2665 /* Set maximum number of loops for packet received from port */
2666 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2667 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2668 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2669 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2670
2671 /* Set initial offset for packet header extraction for the first
2672 * searching loop
2673 */
2674 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2675 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2676 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2677 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2678}
2679
2680/* Default flow entries initialization for all ports */
2681static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2682{
2683 struct mvpp2_prs_entry pe;
2684 int port;
2685
2686 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002687 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002688 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2689 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2690
2691 /* Mask all ports */
2692 mvpp2_prs_tcam_port_map_set(&pe, 0);
2693
2694 /* Set flow ID*/
2695 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2696 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2697
2698 /* Update shadow table and hw entry */
2699 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2700 mvpp2_prs_hw_write(priv, &pe);
2701 }
2702}
2703
2704/* Set default entry for Marvell Header field */
2705static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2706{
2707 struct mvpp2_prs_entry pe;
2708
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002709 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002710
2711 pe.index = MVPP2_PE_MH_DEFAULT;
2712 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2713 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2714 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2715 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2716
2717 /* Unmask all ports */
2718 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2719
2720 /* Update shadow table and hw entry */
2721 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2722 mvpp2_prs_hw_write(priv, &pe);
2723}
2724
2725/* Set default entires (place holder) for promiscuous, non-promiscuous and
2726 * multicast MAC addresses
2727 */
2728static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2729{
2730 struct mvpp2_prs_entry pe;
2731
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002732 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002733
2734 /* Non-promiscuous mode for all ports - DROP unknown packets */
2735 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2736 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2737
2738 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2739 MVPP2_PRS_RI_DROP_MASK);
2740 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2741 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2742
2743 /* Unmask all ports */
2744 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2745
2746 /* Update shadow table and hw entry */
2747 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2748 mvpp2_prs_hw_write(priv, &pe);
2749
2750 /* place holders only - no ports */
2751 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2752 mvpp2_prs_mac_promisc_set(priv, 0, false);
Antoine Tenart20746d72017-10-24 11:41:27 +02002753 mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
2754 mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002755}
2756
2757/* Set default entries for various types of dsa packets */
2758static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2759{
2760 struct mvpp2_prs_entry pe;
2761
2762 /* None tagged EDSA entry - place holder */
2763 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2764 MVPP2_PRS_EDSA);
2765
2766 /* Tagged EDSA entry - place holder */
2767 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2768
2769 /* None tagged DSA entry - place holder */
2770 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2771 MVPP2_PRS_DSA);
2772
2773 /* Tagged DSA entry - place holder */
2774 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2775
2776 /* None tagged EDSA ethertype entry - place holder*/
2777 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2778 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2779
2780 /* Tagged EDSA ethertype entry - place holder*/
2781 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2782 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2783
2784 /* None tagged DSA ethertype entry */
2785 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2786 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2787
2788 /* Tagged DSA ethertype entry */
2789 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2790 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2791
2792 /* Set default entry, in case DSA or EDSA tag not found */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002793 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002794 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2795 pe.index = MVPP2_PE_DSA_DEFAULT;
2796 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2797
2798 /* Shift 0 bytes */
2799 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2800 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2801
2802 /* Clear all sram ai bits for next iteration */
2803 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2804
2805 /* Unmask all ports */
2806 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2807
2808 mvpp2_prs_hw_write(priv, &pe);
2809}
2810
Maxime Chevallier56beda32018-02-28 10:14:13 +01002811/* Initialize parser entries for VID filtering */
2812static void mvpp2_prs_vid_init(struct mvpp2 *priv)
2813{
2814 struct mvpp2_prs_entry pe;
2815
2816 memset(&pe, 0, sizeof(pe));
2817
2818 /* Set default vid entry */
2819 pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
2820 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2821
2822 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
2823
2824 /* Skip VLAN header - Set offset to 4 bytes */
2825 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
2826 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2827
2828 /* Clear all ai bits for next iteration */
2829 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2830
2831 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2832
2833 /* Unmask all ports */
2834 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2835
2836 /* Update shadow table and hw entry */
2837 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2838 mvpp2_prs_hw_write(priv, &pe);
2839
2840 /* Set default vid entry for extended DSA*/
2841 memset(&pe, 0, sizeof(pe));
2842
2843 /* Set default vid entry */
2844 pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
2845 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2846
2847 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
2848 MVPP2_PRS_EDSA_VID_AI_BIT);
2849
2850 /* Skip VLAN header - Set offset to 8 bytes */
2851 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
2852 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2853
2854 /* Clear all ai bits for next iteration */
2855 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2856
2857 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2858
2859 /* Unmask all ports */
2860 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2861
2862 /* Update shadow table and hw entry */
2863 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2864 mvpp2_prs_hw_write(priv, &pe);
2865}
2866
Marcin Wojtas3f518502014-07-10 16:52:13 -03002867/* Match basic ethertypes */
2868static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2869{
2870 struct mvpp2_prs_entry pe;
2871 int tid;
2872
2873 /* Ethertype: PPPoE */
2874 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2875 MVPP2_PE_LAST_FREE_TID);
2876 if (tid < 0)
2877 return tid;
2878
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002879 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002880 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2881 pe.index = tid;
2882
2883 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2884
2885 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2886 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2887 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2888 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2889 MVPP2_PRS_RI_PPPOE_MASK);
2890
2891 /* Update shadow table and hw entry */
2892 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2893 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2894 priv->prs_shadow[pe.index].finish = false;
2895 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2896 MVPP2_PRS_RI_PPPOE_MASK);
2897 mvpp2_prs_hw_write(priv, &pe);
2898
2899 /* Ethertype: ARP */
2900 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2901 MVPP2_PE_LAST_FREE_TID);
2902 if (tid < 0)
2903 return tid;
2904
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002905 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002906 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2907 pe.index = tid;
2908
2909 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2910
2911 /* Generate flow in the next iteration*/
2912 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2913 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2914 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2915 MVPP2_PRS_RI_L3_PROTO_MASK);
2916 /* Set L3 offset */
2917 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2918 MVPP2_ETH_TYPE_LEN,
2919 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2920
2921 /* Update shadow table and hw entry */
2922 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2923 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2924 priv->prs_shadow[pe.index].finish = true;
2925 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2926 MVPP2_PRS_RI_L3_PROTO_MASK);
2927 mvpp2_prs_hw_write(priv, &pe);
2928
2929 /* Ethertype: LBTD */
2930 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2931 MVPP2_PE_LAST_FREE_TID);
2932 if (tid < 0)
2933 return tid;
2934
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002935 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002936 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2937 pe.index = tid;
2938
2939 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2940
2941 /* Generate flow in the next iteration*/
2942 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2943 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2944 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2945 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2946 MVPP2_PRS_RI_CPU_CODE_MASK |
2947 MVPP2_PRS_RI_UDF3_MASK);
2948 /* Set L3 offset */
2949 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2950 MVPP2_ETH_TYPE_LEN,
2951 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2952
2953 /* Update shadow table and hw entry */
2954 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2955 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2956 priv->prs_shadow[pe.index].finish = true;
2957 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2958 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2959 MVPP2_PRS_RI_CPU_CODE_MASK |
2960 MVPP2_PRS_RI_UDF3_MASK);
2961 mvpp2_prs_hw_write(priv, &pe);
2962
2963 /* Ethertype: IPv4 without options */
2964 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2965 MVPP2_PE_LAST_FREE_TID);
2966 if (tid < 0)
2967 return tid;
2968
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002969 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002970 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2971 pe.index = tid;
2972
2973 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2974 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2975 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2976 MVPP2_PRS_IPV4_HEAD_MASK |
2977 MVPP2_PRS_IPV4_IHL_MASK);
2978
2979 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2980 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2981 MVPP2_PRS_RI_L3_PROTO_MASK);
2982 /* Skip eth_type + 4 bytes of IP header */
2983 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2984 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2985 /* Set L3 offset */
2986 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2987 MVPP2_ETH_TYPE_LEN,
2988 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2989
2990 /* Update shadow table and hw entry */
2991 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2992 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2993 priv->prs_shadow[pe.index].finish = false;
2994 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2995 MVPP2_PRS_RI_L3_PROTO_MASK);
2996 mvpp2_prs_hw_write(priv, &pe);
2997
2998 /* Ethertype: IPv4 with options */
2999 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3000 MVPP2_PE_LAST_FREE_TID);
3001 if (tid < 0)
3002 return tid;
3003
3004 pe.index = tid;
3005
3006 /* Clear tcam data before updating */
3007 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
3008 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
3009
3010 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
3011 MVPP2_PRS_IPV4_HEAD,
3012 MVPP2_PRS_IPV4_HEAD_MASK);
3013
3014 /* Clear ri before updating */
3015 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
3016 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
3017 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
3018 MVPP2_PRS_RI_L3_PROTO_MASK);
3019
3020 /* Update shadow table and hw entry */
3021 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
3022 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
3023 priv->prs_shadow[pe.index].finish = false;
3024 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
3025 MVPP2_PRS_RI_L3_PROTO_MASK);
3026 mvpp2_prs_hw_write(priv, &pe);
3027
3028 /* Ethertype: IPv6 without options */
3029 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3030 MVPP2_PE_LAST_FREE_TID);
3031 if (tid < 0)
3032 return tid;
3033
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003034 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003035 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
3036 pe.index = tid;
3037
3038 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
3039
3040 /* Skip DIP of IPV6 header */
3041 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
3042 MVPP2_MAX_L3_ADDR_SIZE,
3043 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3044 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3045 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
3046 MVPP2_PRS_RI_L3_PROTO_MASK);
3047 /* Set L3 offset */
3048 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3049 MVPP2_ETH_TYPE_LEN,
3050 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3051
3052 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
3053 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
3054 priv->prs_shadow[pe.index].finish = false;
3055 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
3056 MVPP2_PRS_RI_L3_PROTO_MASK);
3057 mvpp2_prs_hw_write(priv, &pe);
3058
3059 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
3060 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3061 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
3062 pe.index = MVPP2_PE_ETH_TYPE_UN;
3063
3064 /* Unmask all ports */
3065 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3066
3067 /* Generate flow in the next iteration*/
3068 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3069 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3070 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
3071 MVPP2_PRS_RI_L3_PROTO_MASK);
3072 /* Set L3 offset even it's unknown L3 */
3073 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3074 MVPP2_ETH_TYPE_LEN,
3075 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3076
3077 /* Update shadow table and hw entry */
3078 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
3079 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
3080 priv->prs_shadow[pe.index].finish = true;
3081 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
3082 MVPP2_PRS_RI_L3_PROTO_MASK);
3083 mvpp2_prs_hw_write(priv, &pe);
3084
3085 return 0;
3086}
3087
3088/* Configure vlan entries and detect up to 2 successive VLAN tags.
3089 * Possible options:
3090 * 0x8100, 0x88A8
3091 * 0x8100, 0x8100
3092 * 0x8100
3093 * 0x88A8
3094 */
3095static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
3096{
3097 struct mvpp2_prs_entry pe;
3098 int err;
3099
3100 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
3101 MVPP2_PRS_DBL_VLANS_MAX,
3102 GFP_KERNEL);
3103 if (!priv->prs_double_vlans)
3104 return -ENOMEM;
3105
3106 /* Double VLAN: 0x8100, 0x88A8 */
3107 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
3108 MVPP2_PRS_PORT_MASK);
3109 if (err)
3110 return err;
3111
3112 /* Double VLAN: 0x8100, 0x8100 */
3113 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
3114 MVPP2_PRS_PORT_MASK);
3115 if (err)
3116 return err;
3117
3118 /* Single VLAN: 0x88a8 */
3119 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
3120 MVPP2_PRS_PORT_MASK);
3121 if (err)
3122 return err;
3123
3124 /* Single VLAN: 0x8100 */
3125 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
3126 MVPP2_PRS_PORT_MASK);
3127 if (err)
3128 return err;
3129
3130 /* Set default double vlan entry */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003131 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003132 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
3133 pe.index = MVPP2_PE_VLAN_DBL;
3134
Maxime Chevallier56beda32018-02-28 10:14:13 +01003135 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
3136
Marcin Wojtas3f518502014-07-10 16:52:13 -03003137 /* Clear ai for next iterations */
3138 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
3139 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
3140 MVPP2_PRS_RI_VLAN_MASK);
3141
3142 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
3143 MVPP2_PRS_DBL_VLAN_AI_BIT);
3144 /* Unmask all ports */
3145 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3146
3147 /* Update shadow table and hw entry */
3148 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
3149 mvpp2_prs_hw_write(priv, &pe);
3150
3151 /* Set default vlan none entry */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003152 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003153 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
3154 pe.index = MVPP2_PE_VLAN_NONE;
3155
3156 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
3157 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
3158 MVPP2_PRS_RI_VLAN_MASK);
3159
3160 /* Unmask all ports */
3161 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3162
3163 /* Update shadow table and hw entry */
3164 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
3165 mvpp2_prs_hw_write(priv, &pe);
3166
3167 return 0;
3168}
3169
3170/* Set entries for PPPoE ethertype */
3171static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
3172{
3173 struct mvpp2_prs_entry pe;
3174 int tid;
3175
3176 /* IPv4 over PPPoE with options */
3177 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3178 MVPP2_PE_LAST_FREE_TID);
3179 if (tid < 0)
3180 return tid;
3181
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003182 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003183 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3184 pe.index = tid;
3185
3186 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
3187
3188 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
3189 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
3190 MVPP2_PRS_RI_L3_PROTO_MASK);
3191 /* Skip eth_type + 4 bytes of IP header */
3192 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
3193 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3194 /* Set L3 offset */
3195 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3196 MVPP2_ETH_TYPE_LEN,
3197 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3198
3199 /* Update shadow table and hw entry */
3200 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3201 mvpp2_prs_hw_write(priv, &pe);
3202
3203 /* IPv4 over PPPoE without options */
3204 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3205 MVPP2_PE_LAST_FREE_TID);
3206 if (tid < 0)
3207 return tid;
3208
3209 pe.index = tid;
3210
3211 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
3212 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
3213 MVPP2_PRS_IPV4_HEAD_MASK |
3214 MVPP2_PRS_IPV4_IHL_MASK);
3215
3216 /* Clear ri before updating */
3217 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
3218 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
3219 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
3220 MVPP2_PRS_RI_L3_PROTO_MASK);
3221
3222 /* Update shadow table and hw entry */
3223 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3224 mvpp2_prs_hw_write(priv, &pe);
3225
3226 /* IPv6 over PPPoE */
3227 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3228 MVPP2_PE_LAST_FREE_TID);
3229 if (tid < 0)
3230 return tid;
3231
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003232 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003233 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3234 pe.index = tid;
3235
3236 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
3237
3238 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3239 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
3240 MVPP2_PRS_RI_L3_PROTO_MASK);
3241 /* Skip eth_type + 4 bytes of IPv6 header */
3242 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
3243 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3244 /* Set L3 offset */
3245 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3246 MVPP2_ETH_TYPE_LEN,
3247 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3248
3249 /* Update shadow table and hw entry */
3250 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3251 mvpp2_prs_hw_write(priv, &pe);
3252
3253 /* Non-IP over PPPoE */
3254 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3255 MVPP2_PE_LAST_FREE_TID);
3256 if (tid < 0)
3257 return tid;
3258
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003259 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003260 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
3261 pe.index = tid;
3262
3263 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
3264 MVPP2_PRS_RI_L3_PROTO_MASK);
3265
3266 /* Finished: go to flowid generation */
3267 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3268 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3269 /* Set L3 offset even if it's unknown L3 */
3270 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
3271 MVPP2_ETH_TYPE_LEN,
3272 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3273
3274 /* Update shadow table and hw entry */
3275 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
3276 mvpp2_prs_hw_write(priv, &pe);
3277
3278 return 0;
3279}
3280
3281/* Initialize entries for IPv4 */
3282static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
3283{
3284 struct mvpp2_prs_entry pe;
3285 int err;
3286
3287 /* Set entries for TCP, UDP and IGMP over IPv4 */
3288 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
3289 MVPP2_PRS_RI_L4_PROTO_MASK);
3290 if (err)
3291 return err;
3292
3293 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
3294 MVPP2_PRS_RI_L4_PROTO_MASK);
3295 if (err)
3296 return err;
3297
3298 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
3299 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3300 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3301 MVPP2_PRS_RI_CPU_CODE_MASK |
3302 MVPP2_PRS_RI_UDF3_MASK);
3303 if (err)
3304 return err;
3305
3306 /* IPv4 Broadcast */
3307 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
3308 if (err)
3309 return err;
3310
3311 /* IPv4 Multicast */
3312 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3313 if (err)
3314 return err;
3315
3316 /* Default IPv4 entry for unknown protocols */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003317 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003318 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3319 pe.index = MVPP2_PE_IP4_PROTO_UN;
3320
3321 /* Set next lu to IPv4 */
3322 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
3323 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3324 /* Set L4 offset */
3325 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3326 sizeof(struct iphdr) - 4,
3327 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3328 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3329 MVPP2_PRS_IPV4_DIP_AI_BIT);
3330 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3331 MVPP2_PRS_RI_L4_PROTO_MASK);
3332
3333 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
3334 /* Unmask all ports */
3335 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3336
3337 /* Update shadow table and hw entry */
3338 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3339 mvpp2_prs_hw_write(priv, &pe);
3340
3341 /* Default IPv4 entry for unicast address */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003342 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003343 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3344 pe.index = MVPP2_PE_IP4_ADDR_UN;
3345
3346 /* Finished: go to flowid generation */
3347 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3348 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3349 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3350 MVPP2_PRS_RI_L3_ADDR_MASK);
3351
3352 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3353 MVPP2_PRS_IPV4_DIP_AI_BIT);
3354 /* Unmask all ports */
3355 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3356
3357 /* Update shadow table and hw entry */
3358 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3359 mvpp2_prs_hw_write(priv, &pe);
3360
3361 return 0;
3362}
3363
3364/* Initialize entries for IPv6 */
3365static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
3366{
3367 struct mvpp2_prs_entry pe;
3368 int tid, err;
3369
3370 /* Set entries for TCP, UDP and ICMP over IPv6 */
3371 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
3372 MVPP2_PRS_RI_L4_TCP,
3373 MVPP2_PRS_RI_L4_PROTO_MASK);
3374 if (err)
3375 return err;
3376
3377 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
3378 MVPP2_PRS_RI_L4_UDP,
3379 MVPP2_PRS_RI_L4_PROTO_MASK);
3380 if (err)
3381 return err;
3382
3383 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
3384 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3385 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3386 MVPP2_PRS_RI_CPU_CODE_MASK |
3387 MVPP2_PRS_RI_UDF3_MASK);
3388 if (err)
3389 return err;
3390
3391 /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
3392 /* Result Info: UDF7=1, DS lite */
3393 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
3394 MVPP2_PRS_RI_UDF7_IP6_LITE,
3395 MVPP2_PRS_RI_UDF7_MASK);
3396 if (err)
3397 return err;
3398
3399 /* IPv6 multicast */
3400 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3401 if (err)
3402 return err;
3403
3404 /* Entry for checking hop limit */
3405 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3406 MVPP2_PE_LAST_FREE_TID);
3407 if (tid < 0)
3408 return tid;
3409
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003410 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003411 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3412 pe.index = tid;
3413
3414 /* Finished: go to flowid generation */
3415 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3416 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3417 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
3418 MVPP2_PRS_RI_DROP_MASK,
3419 MVPP2_PRS_RI_L3_PROTO_MASK |
3420 MVPP2_PRS_RI_DROP_MASK);
3421
3422 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
3423 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3424 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3425
3426 /* Update shadow table and hw entry */
3427 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3428 mvpp2_prs_hw_write(priv, &pe);
3429
3430 /* Default IPv6 entry for unknown protocols */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003431 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003432 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3433 pe.index = MVPP2_PE_IP6_PROTO_UN;
3434
3435 /* Finished: go to flowid generation */
3436 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3437 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3438 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3439 MVPP2_PRS_RI_L4_PROTO_MASK);
3440 /* Set L4 offset relatively to our current place */
3441 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3442 sizeof(struct ipv6hdr) - 4,
3443 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3444
3445 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3446 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3447 /* Unmask all ports */
3448 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3449
3450 /* Update shadow table and hw entry */
3451 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3452 mvpp2_prs_hw_write(priv, &pe);
3453
3454 /* Default IPv6 entry for unknown ext protocols */
3455 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3456 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3457 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
3458
3459 /* Finished: go to flowid generation */
3460 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3461 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3462 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3463 MVPP2_PRS_RI_L4_PROTO_MASK);
3464
3465 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
3466 MVPP2_PRS_IPV6_EXT_AI_BIT);
3467 /* Unmask all ports */
3468 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3469
3470 /* Update shadow table and hw entry */
3471 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3472 mvpp2_prs_hw_write(priv, &pe);
3473
3474 /* Default IPv6 entry for unicast address */
3475 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3476 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3477 pe.index = MVPP2_PE_IP6_ADDR_UN;
3478
3479 /* Finished: go to IPv6 again */
3480 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3481 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3482 MVPP2_PRS_RI_L3_ADDR_MASK);
3483 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3484 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3485 /* Shift back to IPV6 NH */
3486 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3487
3488 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3489 /* Unmask all ports */
3490 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3491
3492 /* Update shadow table and hw entry */
3493 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3494 mvpp2_prs_hw_write(priv, &pe);
3495
3496 return 0;
3497}
3498
Maxime Chevallier56beda32018-02-28 10:14:13 +01003499/* Find tcam entry with matched pair <vid,port> */
3500static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
3501 u16 mask)
3502{
3503 unsigned char byte[2], enable[2];
3504 struct mvpp2_prs_entry pe;
3505 u16 rvid, rmask;
3506 int tid;
3507
3508 /* Go through the all entries with MVPP2_PRS_LU_VID */
3509 for (tid = MVPP2_PE_VID_FILT_RANGE_START;
3510 tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
3511 if (!priv->prs_shadow[tid].valid ||
3512 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
3513 continue;
3514
3515 pe.index = tid;
3516
3517 mvpp2_prs_hw_read(priv, &pe);
3518 mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
3519 mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
3520
3521 rvid = ((byte[0] & 0xf) << 8) + byte[1];
3522 rmask = ((enable[0] & 0xf) << 8) + enable[1];
3523
3524 if (rvid != vid || rmask != mask)
3525 continue;
3526
3527 return tid;
3528 }
3529
3530 return 0;
3531}
3532
3533/* Write parser entry for VID filtering */
3534static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
3535{
3536 unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
3537 port->id * MVPP2_PRS_VLAN_FILT_MAX;
3538 unsigned int mask = 0xfff, reg_val, shift;
3539 struct mvpp2 *priv = port->priv;
3540 struct mvpp2_prs_entry pe;
3541 int tid;
3542
3543 /* Scan TCAM and see if entry with this <vid,port> already exist */
3544 tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
3545
3546 reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
3547 if (reg_val & MVPP2_DSA_EXTENDED)
3548 shift = MVPP2_VLAN_TAG_EDSA_LEN;
3549 else
3550 shift = MVPP2_VLAN_TAG_LEN;
3551
3552 /* No such entry */
3553 if (!tid) {
3554 memset(&pe, 0, sizeof(pe));
3555
3556 /* Go through all entries from first to last in vlan range */
3557 tid = mvpp2_prs_tcam_first_free(priv, vid_start,
3558 vid_start +
3559 MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
3560
3561 /* There isn't room for a new VID filter */
3562 if (tid < 0)
3563 return tid;
3564
3565 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
3566 pe.index = tid;
3567
3568 /* Mask all ports */
3569 mvpp2_prs_tcam_port_map_set(&pe, 0);
3570 } else {
3571 mvpp2_prs_hw_read(priv, &pe);
3572 }
3573
3574 /* Enable the current port */
3575 mvpp2_prs_tcam_port_set(&pe, port->id, true);
3576
3577 /* Continue - set next lookup */
3578 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
3579
3580 /* Skip VLAN header - Set offset to 4 or 8 bytes */
3581 mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3582
3583 /* Set match on VID */
3584 mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
3585
3586 /* Clear all ai bits for next iteration */
3587 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
3588
3589 /* Update shadow table */
3590 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
3591 mvpp2_prs_hw_write(priv, &pe);
3592
3593 return 0;
3594}
3595
3596/* Write parser entry for VID filtering */
3597static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
3598{
3599 struct mvpp2 *priv = port->priv;
3600 int tid;
3601
3602 /* Scan TCAM and see if entry with this <vid,port> already exist */
3603 tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
3604
3605 /* No such entry */
3606 if (tid)
3607 return;
3608
3609 mvpp2_prs_hw_inv(priv, tid);
3610 priv->prs_shadow[tid].valid = false;
3611}
3612
3613/* Remove all existing VID filters on this port */
3614static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
3615{
3616 struct mvpp2 *priv = port->priv;
3617 int tid;
3618
3619 for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
3620 tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
3621 if (priv->prs_shadow[tid].valid)
3622 mvpp2_prs_vid_entry_remove(port, tid);
3623 }
3624}
3625
3626/* Remove VID filering entry for this port */
3627static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
3628{
3629 unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
3630 struct mvpp2 *priv = port->priv;
3631
3632 /* Invalidate the guard entry */
3633 mvpp2_prs_hw_inv(priv, tid);
3634
3635 priv->prs_shadow[tid].valid = false;
3636}
3637
3638/* Add guard entry that drops packets when no VID is matched on this port */
3639static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
3640{
3641 unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
3642 struct mvpp2 *priv = port->priv;
3643 unsigned int reg_val, shift;
3644 struct mvpp2_prs_entry pe;
3645
3646 if (priv->prs_shadow[tid].valid)
3647 return;
3648
3649 memset(&pe, 0, sizeof(pe));
3650
3651 pe.index = tid;
3652
3653 reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
3654 if (reg_val & MVPP2_DSA_EXTENDED)
3655 shift = MVPP2_VLAN_TAG_EDSA_LEN;
3656 else
3657 shift = MVPP2_VLAN_TAG_LEN;
3658
3659 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
3660
3661 /* Mask all ports */
3662 mvpp2_prs_tcam_port_map_set(&pe, 0);
3663
3664 /* Update port mask */
3665 mvpp2_prs_tcam_port_set(&pe, port->id, true);
3666
3667 /* Continue - set next lookup */
3668 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
3669
3670 /* Skip VLAN header - Set offset to 4 or 8 bytes */
3671 mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3672
3673 /* Drop VLAN packets that don't belong to any VIDs on this port */
3674 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
3675 MVPP2_PRS_RI_DROP_MASK);
3676
3677 /* Clear all ai bits for next iteration */
3678 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
3679
3680 /* Update shadow table */
3681 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
3682 mvpp2_prs_hw_write(priv, &pe);
3683}
3684
Marcin Wojtas3f518502014-07-10 16:52:13 -03003685/* Parser default initialization */
3686static int mvpp2_prs_default_init(struct platform_device *pdev,
3687 struct mvpp2 *priv)
3688{
3689 int err, index, i;
3690
3691 /* Enable tcam table */
3692 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
3693
3694 /* Clear all tcam and sram entries */
3695 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
3696 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
3697 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3698 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
3699
3700 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
3701 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3702 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3703 }
3704
3705 /* Invalidate all tcam entries */
3706 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3707 mvpp2_prs_hw_inv(priv, index);
3708
3709 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
Markus Elfring37df25e2017-04-17 09:12:34 +02003710 sizeof(*priv->prs_shadow),
Marcin Wojtas3f518502014-07-10 16:52:13 -03003711 GFP_KERNEL);
3712 if (!priv->prs_shadow)
3713 return -ENOMEM;
3714
3715 /* Always start from lookup = 0 */
3716 for (index = 0; index < MVPP2_MAX_PORTS; index++)
3717 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3718 MVPP2_PRS_PORT_LU_MAX, 0);
3719
3720 mvpp2_prs_def_flow_init(priv);
3721
3722 mvpp2_prs_mh_init(priv);
3723
3724 mvpp2_prs_mac_init(priv);
3725
3726 mvpp2_prs_dsa_init(priv);
3727
Maxime Chevallier56beda32018-02-28 10:14:13 +01003728 mvpp2_prs_vid_init(priv);
3729
Marcin Wojtas3f518502014-07-10 16:52:13 -03003730 err = mvpp2_prs_etype_init(priv);
3731 if (err)
3732 return err;
3733
3734 err = mvpp2_prs_vlan_init(pdev, priv);
3735 if (err)
3736 return err;
3737
3738 err = mvpp2_prs_pppoe_init(priv);
3739 if (err)
3740 return err;
3741
3742 err = mvpp2_prs_ip6_init(priv);
3743 if (err)
3744 return err;
3745
3746 err = mvpp2_prs_ip4_init(priv);
3747 if (err)
3748 return err;
3749
3750 return 0;
3751}
3752
3753/* Compare MAC DA with tcam entry data */
3754static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3755 const u8 *da, unsigned char *mask)
3756{
3757 unsigned char tcam_byte, tcam_mask;
3758 int index;
3759
3760 for (index = 0; index < ETH_ALEN; index++) {
3761 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3762 if (tcam_mask != mask[index])
3763 return false;
3764
3765 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3766 return false;
3767 }
3768
3769 return true;
3770}
3771
3772/* Find tcam entry with matched pair <MAC DA, port> */
3773static struct mvpp2_prs_entry *
3774mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3775 unsigned char *mask, int udf_type)
3776{
3777 struct mvpp2_prs_entry *pe;
3778 int tid;
3779
Antoine Tenart239dd4e2017-10-24 11:41:28 +02003780 pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003781 if (!pe)
3782 return NULL;
3783 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3784
3785 /* Go through the all entires with MVPP2_PRS_LU_MAC */
3786 for (tid = MVPP2_PE_FIRST_FREE_TID;
3787 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3788 unsigned int entry_pmap;
3789
3790 if (!priv->prs_shadow[tid].valid ||
3791 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3792 (priv->prs_shadow[tid].udf != udf_type))
3793 continue;
3794
3795 pe->index = tid;
3796 mvpp2_prs_hw_read(priv, pe);
3797 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3798
3799 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3800 entry_pmap == pmap)
3801 return pe;
3802 }
3803 kfree(pe);
3804
3805 return NULL;
3806}
3807
3808/* Update parser's mac da entry */
3809static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3810 const u8 *da, bool add)
3811{
3812 struct mvpp2_prs_entry *pe;
3813 unsigned int pmap, len, ri;
3814 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3815 int tid;
3816
3817 /* Scan TCAM and see if entry with this <MAC DA, port> already exist */
3818 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3819 MVPP2_PRS_UDF_MAC_DEF);
3820
3821 /* No such entry */
3822 if (!pe) {
3823 if (!add)
3824 return 0;
3825
3826 /* Create new TCAM entry */
3827 /* Find first range mac entry*/
3828 for (tid = MVPP2_PE_FIRST_FREE_TID;
3829 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3830 if (priv->prs_shadow[tid].valid &&
3831 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3832 (priv->prs_shadow[tid].udf ==
3833 MVPP2_PRS_UDF_MAC_RANGE))
3834 break;
3835
3836 /* Go through the all entries from first to last */
3837 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3838 tid - 1);
3839 if (tid < 0)
3840 return tid;
3841
Antoine Tenart239dd4e2017-10-24 11:41:28 +02003842 pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003843 if (!pe)
Amitoj Kaur Chawlac2bb7bc2016-02-04 19:25:26 +05303844 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003845 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3846 pe->index = tid;
3847
3848 /* Mask all ports */
3849 mvpp2_prs_tcam_port_map_set(pe, 0);
3850 }
3851
3852 /* Update port mask */
3853 mvpp2_prs_tcam_port_set(pe, port, add);
3854
3855 /* Invalidate the entry if no ports are left enabled */
3856 pmap = mvpp2_prs_tcam_port_map_get(pe);
3857 if (pmap == 0) {
3858 if (add) {
3859 kfree(pe);
Amitoj Kaur Chawlac2bb7bc2016-02-04 19:25:26 +05303860 return -EINVAL;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003861 }
3862 mvpp2_prs_hw_inv(priv, pe->index);
3863 priv->prs_shadow[pe->index].valid = false;
3864 kfree(pe);
3865 return 0;
3866 }
3867
3868 /* Continue - set next lookup */
3869 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3870
3871 /* Set match on DA */
3872 len = ETH_ALEN;
3873 while (len--)
3874 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3875
3876 /* Set result info bits */
3877 if (is_broadcast_ether_addr(da))
3878 ri = MVPP2_PRS_RI_L2_BCAST;
3879 else if (is_multicast_ether_addr(da))
3880 ri = MVPP2_PRS_RI_L2_MCAST;
3881 else
3882 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3883
3884 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3885 MVPP2_PRS_RI_MAC_ME_MASK);
3886 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3887 MVPP2_PRS_RI_MAC_ME_MASK);
3888
3889 /* Shift to ethertype */
3890 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3891 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3892
3893 /* Update shadow table and hw entry */
3894 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3895 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3896 mvpp2_prs_hw_write(priv, pe);
3897
3898 kfree(pe);
3899
3900 return 0;
3901}
3902
3903static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3904{
3905 struct mvpp2_port *port = netdev_priv(dev);
3906 int err;
3907
3908 /* Remove old parser entry */
3909 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3910 false);
3911 if (err)
3912 return err;
3913
3914 /* Add new parser entry */
3915 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3916 if (err)
3917 return err;
3918
3919 /* Set addr in the device */
3920 ether_addr_copy(dev->dev_addr, da);
3921
3922 return 0;
3923}
3924
3925/* Delete all port's multicast simple (not range) entries */
3926static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3927{
3928 struct mvpp2_prs_entry pe;
3929 int index, tid;
3930
3931 for (tid = MVPP2_PE_FIRST_FREE_TID;
3932 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3933 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3934
3935 if (!priv->prs_shadow[tid].valid ||
3936 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3937 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3938 continue;
3939
3940 /* Only simple mac entries */
3941 pe.index = tid;
3942 mvpp2_prs_hw_read(priv, &pe);
3943
3944 /* Read mac addr from entry */
3945 for (index = 0; index < ETH_ALEN; index++)
3946 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3947 &da_mask[index]);
3948
3949 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3950 /* Delete this entry */
3951 mvpp2_prs_mac_da_accept(priv, port, da, false);
3952 }
3953}
3954
3955static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3956{
3957 switch (type) {
3958 case MVPP2_TAG_TYPE_EDSA:
3959 /* Add port to EDSA entries */
3960 mvpp2_prs_dsa_tag_set(priv, port, true,
3961 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3962 mvpp2_prs_dsa_tag_set(priv, port, true,
3963 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3964 /* Remove port from DSA entries */
3965 mvpp2_prs_dsa_tag_set(priv, port, false,
3966 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3967 mvpp2_prs_dsa_tag_set(priv, port, false,
3968 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3969 break;
3970
3971 case MVPP2_TAG_TYPE_DSA:
3972 /* Add port to DSA entries */
3973 mvpp2_prs_dsa_tag_set(priv, port, true,
3974 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3975 mvpp2_prs_dsa_tag_set(priv, port, true,
3976 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3977 /* Remove port from EDSA entries */
3978 mvpp2_prs_dsa_tag_set(priv, port, false,
3979 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3980 mvpp2_prs_dsa_tag_set(priv, port, false,
3981 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3982 break;
3983
3984 case MVPP2_TAG_TYPE_MH:
3985 case MVPP2_TAG_TYPE_NONE:
3986 /* Remove port form EDSA and DSA entries */
3987 mvpp2_prs_dsa_tag_set(priv, port, false,
3988 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3989 mvpp2_prs_dsa_tag_set(priv, port, false,
3990 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3991 mvpp2_prs_dsa_tag_set(priv, port, false,
3992 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3993 mvpp2_prs_dsa_tag_set(priv, port, false,
3994 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3995 break;
3996
3997 default:
3998 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3999 return -EINVAL;
4000 }
4001
4002 return 0;
4003}
4004
4005/* Set prs flow for the port */
4006static int mvpp2_prs_def_flow(struct mvpp2_port *port)
4007{
4008 struct mvpp2_prs_entry *pe;
4009 int tid;
4010
4011 pe = mvpp2_prs_flow_find(port->priv, port->id);
4012
4013 /* Such entry not exist */
4014 if (!pe) {
4015 /* Go through the all entires from last to first */
4016 tid = mvpp2_prs_tcam_first_free(port->priv,
4017 MVPP2_PE_LAST_FREE_TID,
4018 MVPP2_PE_FIRST_FREE_TID);
4019 if (tid < 0)
4020 return tid;
4021
4022 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
4023 if (!pe)
4024 return -ENOMEM;
4025
4026 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
4027 pe->index = tid;
4028
4029 /* Set flow ID*/
4030 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
4031 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
4032
4033 /* Update shadow table */
4034 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
4035 }
4036
4037 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
4038 mvpp2_prs_hw_write(port->priv, pe);
4039 kfree(pe);
4040
4041 return 0;
4042}
4043
4044/* Classifier configuration routines */
4045
4046/* Update classification flow table registers */
4047static void mvpp2_cls_flow_write(struct mvpp2 *priv,
4048 struct mvpp2_cls_flow_entry *fe)
4049{
4050 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
4051 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
4052 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
4053 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
4054}
4055
4056/* Update classification lookup table register */
4057static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
4058 struct mvpp2_cls_lookup_entry *le)
4059{
4060 u32 val;
4061
4062 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
4063 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
4064 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
4065}
4066
4067/* Classifier default initialization */
4068static void mvpp2_cls_init(struct mvpp2 *priv)
4069{
4070 struct mvpp2_cls_lookup_entry le;
4071 struct mvpp2_cls_flow_entry fe;
4072 int index;
4073
4074 /* Enable classifier */
4075 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4076
4077 /* Clear classifier flow table */
Arnd Bergmanne8f967c2016-11-24 17:28:12 +01004078 memset(&fe.data, 0, sizeof(fe.data));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004079 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4080 fe.index = index;
4081 mvpp2_cls_flow_write(priv, &fe);
4082 }
4083
4084 /* Clear classifier lookup table */
4085 le.data = 0;
4086 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4087 le.lkpid = index;
4088 le.way = 0;
4089 mvpp2_cls_lookup_write(priv, &le);
4090
4091 le.way = 1;
4092 mvpp2_cls_lookup_write(priv, &le);
4093 }
4094}
4095
4096static void mvpp2_cls_port_config(struct mvpp2_port *port)
4097{
4098 struct mvpp2_cls_lookup_entry le;
4099 u32 val;
4100
4101 /* Set way for the port */
4102 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
4103 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
4104 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
4105
4106 /* Pick the entry to be accessed in lookup ID decoding table
4107 * according to the way and lkpid.
4108 */
4109 le.lkpid = port->id;
4110 le.way = 0;
4111 le.data = 0;
4112
4113 /* Set initial CPU queue for receiving packets */
4114 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
4115 le.data |= port->first_rxq;
4116
4117 /* Disable classification engines */
4118 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
4119
4120 /* Update lookup ID table entry */
4121 mvpp2_cls_lookup_write(port->priv, &le);
4122}
4123
4124/* Set CPU queue number for oversize packets */
4125static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
4126{
4127 u32 val;
4128
4129 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
4130 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
4131
4132 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
4133 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
4134
4135 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
4136 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
4137 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
4138}
4139
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004140static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
4141{
4142 if (likely(pool->frag_size <= PAGE_SIZE))
4143 return netdev_alloc_frag(pool->frag_size);
4144 else
4145 return kmalloc(pool->frag_size, GFP_ATOMIC);
4146}
4147
4148static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
4149{
4150 if (likely(pool->frag_size <= PAGE_SIZE))
4151 skb_free_frag(data);
4152 else
4153 kfree(data);
4154}
4155
Marcin Wojtas3f518502014-07-10 16:52:13 -03004156/* Buffer Manager configuration routines */
4157
4158/* Create pool */
4159static int mvpp2_bm_pool_create(struct platform_device *pdev,
4160 struct mvpp2 *priv,
4161 struct mvpp2_bm_pool *bm_pool, int size)
4162{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004163 u32 val;
4164
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004165 /* Number of buffer pointers must be a multiple of 16, as per
4166 * hardware constraints
4167 */
4168 if (!IS_ALIGNED(size, 16))
4169 return -EINVAL;
4170
4171 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
4172 * bytes per buffer pointer
4173 */
4174 if (priv->hw_version == MVPP21)
4175 bm_pool->size_bytes = 2 * sizeof(u32) * size;
4176 else
4177 bm_pool->size_bytes = 2 * sizeof(u64) * size;
4178
4179 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004180 &bm_pool->dma_addr,
Marcin Wojtas3f518502014-07-10 16:52:13 -03004181 GFP_KERNEL);
4182 if (!bm_pool->virt_addr)
4183 return -ENOMEM;
4184
Thomas Petazzonid3158802017-02-21 11:28:13 +01004185 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
4186 MVPP2_BM_POOL_PTR_ALIGN)) {
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004187 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
4188 bm_pool->virt_addr, bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004189 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
4190 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
4191 return -ENOMEM;
4192 }
4193
4194 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004195 lower_32_bits(bm_pool->dma_addr));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004196 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
4197
4198 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
4199 val |= MVPP2_BM_START_MASK;
4200 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
4201
Marcin Wojtas3f518502014-07-10 16:52:13 -03004202 bm_pool->size = size;
4203 bm_pool->pkt_size = 0;
4204 bm_pool->buf_num = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004205
4206 return 0;
4207}
4208
4209/* Set pool buffer size */
4210static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
4211 struct mvpp2_bm_pool *bm_pool,
4212 int buf_size)
4213{
4214 u32 val;
4215
4216 bm_pool->buf_size = buf_size;
4217
4218 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
4219 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
4220}
4221
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004222static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
4223 struct mvpp2_bm_pool *bm_pool,
4224 dma_addr_t *dma_addr,
4225 phys_addr_t *phys_addr)
4226{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004227 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01004228
4229 *dma_addr = mvpp2_percpu_read(priv, cpu,
4230 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
4231 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004232
4233 if (priv->hw_version == MVPP22) {
4234 u32 val;
4235 u32 dma_addr_highbits, phys_addr_highbits;
4236
Thomas Petazzonia7868412017-03-07 16:53:13 +01004237 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004238 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
4239 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
4240 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
4241
4242 if (sizeof(dma_addr_t) == 8)
4243 *dma_addr |= (u64)dma_addr_highbits << 32;
4244
4245 if (sizeof(phys_addr_t) == 8)
4246 *phys_addr |= (u64)phys_addr_highbits << 32;
4247 }
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004248
4249 put_cpu();
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004250}
4251
Ezequiel Garcia7861f122014-07-21 13:48:14 -03004252/* Free all buffers from the pool */
Marcin Wojtas4229d502015-12-03 15:20:50 +01004253static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
4254 struct mvpp2_bm_pool *bm_pool)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004255{
4256 int i;
4257
Ezequiel Garcia7861f122014-07-21 13:48:14 -03004258 for (i = 0; i < bm_pool->buf_num; i++) {
Thomas Petazzoni20396132017-03-07 16:53:00 +01004259 dma_addr_t buf_dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004260 phys_addr_t buf_phys_addr;
4261 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004262
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004263 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
4264 &buf_dma_addr, &buf_phys_addr);
Marcin Wojtas4229d502015-12-03 15:20:50 +01004265
Thomas Petazzoni20396132017-03-07 16:53:00 +01004266 dma_unmap_single(dev, buf_dma_addr,
Marcin Wojtas4229d502015-12-03 15:20:50 +01004267 bm_pool->buf_size, DMA_FROM_DEVICE);
4268
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004269 data = (void *)phys_to_virt(buf_phys_addr);
4270 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004271 break;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004272
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004273 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004274 }
4275
4276 /* Update BM driver with number of buffers removed from pool */
4277 bm_pool->buf_num -= i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004278}
4279
4280/* Cleanup pool */
4281static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
4282 struct mvpp2 *priv,
4283 struct mvpp2_bm_pool *bm_pool)
4284{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004285 u32 val;
4286
Marcin Wojtas4229d502015-12-03 15:20:50 +01004287 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03004288 if (bm_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004289 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
4290 return 0;
4291 }
4292
4293 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
4294 val |= MVPP2_BM_STOP_MASK;
4295 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
4296
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004297 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
Marcin Wojtas3f518502014-07-10 16:52:13 -03004298 bm_pool->virt_addr,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004299 bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004300 return 0;
4301}
4302
4303static int mvpp2_bm_pools_init(struct platform_device *pdev,
4304 struct mvpp2 *priv)
4305{
4306 int i, err, size;
4307 struct mvpp2_bm_pool *bm_pool;
4308
4309 /* Create all pools with maximum size */
4310 size = MVPP2_BM_POOL_SIZE_MAX;
4311 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4312 bm_pool = &priv->bm_pools[i];
4313 bm_pool->id = i;
4314 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
4315 if (err)
4316 goto err_unroll_pools;
4317 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
4318 }
4319 return 0;
4320
4321err_unroll_pools:
4322 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
4323 for (i = i - 1; i >= 0; i--)
4324 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
4325 return err;
4326}
4327
4328static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
4329{
4330 int i, err;
4331
4332 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
4333 /* Mask BM all interrupts */
4334 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
4335 /* Clear BM cause register */
4336 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
4337 }
4338
4339 /* Allocate and initialize BM pools */
4340 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
Markus Elfring81f915e2017-04-17 09:06:33 +02004341 sizeof(*priv->bm_pools), GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004342 if (!priv->bm_pools)
4343 return -ENOMEM;
4344
4345 err = mvpp2_bm_pools_init(pdev, priv);
4346 if (err < 0)
4347 return err;
4348 return 0;
4349}
4350
Stefan Chulski01d04932018-03-05 15:16:50 +01004351static void mvpp2_setup_bm_pool(void)
4352{
4353 /* Short pool */
4354 mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
4355 mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
4356
4357 /* Long pool */
4358 mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
4359 mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
4360}
4361
Marcin Wojtas3f518502014-07-10 16:52:13 -03004362/* Attach long pool to rxq */
4363static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
4364 int lrxq, int long_pool)
4365{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004366 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004367 int prxq;
4368
4369 /* Get queue physical ID */
4370 prxq = port->rxqs[lrxq]->id;
4371
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004372 if (port->priv->hw_version == MVPP21)
4373 mask = MVPP21_RXQ_POOL_LONG_MASK;
4374 else
4375 mask = MVPP22_RXQ_POOL_LONG_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004376
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004377 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4378 val &= ~mask;
4379 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004380 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4381}
4382
4383/* Attach short pool to rxq */
4384static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
4385 int lrxq, int short_pool)
4386{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004387 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004388 int prxq;
4389
4390 /* Get queue physical ID */
4391 prxq = port->rxqs[lrxq]->id;
4392
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004393 if (port->priv->hw_version == MVPP21)
4394 mask = MVPP21_RXQ_POOL_SHORT_MASK;
4395 else
4396 mask = MVPP22_RXQ_POOL_SHORT_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004397
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01004398 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4399 val &= ~mask;
4400 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004401 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4402}
4403
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004404static void *mvpp2_buf_alloc(struct mvpp2_port *port,
4405 struct mvpp2_bm_pool *bm_pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004406 dma_addr_t *buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004407 phys_addr_t *buf_phys_addr,
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004408 gfp_t gfp_mask)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004409{
Thomas Petazzoni20396132017-03-07 16:53:00 +01004410 dma_addr_t dma_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004411 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004412
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004413 data = mvpp2_frag_alloc(bm_pool);
4414 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004415 return NULL;
4416
Thomas Petazzoni20396132017-03-07 16:53:00 +01004417 dma_addr = dma_map_single(port->dev->dev.parent, data,
4418 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
4419 DMA_FROM_DEVICE);
4420 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004421 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004422 return NULL;
4423 }
Thomas Petazzoni20396132017-03-07 16:53:00 +01004424 *buf_dma_addr = dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004425 *buf_phys_addr = virt_to_phys(data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004426
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004427 return data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004428}
4429
Marcin Wojtas3f518502014-07-10 16:52:13 -03004430/* Release buffer to BM */
4431static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01004432 dma_addr_t buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004433 phys_addr_t buf_phys_addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004434{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004435 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01004436
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004437 if (port->priv->hw_version == MVPP22) {
4438 u32 val = 0;
4439
4440 if (sizeof(dma_addr_t) == 8)
4441 val |= upper_32_bits(buf_dma_addr) &
4442 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
4443
4444 if (sizeof(phys_addr_t) == 8)
4445 val |= (upper_32_bits(buf_phys_addr)
4446 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
4447 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
4448
Thomas Petazzonia7868412017-03-07 16:53:13 +01004449 mvpp2_percpu_write(port->priv, cpu,
4450 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01004451 }
4452
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004453 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
4454 * returned in the "cookie" field of the RX
4455 * descriptor. Instead of storing the virtual address, we
4456 * store the physical address
4457 */
Thomas Petazzonia7868412017-03-07 16:53:13 +01004458 mvpp2_percpu_write(port->priv, cpu,
4459 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
4460 mvpp2_percpu_write(port->priv, cpu,
4461 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004462
4463 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03004464}
4465
Marcin Wojtas3f518502014-07-10 16:52:13 -03004466/* Allocate buffers for the pool */
4467static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
4468 struct mvpp2_bm_pool *bm_pool, int buf_num)
4469{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004470 int i, buf_size, total_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01004471 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004472 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004473 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004474
4475 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
4476 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
4477
4478 if (buf_num < 0 ||
4479 (buf_num + bm_pool->buf_num > bm_pool->size)) {
4480 netdev_err(port->dev,
4481 "cannot allocate %d buffers for pool %d\n",
4482 buf_num, bm_pool->id);
4483 return 0;
4484 }
4485
Marcin Wojtas3f518502014-07-10 16:52:13 -03004486 for (i = 0; i < buf_num; i++) {
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004487 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4488 &phys_addr, GFP_KERNEL);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004489 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004490 break;
4491
Thomas Petazzoni20396132017-03-07 16:53:00 +01004492 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004493 phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004494 }
4495
4496 /* Update BM driver with number of buffers added to pool */
4497 bm_pool->buf_num += i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004498
4499 netdev_dbg(port->dev,
Stefan Chulski01d04932018-03-05 15:16:50 +01004500 "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
Marcin Wojtas3f518502014-07-10 16:52:13 -03004501 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4502
4503 netdev_dbg(port->dev,
Stefan Chulski01d04932018-03-05 15:16:50 +01004504 "pool %d: %d of %d buffers added\n",
Marcin Wojtas3f518502014-07-10 16:52:13 -03004505 bm_pool->id, i, buf_num);
4506 return i;
4507}
4508
4509/* Notify the driver that BM pool is being used as specific type and return the
4510 * pool pointer on success
4511 */
4512static struct mvpp2_bm_pool *
Stefan Chulski01d04932018-03-05 15:16:50 +01004513mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004514{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004515 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4516 int num;
4517
Stefan Chulski01d04932018-03-05 15:16:50 +01004518 if (pool >= MVPP2_BM_POOLS_NUM) {
4519 netdev_err(port->dev, "Invalid pool %d\n", pool);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004520 return NULL;
4521 }
4522
Marcin Wojtas3f518502014-07-10 16:52:13 -03004523 /* Allocate buffers in case BM pool is used as long pool, but packet
4524 * size doesn't match MTU or BM pool hasn't being used yet
4525 */
Stefan Chulski01d04932018-03-05 15:16:50 +01004526 if (new_pool->pkt_size == 0) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004527 int pkts_num;
4528
4529 /* Set default buffer number or free all the buffers in case
4530 * the pool is not empty
4531 */
4532 pkts_num = new_pool->buf_num;
4533 if (pkts_num == 0)
Stefan Chulski01d04932018-03-05 15:16:50 +01004534 pkts_num = mvpp2_pools[pool].buf_num;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004535 else
Marcin Wojtas4229d502015-12-03 15:20:50 +01004536 mvpp2_bm_bufs_free(port->dev->dev.parent,
4537 port->priv, new_pool);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004538
4539 new_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004540 new_pool->frag_size =
4541 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4542 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004543
4544 /* Allocate buffers for this pool */
4545 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4546 if (num != pkts_num) {
4547 WARN(1, "pool %d: %d of %d allocated\n",
4548 new_pool->id, num, pkts_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004549 return NULL;
4550 }
4551 }
4552
4553 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4554 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4555
Marcin Wojtas3f518502014-07-10 16:52:13 -03004556 return new_pool;
4557}
4558
4559/* Initialize pools for swf */
4560static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4561{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004562 int rxq;
4563
4564 if (!port->pool_long) {
4565 port->pool_long =
Stefan Chulski01d04932018-03-05 15:16:50 +01004566 mvpp2_bm_pool_use(port, MVPP2_BM_LONG,
4567 mvpp2_pools[MVPP2_BM_LONG].pkt_size);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004568 if (!port->pool_long)
4569 return -ENOMEM;
4570
Marcin Wojtas3f518502014-07-10 16:52:13 -03004571 port->pool_long->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004572
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004573 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004574 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4575 }
4576
4577 if (!port->pool_short) {
4578 port->pool_short =
Stefan Chulski01d04932018-03-05 15:16:50 +01004579 mvpp2_bm_pool_use(port, MVPP2_BM_SHORT,
4580 mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004581 if (!port->pool_short)
4582 return -ENOMEM;
4583
Marcin Wojtas3f518502014-07-10 16:52:13 -03004584 port->pool_short->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004585
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004586 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004587 mvpp2_rxq_short_pool_set(port, rxq,
4588 port->pool_short->id);
4589 }
4590
4591 return 0;
4592}
4593
4594static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4595{
4596 struct mvpp2_port *port = netdev_priv(dev);
4597 struct mvpp2_bm_pool *port_pool = port->pool_long;
4598 int num, pkts_num = port_pool->buf_num;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004599
4600 /* Update BM pool with new buffer size */
Marcin Wojtas4229d502015-12-03 15:20:50 +01004601 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03004602 if (port_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004603 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
4604 return -EIO;
4605 }
4606
Marcin Wojtas3f518502014-07-10 16:52:13 -03004607 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
4608 if (num != pkts_num) {
4609 WARN(1, "pool %d: %d of %d allocated\n",
4610 port_pool->id, num, pkts_num);
4611 return -EIO;
4612 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004613 dev->mtu = mtu;
4614 netdev_update_features(dev);
4615 return 0;
4616}
4617
4618static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4619{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004620 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004621
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004622 for (i = 0; i < port->nqvecs; i++)
4623 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4624
Marcin Wojtas3f518502014-07-10 16:52:13 -03004625 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004626 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004627}
4628
4629static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4630{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004631 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004632
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004633 for (i = 0; i < port->nqvecs; i++)
4634 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4635
Marcin Wojtas3f518502014-07-10 16:52:13 -03004636 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004637 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
4638}
4639
4640static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
4641{
4642 struct mvpp2_port *port = qvec->port;
4643
4644 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4645 MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
4646}
4647
4648static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
4649{
4650 struct mvpp2_port *port = qvec->port;
4651
4652 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4653 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004654}
4655
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004656/* Mask the current CPU's Rx/Tx interrupts
4657 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4658 * using smp_processor_id() is OK.
4659 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004660static void mvpp2_interrupts_mask(void *arg)
4661{
4662 struct mvpp2_port *port = arg;
4663
Thomas Petazzonia7868412017-03-07 16:53:13 +01004664 mvpp2_percpu_write(port->priv, smp_processor_id(),
4665 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004666}
4667
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004668/* Unmask the current CPU's Rx/Tx interrupts.
4669 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4670 * using smp_processor_id() is OK.
4671 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004672static void mvpp2_interrupts_unmask(void *arg)
4673{
4674 struct mvpp2_port *port = arg;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004675 u32 val;
4676
4677 val = MVPP2_CAUSE_MISC_SUM_MASK |
4678 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4679 if (port->has_tx_irqs)
4680 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004681
Thomas Petazzonia7868412017-03-07 16:53:13 +01004682 mvpp2_percpu_write(port->priv, smp_processor_id(),
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004683 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4684}
4685
4686static void
4687mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
4688{
4689 u32 val;
4690 int i;
4691
4692 if (port->priv->hw_version != MVPP22)
4693 return;
4694
4695 if (mask)
4696 val = 0;
4697 else
4698 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4699
4700 for (i = 0; i < port->nqvecs; i++) {
4701 struct mvpp2_queue_vector *v = port->qvecs + i;
4702
4703 if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
4704 continue;
4705
4706 mvpp2_percpu_write(port->priv, v->sw_thread_id,
4707 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4708 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004709}
4710
4711/* Port configuration routines */
4712
Antoine Ténartf84bf382017-08-22 19:08:27 +02004713static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
4714{
4715 struct mvpp2 *priv = port->priv;
4716 u32 val;
4717
4718 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4719 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
4720 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4721
4722 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4723 if (port->gop_id == 2)
4724 val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
4725 else if (port->gop_id == 3)
4726 val |= GENCONF_CTRL0_PORT1_RGMII_MII;
4727 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4728}
4729
4730static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
4731{
4732 struct mvpp2 *priv = port->priv;
4733 u32 val;
4734
4735 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4736 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
4737 GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
4738 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4739
4740 if (port->gop_id > 1) {
4741 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4742 if (port->gop_id == 2)
4743 val &= ~GENCONF_CTRL0_PORT0_RGMII;
4744 else if (port->gop_id == 3)
4745 val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
4746 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4747 }
4748}
4749
4750static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
4751{
4752 struct mvpp2 *priv = port->priv;
4753 void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
4754 void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
4755 u32 val;
4756
4757 /* XPCS */
4758 val = readl(xpcs + MVPP22_XPCS_CFG0);
4759 val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
4760 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
4761 val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
4762 writel(val, xpcs + MVPP22_XPCS_CFG0);
4763
4764 /* MPCS */
4765 val = readl(mpcs + MVPP22_MPCS_CTRL);
4766 val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
4767 writel(val, mpcs + MVPP22_MPCS_CTRL);
4768
4769 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
4770 val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
4771 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
4772 val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
4773 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4774
4775 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
4776 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
4777 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4778}
4779
4780static int mvpp22_gop_init(struct mvpp2_port *port)
4781{
4782 struct mvpp2 *priv = port->priv;
4783 u32 val;
4784
4785 if (!priv->sysctrl_base)
4786 return 0;
4787
4788 switch (port->phy_interface) {
4789 case PHY_INTERFACE_MODE_RGMII:
4790 case PHY_INTERFACE_MODE_RGMII_ID:
4791 case PHY_INTERFACE_MODE_RGMII_RXID:
4792 case PHY_INTERFACE_MODE_RGMII_TXID:
4793 if (port->gop_id == 0)
4794 goto invalid_conf;
4795 mvpp22_gop_init_rgmii(port);
4796 break;
4797 case PHY_INTERFACE_MODE_SGMII:
4798 mvpp22_gop_init_sgmii(port);
4799 break;
4800 case PHY_INTERFACE_MODE_10GKR:
4801 if (port->gop_id != 0)
4802 goto invalid_conf;
4803 mvpp22_gop_init_10gkr(port);
4804 break;
4805 default:
4806 goto unsupported_conf;
4807 }
4808
4809 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
4810 val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
4811 GENCONF_PORT_CTRL1_EN(port->gop_id);
4812 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
4813
4814 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4815 val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
4816 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4817
4818 regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
4819 val |= GENCONF_SOFT_RESET1_GOP;
4820 regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
4821
4822unsupported_conf:
4823 return 0;
4824
4825invalid_conf:
4826 netdev_err(port->dev, "Invalid port configuration\n");
4827 return -EINVAL;
4828}
4829
Antoine Tenartfd3651b2017-09-01 11:04:54 +02004830static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
4831{
4832 u32 val;
4833
4834 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4835 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4836 /* Enable the GMAC link status irq for this port */
4837 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4838 val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4839 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4840 }
4841
4842 if (port->gop_id == 0) {
4843 /* Enable the XLG/GIG irqs for this port */
4844 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4845 if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4846 val |= MVPP22_XLG_EXT_INT_MASK_XLG;
4847 else
4848 val |= MVPP22_XLG_EXT_INT_MASK_GIG;
4849 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4850 }
4851}
4852
4853static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
4854{
4855 u32 val;
4856
4857 if (port->gop_id == 0) {
4858 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4859 val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
4860 MVPP22_XLG_EXT_INT_MASK_GIG);
4861 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4862 }
4863
4864 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4865 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4866 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4867 val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4868 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4869 }
4870}
4871
4872static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
4873{
4874 u32 val;
4875
4876 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4877 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4878 val = readl(port->base + MVPP22_GMAC_INT_MASK);
4879 val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
4880 writel(val, port->base + MVPP22_GMAC_INT_MASK);
4881 }
4882
4883 if (port->gop_id == 0) {
4884 val = readl(port->base + MVPP22_XLG_INT_MASK);
4885 val |= MVPP22_XLG_INT_MASK_LINK;
4886 writel(val, port->base + MVPP22_XLG_INT_MASK);
4887 }
4888
4889 mvpp22_gop_unmask_irq(port);
4890}
4891
Antoine Tenart542897d2017-08-30 10:29:15 +02004892static int mvpp22_comphy_init(struct mvpp2_port *port)
4893{
4894 enum phy_mode mode;
4895 int ret;
4896
4897 if (!port->comphy)
4898 return 0;
4899
4900 switch (port->phy_interface) {
4901 case PHY_INTERFACE_MODE_SGMII:
4902 mode = PHY_MODE_SGMII;
4903 break;
4904 case PHY_INTERFACE_MODE_10GKR:
4905 mode = PHY_MODE_10GKR;
4906 break;
4907 default:
4908 return -EINVAL;
4909 }
4910
4911 ret = phy_set_mode(port->comphy, mode);
4912 if (ret)
4913 return ret;
4914
4915 return phy_power_on(port->comphy);
4916}
4917
Antoine Ténart39193572017-08-22 19:08:24 +02004918static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
4919{
4920 u32 val;
4921
4922 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4923 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4924 val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
4925 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4926 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4927 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
Antoine Tenart1df22702017-09-01 11:04:52 +02004928 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
Antoine Ténart39193572017-08-22 19:08:24 +02004929 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4930 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
4931 MVPP22_CTRL4_SYNC_BYPASS_DIS |
4932 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4933 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4934 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
Antoine Ténart39193572017-08-22 19:08:24 +02004935 }
4936
4937 /* The port is connected to a copper PHY */
4938 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4939 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
4940 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4941
4942 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4943 val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
4944 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
4945 MVPP2_GMAC_AN_DUPLEX_EN;
4946 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4947 val |= MVPP2_GMAC_IN_BAND_AUTONEG;
4948 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4949}
4950
4951static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
4952{
4953 u32 val;
4954
4955 /* Force link down */
4956 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4957 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
4958 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
4959 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4960
4961 /* Set the GMAC in a reset state */
4962 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4963 val |= MVPP2_GMAC_PORT_RESET_MASK;
4964 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4965
4966 /* Configure the PCS and in-band AN */
4967 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4968 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4969 val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
Antoine Tenart1df22702017-09-01 11:04:52 +02004970 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
Antoine Ténart39193572017-08-22 19:08:24 +02004971 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
Antoine Ténart39193572017-08-22 19:08:24 +02004972 }
4973 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4974
4975 mvpp2_port_mii_gmac_configure_mode(port);
4976
4977 /* Unset the GMAC reset state */
4978 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4979 val &= ~MVPP2_GMAC_PORT_RESET_MASK;
4980 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4981
4982 /* Stop forcing link down */
4983 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4984 val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
4985 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4986}
4987
Antoine Ténart77321952017-08-22 19:08:25 +02004988static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
4989{
4990 u32 val;
4991
4992 if (port->gop_id != 0)
4993 return;
4994
4995 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4996 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
4997 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4998
4999 val = readl(port->base + MVPP22_XLG_CTRL4_REG);
5000 val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
5001 val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
5002 writel(val, port->base + MVPP22_XLG_CTRL4_REG);
5003}
5004
Thomas Petazzoni26975822017-03-07 16:53:14 +01005005static void mvpp22_port_mii_set(struct mvpp2_port *port)
5006{
5007 u32 val;
5008
Thomas Petazzoni26975822017-03-07 16:53:14 +01005009 /* Only GOP port 0 has an XLG MAC */
5010 if (port->gop_id == 0) {
5011 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
5012 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
Antoine Ténart725757a2017-06-12 16:01:39 +02005013
5014 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5015 port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5016 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
5017 else
5018 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
5019
Thomas Petazzoni26975822017-03-07 16:53:14 +01005020 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
5021 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01005022}
5023
Marcin Wojtas3f518502014-07-10 16:52:13 -03005024static void mvpp2_port_mii_set(struct mvpp2_port *port)
5025{
Thomas Petazzoni26975822017-03-07 16:53:14 +01005026 if (port->priv->hw_version == MVPP22)
5027 mvpp22_port_mii_set(port);
5028
Antoine Tenart1df22702017-09-01 11:04:52 +02005029 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
Antoine Ténart39193572017-08-22 19:08:24 +02005030 port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5031 mvpp2_port_mii_gmac_configure(port);
Antoine Ténart77321952017-08-22 19:08:25 +02005032 else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5033 mvpp2_port_mii_xlg_configure(port);
Marcin Wojtas08a23752014-07-21 13:48:12 -03005034}
5035
5036static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
5037{
5038 u32 val;
5039
5040 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5041 val |= MVPP2_GMAC_FC_ADV_EN;
5042 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005043}
5044
5045static void mvpp2_port_enable(struct mvpp2_port *port)
5046{
5047 u32 val;
5048
Antoine Ténart725757a2017-06-12 16:01:39 +02005049 /* Only GOP port 0 has an XLG MAC */
5050 if (port->gop_id == 0 &&
5051 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5052 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
5053 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5054 val |= MVPP22_XLG_CTRL0_PORT_EN |
5055 MVPP22_XLG_CTRL0_MAC_RESET_DIS;
5056 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
5057 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5058 } else {
5059 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5060 val |= MVPP2_GMAC_PORT_EN_MASK;
5061 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
5062 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5063 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005064}
5065
5066static void mvpp2_port_disable(struct mvpp2_port *port)
5067{
5068 u32 val;
5069
Antoine Ténart725757a2017-06-12 16:01:39 +02005070 /* Only GOP port 0 has an XLG MAC */
5071 if (port->gop_id == 0 &&
5072 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5073 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
5074 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5075 val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
5076 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
5077 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5078 } else {
5079 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5080 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
5081 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5082 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005083}
5084
5085/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
5086static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
5087{
5088 u32 val;
5089
5090 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
5091 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
5092 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5093}
5094
5095/* Configure loopback port */
5096static void mvpp2_port_loopback_set(struct mvpp2_port *port)
5097{
5098 u32 val;
5099
5100 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5101
5102 if (port->speed == 1000)
5103 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
5104 else
5105 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
5106
5107 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5108 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
5109 else
5110 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
5111
5112 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5113}
5114
Miquel Raynal118d6292017-11-06 22:56:53 +01005115struct mvpp2_ethtool_counter {
5116 unsigned int offset;
5117 const char string[ETH_GSTRING_LEN];
5118 bool reg_is_64b;
5119};
5120
5121static u64 mvpp2_read_count(struct mvpp2_port *port,
5122 const struct mvpp2_ethtool_counter *counter)
5123{
5124 u64 val;
5125
5126 val = readl(port->stats_base + counter->offset);
5127 if (counter->reg_is_64b)
5128 val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
5129
5130 return val;
5131}
5132
5133/* Due to the fact that software statistics and hardware statistics are, by
5134 * design, incremented at different moments in the chain of packet processing,
5135 * it is very likely that incoming packets could have been dropped after being
5136 * counted by hardware but before reaching software statistics (most probably
5137 * multicast packets), and in the oppposite way, during transmission, FCS bytes
5138 * are added in between as well as TSO skb will be split and header bytes added.
5139 * Hence, statistics gathered from userspace with ifconfig (software) and
5140 * ethtool (hardware) cannot be compared.
5141 */
5142static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
5143 { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
5144 { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
5145 { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
5146 { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
5147 { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
5148 { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
5149 { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
5150 { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
5151 { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
5152 { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
5153 { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
5154 { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
5155 { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
5156 { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
5157 { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
5158 { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
5159 { MVPP2_MIB_FC_SENT, "fc_sent" },
5160 { MVPP2_MIB_FC_RCVD, "fc_received" },
5161 { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
5162 { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
5163 { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
5164 { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
5165 { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
5166 { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
5167 { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
5168 { MVPP2_MIB_COLLISION, "collision" },
5169 { MVPP2_MIB_LATE_COLLISION, "late_collision" },
5170};
5171
5172static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
5173 u8 *data)
5174{
5175 if (sset == ETH_SS_STATS) {
5176 int i;
5177
5178 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5179 memcpy(data + i * ETH_GSTRING_LEN,
5180 &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
5181 }
5182}
5183
5184static void mvpp2_gather_hw_statistics(struct work_struct *work)
5185{
5186 struct delayed_work *del_work = to_delayed_work(work);
Miquel Raynale5c500e2017-11-08 08:59:40 +01005187 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
5188 stats_work);
Miquel Raynal118d6292017-11-06 22:56:53 +01005189 u64 *pstats;
Miquel Raynale5c500e2017-11-08 08:59:40 +01005190 int i;
Miquel Raynal118d6292017-11-06 22:56:53 +01005191
Miquel Raynale5c500e2017-11-08 08:59:40 +01005192 mutex_lock(&port->gather_stats_lock);
Miquel Raynal118d6292017-11-06 22:56:53 +01005193
Miquel Raynale5c500e2017-11-08 08:59:40 +01005194 pstats = port->ethtool_stats;
5195 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5196 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
Miquel Raynal118d6292017-11-06 22:56:53 +01005197
5198 /* No need to read again the counters right after this function if it
5199 * was called asynchronously by the user (ie. use of ethtool).
5200 */
Miquel Raynale5c500e2017-11-08 08:59:40 +01005201 cancel_delayed_work(&port->stats_work);
5202 queue_delayed_work(port->priv->stats_queue, &port->stats_work,
Miquel Raynal118d6292017-11-06 22:56:53 +01005203 MVPP2_MIB_COUNTERS_STATS_DELAY);
5204
Miquel Raynale5c500e2017-11-08 08:59:40 +01005205 mutex_unlock(&port->gather_stats_lock);
Miquel Raynal118d6292017-11-06 22:56:53 +01005206}
5207
5208static void mvpp2_ethtool_get_stats(struct net_device *dev,
5209 struct ethtool_stats *stats, u64 *data)
5210{
5211 struct mvpp2_port *port = netdev_priv(dev);
5212
Miquel Raynale5c500e2017-11-08 08:59:40 +01005213 /* Update statistics for the given port, then take the lock to avoid
5214 * concurrent accesses on the ethtool_stats structure during its copy.
5215 */
5216 mvpp2_gather_hw_statistics(&port->stats_work.work);
Miquel Raynal118d6292017-11-06 22:56:53 +01005217
Miquel Raynale5c500e2017-11-08 08:59:40 +01005218 mutex_lock(&port->gather_stats_lock);
Miquel Raynal118d6292017-11-06 22:56:53 +01005219 memcpy(data, port->ethtool_stats,
5220 sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
Miquel Raynale5c500e2017-11-08 08:59:40 +01005221 mutex_unlock(&port->gather_stats_lock);
Miquel Raynal118d6292017-11-06 22:56:53 +01005222}
5223
5224static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
5225{
5226 if (sset == ETH_SS_STATS)
5227 return ARRAY_SIZE(mvpp2_ethtool_regs);
5228
5229 return -EOPNOTSUPP;
5230}
5231
Marcin Wojtas3f518502014-07-10 16:52:13 -03005232static void mvpp2_port_reset(struct mvpp2_port *port)
5233{
5234 u32 val;
Miquel Raynal118d6292017-11-06 22:56:53 +01005235 unsigned int i;
5236
5237 /* Read the GOP statistics to reset the hardware counters */
5238 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5239 mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005240
5241 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5242 ~MVPP2_GMAC_PORT_RESET_MASK;
5243 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5244
5245 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5246 MVPP2_GMAC_PORT_RESET_MASK)
5247 continue;
5248}
5249
5250/* Change maximum receive size of the port */
5251static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
5252{
5253 u32 val;
5254
5255 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5256 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
5257 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
5258 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
5259 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5260}
5261
Stefan Chulski76eb1b12017-08-22 19:08:26 +02005262/* Change maximum receive size of the port */
5263static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
5264{
5265 u32 val;
5266
5267 val = readl(port->base + MVPP22_XLG_CTRL1_REG);
5268 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
5269 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
Antoine Ténartec15ecd2017-08-25 15:24:46 +02005270 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
Stefan Chulski76eb1b12017-08-22 19:08:26 +02005271 writel(val, port->base + MVPP22_XLG_CTRL1_REG);
5272}
5273
Marcin Wojtas3f518502014-07-10 16:52:13 -03005274/* Set defaults to the MVPP2 port */
5275static void mvpp2_defaults_set(struct mvpp2_port *port)
5276{
5277 int tx_port_num, val, queue, ptxq, lrxq;
5278
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01005279 if (port->priv->hw_version == MVPP21) {
5280 /* Configure port to loopback if needed */
5281 if (port->flags & MVPP2_F_LOOPBACK)
5282 mvpp2_port_loopback_set(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005283
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01005284 /* Update TX FIFO MIN Threshold */
5285 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5286 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
5287 /* Min. TX threshold must be less than minimal packet length */
5288 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
5289 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5290 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005291
5292 /* Disable Legacy WRR, Disable EJP, Release from reset */
5293 tx_port_num = mvpp2_egress_port(port);
5294 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
5295 tx_port_num);
5296 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
5297
5298 /* Close bandwidth for all queues */
5299 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
5300 ptxq = mvpp2_txq_phys(port->id, queue);
5301 mvpp2_write(port->priv,
5302 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
5303 }
5304
5305 /* Set refill period to 1 usec, refill tokens
5306 * and bucket size to maximum
5307 */
5308 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
5309 port->priv->tclk / USEC_PER_SEC);
5310 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
5311 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
5312 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
5313 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
5314 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
5315 val = MVPP2_TXP_TOKEN_SIZE_MAX;
5316 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5317
5318 /* Set MaximumLowLatencyPacketSize value to 256 */
5319 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
5320 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
5321 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
5322
5323 /* Enable Rx cache snoop */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005324 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005325 queue = port->rxqs[lrxq]->id;
5326 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5327 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
5328 MVPP2_SNOOP_BUF_HDR_MASK;
5329 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5330 }
5331
5332 /* At default, mask all interrupts to all present cpus */
5333 mvpp2_interrupts_disable(port);
5334}
5335
5336/* Enable/disable receiving packets */
5337static void mvpp2_ingress_enable(struct mvpp2_port *port)
5338{
5339 u32 val;
5340 int lrxq, queue;
5341
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005342 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005343 queue = port->rxqs[lrxq]->id;
5344 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5345 val &= ~MVPP2_RXQ_DISABLE_MASK;
5346 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5347 }
5348}
5349
5350static void mvpp2_ingress_disable(struct mvpp2_port *port)
5351{
5352 u32 val;
5353 int lrxq, queue;
5354
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005355 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005356 queue = port->rxqs[lrxq]->id;
5357 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5358 val |= MVPP2_RXQ_DISABLE_MASK;
5359 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5360 }
5361}
5362
5363/* Enable transmit via physical egress queue
5364 * - HW starts take descriptors from DRAM
5365 */
5366static void mvpp2_egress_enable(struct mvpp2_port *port)
5367{
5368 u32 qmap;
5369 int queue;
5370 int tx_port_num = mvpp2_egress_port(port);
5371
5372 /* Enable all initialized TXs. */
5373 qmap = 0;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005374 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005375 struct mvpp2_tx_queue *txq = port->txqs[queue];
5376
Markus Elfringdbbb2f02017-04-17 14:07:52 +02005377 if (txq->descs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005378 qmap |= (1 << queue);
5379 }
5380
5381 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5382 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
5383}
5384
5385/* Disable transmit via physical egress queue
5386 * - HW doesn't take descriptors from DRAM
5387 */
5388static void mvpp2_egress_disable(struct mvpp2_port *port)
5389{
5390 u32 reg_data;
5391 int delay;
5392 int tx_port_num = mvpp2_egress_port(port);
5393
5394 /* Issue stop command for active channels only */
5395 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5396 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
5397 MVPP2_TXP_SCHED_ENQ_MASK;
5398 if (reg_data != 0)
5399 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
5400 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
5401
5402 /* Wait for all Tx activity to terminate. */
5403 delay = 0;
5404 do {
5405 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
5406 netdev_warn(port->dev,
5407 "Tx stop timed out, status=0x%08x\n",
5408 reg_data);
5409 break;
5410 }
5411 mdelay(1);
5412 delay++;
5413
5414 /* Check port TX Command register that all
5415 * Tx queues are stopped
5416 */
5417 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
5418 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
5419}
5420
5421/* Rx descriptors helper methods */
5422
5423/* Get number of Rx descriptors occupied by received packets */
5424static inline int
5425mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
5426{
5427 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
5428
5429 return val & MVPP2_RXQ_OCCUPIED_MASK;
5430}
5431
5432/* Update Rx queue status with the number of occupied and available
5433 * Rx descriptor slots.
5434 */
5435static inline void
5436mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
5437 int used_count, int free_count)
5438{
5439 /* Decrement the number of used descriptors and increment count
5440 * increment the number of free descriptors.
5441 */
5442 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
5443
5444 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
5445}
5446
5447/* Get pointer to next RX descriptor to be processed by SW */
5448static inline struct mvpp2_rx_desc *
5449mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
5450{
5451 int rx_desc = rxq->next_desc_to_proc;
5452
5453 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
5454 prefetch(rxq->descs + rxq->next_desc_to_proc);
5455 return rxq->descs + rx_desc;
5456}
5457
5458/* Set rx queue offset */
5459static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
5460 int prxq, int offset)
5461{
5462 u32 val;
5463
5464 /* Convert offset from bytes to units of 32 bytes */
5465 offset = offset >> 5;
5466
5467 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
5468 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
5469
5470 /* Offset is in */
5471 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
5472 MVPP2_RXQ_PACKET_OFFSET_MASK);
5473
5474 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
5475}
5476
Marcin Wojtas3f518502014-07-10 16:52:13 -03005477/* Tx descriptors helper methods */
5478
Marcin Wojtas3f518502014-07-10 16:52:13 -03005479/* Get pointer to next Tx descriptor to be processed (send) by HW */
5480static struct mvpp2_tx_desc *
5481mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
5482{
5483 int tx_desc = txq->next_desc_to_proc;
5484
5485 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
5486 return txq->descs + tx_desc;
5487}
5488
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005489/* Update HW with number of aggregated Tx descriptors to be sent
5490 *
5491 * Called only from mvpp2_tx(), so migration is disabled, using
5492 * smp_processor_id() is OK.
5493 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03005494static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
5495{
5496 /* aggregated access - relevant TXQ number is written in TX desc */
Thomas Petazzonia7868412017-03-07 16:53:13 +01005497 mvpp2_percpu_write(port->priv, smp_processor_id(),
5498 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005499}
5500
5501
5502/* Check if there are enough free descriptors in aggregated txq.
5503 * If not, update the number of occupied descriptors and repeat the check.
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005504 *
5505 * Called only from mvpp2_tx(), so migration is disabled, using
5506 * smp_processor_id() is OK.
Marcin Wojtas3f518502014-07-10 16:52:13 -03005507 */
5508static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
5509 struct mvpp2_tx_queue *aggr_txq, int num)
5510{
Antoine Tenart02856a32017-10-30 11:23:32 +01005511 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005512 /* Update number of occupied aggregated Tx descriptors */
5513 int cpu = smp_processor_id();
5514 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
5515
5516 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
5517 }
5518
Antoine Tenart02856a32017-10-30 11:23:32 +01005519 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005520 return -ENOMEM;
5521
5522 return 0;
5523}
5524
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005525/* Reserved Tx descriptors allocation request
5526 *
5527 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
5528 * only by mvpp2_tx(), so migration is disabled, using
5529 * smp_processor_id() is OK.
5530 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03005531static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
5532 struct mvpp2_tx_queue *txq, int num)
5533{
5534 u32 val;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005535 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005536
5537 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005538 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005539
Thomas Petazzonia7868412017-03-07 16:53:13 +01005540 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005541
5542 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
5543}
5544
5545/* Check if there are enough reserved descriptors for transmission.
5546 * If not, request chunk of reserved descriptors and check again.
5547 */
5548static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
5549 struct mvpp2_tx_queue *txq,
5550 struct mvpp2_txq_pcpu *txq_pcpu,
5551 int num)
5552{
5553 int req, cpu, desc_count;
5554
5555 if (txq_pcpu->reserved_num >= num)
5556 return 0;
5557
5558 /* Not enough descriptors reserved! Update the reserved descriptor
5559 * count and check again.
5560 */
5561
5562 desc_count = 0;
5563 /* Compute total of used descriptors */
5564 for_each_present_cpu(cpu) {
5565 struct mvpp2_txq_pcpu *txq_pcpu_aux;
5566
5567 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
5568 desc_count += txq_pcpu_aux->count;
5569 desc_count += txq_pcpu_aux->reserved_num;
5570 }
5571
5572 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
5573 desc_count += req;
5574
5575 if (desc_count >
5576 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
5577 return -ENOMEM;
5578
5579 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
5580
5581 /* OK, the descriptor cound has been updated: check again. */
5582 if (txq_pcpu->reserved_num < num)
5583 return -ENOMEM;
5584 return 0;
5585}
5586
5587/* Release the last allocated Tx descriptor. Useful to handle DMA
5588 * mapping failures in the Tx path.
5589 */
5590static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
5591{
5592 if (txq->next_desc_to_proc == 0)
5593 txq->next_desc_to_proc = txq->last_desc - 1;
5594 else
5595 txq->next_desc_to_proc--;
5596}
5597
5598/* Set Tx descriptors fields relevant for CSUM calculation */
5599static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
5600 int ip_hdr_len, int l4_proto)
5601{
5602 u32 command;
5603
5604 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
5605 * G_L4_chk, L4_type required only for checksum calculation
5606 */
5607 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
5608 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
5609 command |= MVPP2_TXD_IP_CSUM_DISABLE;
5610
5611 if (l3_proto == swab16(ETH_P_IP)) {
5612 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
5613 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
5614 } else {
5615 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
5616 }
5617
5618 if (l4_proto == IPPROTO_TCP) {
5619 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
5620 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5621 } else if (l4_proto == IPPROTO_UDP) {
5622 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
5623 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5624 } else {
5625 command |= MVPP2_TXD_L4_CSUM_NOT;
5626 }
5627
5628 return command;
5629}
5630
5631/* Get number of sent descriptors and decrement counter.
5632 * The number of sent descriptors is returned.
5633 * Per-CPU access
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005634 *
5635 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
5636 * (migration disabled) and from the TX completion tasklet (migration
5637 * disabled) so using smp_processor_id() is OK.
Marcin Wojtas3f518502014-07-10 16:52:13 -03005638 */
5639static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
5640 struct mvpp2_tx_queue *txq)
5641{
5642 u32 val;
5643
5644 /* Reading status reg resets transmitted descriptor counter */
Thomas Petazzonia7868412017-03-07 16:53:13 +01005645 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
5646 MVPP2_TXQ_SENT_REG(txq->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005647
5648 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
5649 MVPP2_TRANSMITTED_COUNT_OFFSET;
5650}
5651
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005652/* Called through on_each_cpu(), so runs on all CPUs, with migration
5653 * disabled, therefore using smp_processor_id() is OK.
5654 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03005655static void mvpp2_txq_sent_counter_clear(void *arg)
5656{
5657 struct mvpp2_port *port = arg;
5658 int queue;
5659
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005660 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005661 int id = port->txqs[queue]->id;
5662
Thomas Petazzonia7868412017-03-07 16:53:13 +01005663 mvpp2_percpu_read(port->priv, smp_processor_id(),
5664 MVPP2_TXQ_SENT_REG(id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005665 }
5666}
5667
5668/* Set max sizes for Tx queues */
5669static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
5670{
5671 u32 val, size, mtu;
5672 int txq, tx_port_num;
5673
5674 mtu = port->pkt_size * 8;
5675 if (mtu > MVPP2_TXP_MTU_MAX)
5676 mtu = MVPP2_TXP_MTU_MAX;
5677
5678 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
5679 mtu = 3 * mtu;
5680
5681 /* Indirect access to registers */
5682 tx_port_num = mvpp2_egress_port(port);
5683 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5684
5685 /* Set MTU */
5686 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
5687 val &= ~MVPP2_TXP_MTU_MAX;
5688 val |= mtu;
5689 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
5690
5691 /* TXP token size and all TXQs token size must be larger that MTU */
5692 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
5693 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
5694 if (size < mtu) {
5695 size = mtu;
5696 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
5697 val |= size;
5698 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5699 }
5700
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005701 for (txq = 0; txq < port->ntxqs; txq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005702 val = mvpp2_read(port->priv,
5703 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
5704 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
5705
5706 if (size < mtu) {
5707 size = mtu;
5708 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
5709 val |= size;
5710 mvpp2_write(port->priv,
5711 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
5712 val);
5713 }
5714 }
5715}
5716
5717/* Set the number of packets that will be received before Rx interrupt
5718 * will be generated by HW.
5719 */
5720static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01005721 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005722{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005723 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005724
Thomas Petazzonif8b0d5f2017-02-21 11:28:03 +01005725 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
5726 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005727
Thomas Petazzonia7868412017-03-07 16:53:13 +01005728 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5729 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
5730 rxq->pkts_coal);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005731
5732 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005733}
5734
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005735/* For some reason in the LSP this is done on each CPU. Why ? */
5736static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
5737 struct mvpp2_tx_queue *txq)
5738{
5739 int cpu = get_cpu();
5740 u32 val;
5741
5742 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
5743 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
5744
5745 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
5746 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5747 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
5748
5749 put_cpu();
5750}
5751
Thomas Petazzoniab426762017-02-21 11:28:04 +01005752static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
5753{
5754 u64 tmp = (u64)clk_hz * usec;
5755
5756 do_div(tmp, USEC_PER_SEC);
5757
5758 return tmp > U32_MAX ? U32_MAX : tmp;
5759}
5760
5761static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
5762{
5763 u64 tmp = (u64)cycles * USEC_PER_SEC;
5764
5765 do_div(tmp, clk_hz);
5766
5767 return tmp > U32_MAX ? U32_MAX : tmp;
5768}
5769
Marcin Wojtas3f518502014-07-10 16:52:13 -03005770/* Set the time delay in usec before Rx interrupt */
5771static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01005772 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005773{
Thomas Petazzoniab426762017-02-21 11:28:04 +01005774 unsigned long freq = port->priv->tclk;
5775 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005776
Thomas Petazzoniab426762017-02-21 11:28:04 +01005777 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
5778 rxq->time_coal =
5779 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
5780
5781 /* re-evaluate to get actual register value */
5782 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
5783 }
5784
Marcin Wojtas3f518502014-07-10 16:52:13 -03005785 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005786}
5787
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005788static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
5789{
5790 unsigned long freq = port->priv->tclk;
5791 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5792
5793 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
5794 port->tx_time_coal =
5795 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
5796
5797 /* re-evaluate to get actual register value */
5798 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5799 }
5800
5801 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
5802}
5803
Marcin Wojtas3f518502014-07-10 16:52:13 -03005804/* Free Tx queue skbuffs */
5805static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
5806 struct mvpp2_tx_queue *txq,
5807 struct mvpp2_txq_pcpu *txq_pcpu, int num)
5808{
5809 int i;
5810
5811 for (i = 0; i < num; i++) {
Thomas Petazzoni83544912016-12-21 11:28:49 +01005812 struct mvpp2_txq_pcpu_buf *tx_buf =
5813 txq_pcpu->buffs + txq_pcpu->txq_get_index;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005814
Antoine Tenart20920262017-10-23 15:24:30 +02005815 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
5816 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
5817 tx_buf->size, DMA_TO_DEVICE);
Thomas Petazzoni36fb7432017-02-21 11:28:05 +01005818 if (tx_buf->skb)
5819 dev_kfree_skb_any(tx_buf->skb);
5820
5821 mvpp2_txq_inc_get(txq_pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005822 }
5823}
5824
5825static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
5826 u32 cause)
5827{
5828 int queue = fls(cause) - 1;
5829
5830 return port->rxqs[queue];
5831}
5832
5833static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
5834 u32 cause)
5835{
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005836 int queue = fls(cause) - 1;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005837
5838 return port->txqs[queue];
5839}
5840
5841/* Handle end of transmission */
5842static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
5843 struct mvpp2_txq_pcpu *txq_pcpu)
5844{
5845 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
5846 int tx_done;
5847
5848 if (txq_pcpu->cpu != smp_processor_id())
5849 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
5850
5851 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
5852 if (!tx_done)
5853 return;
5854 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
5855
5856 txq_pcpu->count -= tx_done;
5857
5858 if (netif_tx_queue_stopped(nq))
Antoine Tenart1d17db02017-10-30 11:23:31 +01005859 if (txq_pcpu->count <= txq_pcpu->wake_threshold)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005860 netif_tx_wake_queue(nq);
5861}
5862
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005863static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
5864 int cpu)
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005865{
5866 struct mvpp2_tx_queue *txq;
5867 struct mvpp2_txq_pcpu *txq_pcpu;
5868 unsigned int tx_todo = 0;
5869
5870 while (cause) {
5871 txq = mvpp2_get_tx_queue(port, cause);
5872 if (!txq)
5873 break;
5874
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005875 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005876
5877 if (txq_pcpu->count) {
5878 mvpp2_txq_done(port, txq, txq_pcpu);
5879 tx_todo += txq_pcpu->count;
5880 }
5881
5882 cause &= ~(1 << txq->log_id);
5883 }
5884 return tx_todo;
5885}
5886
Marcin Wojtas3f518502014-07-10 16:52:13 -03005887/* Rx/Tx queue initialization/cleanup methods */
5888
5889/* Allocate and initialize descriptors for aggr TXQ */
5890static int mvpp2_aggr_txq_init(struct platform_device *pdev,
Antoine Ténart85affd72017-08-23 09:46:55 +02005891 struct mvpp2_tx_queue *aggr_txq, int cpu,
Marcin Wojtas3f518502014-07-10 16:52:13 -03005892 struct mvpp2 *priv)
5893{
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005894 u32 txq_dma;
5895
Marcin Wojtas3f518502014-07-10 16:52:13 -03005896 /* Allocate memory for TX descriptors */
Yan Markmana154f8e2017-11-30 10:49:46 +01005897 aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
Antoine Ténart85affd72017-08-23 09:46:55 +02005898 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005899 &aggr_txq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005900 if (!aggr_txq->descs)
5901 return -ENOMEM;
5902
Antoine Tenart02856a32017-10-30 11:23:32 +01005903 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005904
5905 /* Aggr TXQ no reset WA */
5906 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
5907 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
5908
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005909 /* Set Tx descriptors queue starting address indirect
5910 * access
5911 */
5912 if (priv->hw_version == MVPP21)
5913 txq_dma = aggr_txq->descs_dma;
5914 else
5915 txq_dma = aggr_txq->descs_dma >>
5916 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
5917
5918 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
Antoine Ténart85affd72017-08-23 09:46:55 +02005919 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
5920 MVPP2_AGGR_TXQ_SIZE);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005921
5922 return 0;
5923}
5924
5925/* Create a specified Rx queue */
5926static int mvpp2_rxq_init(struct mvpp2_port *port,
5927 struct mvpp2_rx_queue *rxq)
5928
5929{
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005930 u32 rxq_dma;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005931 int cpu;
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005932
Marcin Wojtas3f518502014-07-10 16:52:13 -03005933 rxq->size = port->rx_ring_size;
5934
5935 /* Allocate memory for RX descriptors */
5936 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
5937 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005938 &rxq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005939 if (!rxq->descs)
5940 return -ENOMEM;
5941
Marcin Wojtas3f518502014-07-10 16:52:13 -03005942 rxq->last_desc = rxq->size - 1;
5943
5944 /* Zero occupied and non-occupied counters - direct access */
5945 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5946
5947 /* Set Rx descriptors queue starting address - indirect access */
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005948 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005949 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005950 if (port->priv->hw_version == MVPP21)
5951 rxq_dma = rxq->descs_dma;
5952 else
5953 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005954 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
5955 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
5956 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005957 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005958
5959 /* Set Offset */
5960 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
5961
5962 /* Set coalescing pkts and time */
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01005963 mvpp2_rx_pkts_coal_set(port, rxq);
5964 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005965
5966 /* Add number of descriptors ready for receiving packets */
5967 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
5968
5969 return 0;
5970}
5971
5972/* Push packets received by the RXQ to BM pool */
5973static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
5974 struct mvpp2_rx_queue *rxq)
5975{
5976 int rx_received, i;
5977
5978 rx_received = mvpp2_rxq_received(port, rxq->id);
5979 if (!rx_received)
5980 return;
5981
5982 for (i = 0; i < rx_received; i++) {
5983 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005984 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5985 int pool;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005986
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005987 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5988 MVPP2_RXD_BM_POOL_ID_OFFS;
5989
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02005990 mvpp2_bm_pool_put(port, pool,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005991 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
5992 mvpp2_rxdesc_cookie_get(port, rx_desc));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005993 }
5994 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
5995}
5996
5997/* Cleanup Rx queue */
5998static void mvpp2_rxq_deinit(struct mvpp2_port *port,
5999 struct mvpp2_rx_queue *rxq)
6000{
Thomas Petazzonia7868412017-03-07 16:53:13 +01006001 int cpu;
6002
Marcin Wojtas3f518502014-07-10 16:52:13 -03006003 mvpp2_rxq_drop_pkts(port, rxq);
6004
6005 if (rxq->descs)
6006 dma_free_coherent(port->dev->dev.parent,
6007 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
6008 rxq->descs,
Thomas Petazzoni20396132017-03-07 16:53:00 +01006009 rxq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006010
6011 rxq->descs = NULL;
6012 rxq->last_desc = 0;
6013 rxq->next_desc_to_proc = 0;
Thomas Petazzoni20396132017-03-07 16:53:00 +01006014 rxq->descs_dma = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006015
6016 /* Clear Rx descriptors queue starting address and size;
6017 * free descriptor number
6018 */
6019 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02006020 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01006021 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
6022 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
6023 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02006024 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03006025}
6026
6027/* Create and initialize a Tx queue */
6028static int mvpp2_txq_init(struct mvpp2_port *port,
6029 struct mvpp2_tx_queue *txq)
6030{
6031 u32 val;
6032 int cpu, desc, desc_per_txq, tx_port_num;
6033 struct mvpp2_txq_pcpu *txq_pcpu;
6034
6035 txq->size = port->tx_ring_size;
6036
6037 /* Allocate memory for Tx descriptors */
6038 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
6039 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01006040 &txq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006041 if (!txq->descs)
6042 return -ENOMEM;
6043
Marcin Wojtas3f518502014-07-10 16:52:13 -03006044 txq->last_desc = txq->size - 1;
6045
6046 /* Set Tx descriptors queue starting address - indirect access */
Thomas Petazzonia704bb52017-06-10 23:18:22 +02006047 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01006048 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
6049 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
6050 txq->descs_dma);
6051 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
6052 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
6053 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
6054 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
6055 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
6056 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006057 val &= ~MVPP2_TXQ_PENDING_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01006058 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006059
6060 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
6061 * for each existing TXQ.
6062 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
6063 * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS
6064 */
6065 desc_per_txq = 16;
6066 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
6067 (txq->log_id * desc_per_txq);
6068
Thomas Petazzonia7868412017-03-07 16:53:13 +01006069 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
6070 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
6071 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
Thomas Petazzonia704bb52017-06-10 23:18:22 +02006072 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03006073
6074 /* WRR / EJP configuration - indirect access */
6075 tx_port_num = mvpp2_egress_port(port);
6076 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
6077
6078 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
6079 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
6080 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
6081 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
6082 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
6083
6084 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
6085 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
6086 val);
6087
6088 for_each_present_cpu(cpu) {
6089 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6090 txq_pcpu->size = txq->size;
Markus Elfring02c91ec2017-04-17 08:09:07 +02006091 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
6092 sizeof(*txq_pcpu->buffs),
6093 GFP_KERNEL);
Thomas Petazzoni83544912016-12-21 11:28:49 +01006094 if (!txq_pcpu->buffs)
Antoine Tenartba2d8d82017-11-28 14:19:48 +01006095 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006096
6097 txq_pcpu->count = 0;
6098 txq_pcpu->reserved_num = 0;
6099 txq_pcpu->txq_put_index = 0;
6100 txq_pcpu->txq_get_index = 0;
Antoine Tenartb70d4a52017-12-11 09:13:25 +01006101 txq_pcpu->tso_headers = NULL;
Antoine Ténart186cd4d2017-08-23 09:46:56 +02006102
Antoine Tenart1d17db02017-10-30 11:23:31 +01006103 txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
6104 txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
6105
Antoine Ténart186cd4d2017-08-23 09:46:56 +02006106 txq_pcpu->tso_headers =
6107 dma_alloc_coherent(port->dev->dev.parent,
Yan Markman822eaf72017-10-23 15:24:29 +02006108 txq_pcpu->size * TSO_HEADER_SIZE,
Antoine Ténart186cd4d2017-08-23 09:46:56 +02006109 &txq_pcpu->tso_headers_dma,
6110 GFP_KERNEL);
6111 if (!txq_pcpu->tso_headers)
Antoine Tenartba2d8d82017-11-28 14:19:48 +01006112 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006113 }
6114
6115 return 0;
6116}
6117
6118/* Free allocated TXQ resources */
6119static void mvpp2_txq_deinit(struct mvpp2_port *port,
6120 struct mvpp2_tx_queue *txq)
6121{
6122 struct mvpp2_txq_pcpu *txq_pcpu;
6123 int cpu;
6124
6125 for_each_present_cpu(cpu) {
6126 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Thomas Petazzoni83544912016-12-21 11:28:49 +01006127 kfree(txq_pcpu->buffs);
Antoine Ténart186cd4d2017-08-23 09:46:56 +02006128
Antoine Tenartb70d4a52017-12-11 09:13:25 +01006129 if (txq_pcpu->tso_headers)
6130 dma_free_coherent(port->dev->dev.parent,
6131 txq_pcpu->size * TSO_HEADER_SIZE,
6132 txq_pcpu->tso_headers,
6133 txq_pcpu->tso_headers_dma);
6134
6135 txq_pcpu->tso_headers = NULL;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006136 }
6137
6138 if (txq->descs)
6139 dma_free_coherent(port->dev->dev.parent,
6140 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01006141 txq->descs, txq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006142
6143 txq->descs = NULL;
6144 txq->last_desc = 0;
6145 txq->next_desc_to_proc = 0;
Thomas Petazzoni20396132017-03-07 16:53:00 +01006146 txq->descs_dma = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006147
6148 /* Set minimum bandwidth for disabled TXQs */
6149 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
6150
6151 /* Set Tx descriptors queue starting address and size */
Thomas Petazzonia704bb52017-06-10 23:18:22 +02006152 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01006153 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
6154 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
6155 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02006156 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03006157}
6158
6159/* Cleanup Tx ports */
6160static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
6161{
6162 struct mvpp2_txq_pcpu *txq_pcpu;
6163 int delay, pending, cpu;
6164 u32 val;
6165
Thomas Petazzonia704bb52017-06-10 23:18:22 +02006166 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01006167 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
6168 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006169 val |= MVPP2_TXQ_DRAIN_EN_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01006170 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006171
6172 /* The napi queue has been stopped so wait for all packets
6173 * to be transmitted.
6174 */
6175 delay = 0;
6176 do {
6177 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
6178 netdev_warn(port->dev,
6179 "port %d: cleaning queue %d timed out\n",
6180 port->id, txq->log_id);
6181 break;
6182 }
6183 mdelay(1);
6184 delay++;
6185
Thomas Petazzonia7868412017-03-07 16:53:13 +01006186 pending = mvpp2_percpu_read(port->priv, cpu,
6187 MVPP2_TXQ_PENDING_REG);
6188 pending &= MVPP2_TXQ_PENDING_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006189 } while (pending);
6190
6191 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01006192 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02006193 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03006194
6195 for_each_present_cpu(cpu) {
6196 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6197
6198 /* Release all packets */
6199 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
6200
6201 /* Reset queue */
6202 txq_pcpu->count = 0;
6203 txq_pcpu->txq_put_index = 0;
6204 txq_pcpu->txq_get_index = 0;
6205 }
6206}
6207
6208/* Cleanup all Tx queues */
6209static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
6210{
6211 struct mvpp2_tx_queue *txq;
6212 int queue;
6213 u32 val;
6214
6215 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
6216
6217 /* Reset Tx ports and delete Tx queues */
6218 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
6219 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
6220
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006221 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006222 txq = port->txqs[queue];
6223 mvpp2_txq_clean(port, txq);
6224 mvpp2_txq_deinit(port, txq);
6225 }
6226
6227 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
6228
6229 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
6230 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
6231}
6232
6233/* Cleanup all Rx queues */
6234static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
6235{
6236 int queue;
6237
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006238 for (queue = 0; queue < port->nrxqs; queue++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006239 mvpp2_rxq_deinit(port, port->rxqs[queue]);
6240}
6241
6242/* Init all Rx queues for port */
6243static int mvpp2_setup_rxqs(struct mvpp2_port *port)
6244{
6245 int queue, err;
6246
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006247 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006248 err = mvpp2_rxq_init(port, port->rxqs[queue]);
6249 if (err)
6250 goto err_cleanup;
6251 }
6252 return 0;
6253
6254err_cleanup:
6255 mvpp2_cleanup_rxqs(port);
6256 return err;
6257}
6258
6259/* Init all tx queues for port */
6260static int mvpp2_setup_txqs(struct mvpp2_port *port)
6261{
6262 struct mvpp2_tx_queue *txq;
6263 int queue, err;
6264
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006265 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006266 txq = port->txqs[queue];
6267 err = mvpp2_txq_init(port, txq);
6268 if (err)
6269 goto err_cleanup;
6270 }
6271
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006272 if (port->has_tx_irqs) {
6273 mvpp2_tx_time_coal_set(port);
6274 for (queue = 0; queue < port->ntxqs; queue++) {
6275 txq = port->txqs[queue];
6276 mvpp2_tx_pkts_coal_set(port, txq);
6277 }
6278 }
6279
Marcin Wojtas3f518502014-07-10 16:52:13 -03006280 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
6281 return 0;
6282
6283err_cleanup:
6284 mvpp2_cleanup_txqs(port);
6285 return err;
6286}
6287
6288/* The callback for per-port interrupt */
6289static irqreturn_t mvpp2_isr(int irq, void *dev_id)
6290{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006291 struct mvpp2_queue_vector *qv = dev_id;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006292
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006293 mvpp2_qvec_interrupt_disable(qv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006294
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006295 napi_schedule(&qv->napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006296
6297 return IRQ_HANDLED;
6298}
6299
Antoine Tenartfd3651b2017-09-01 11:04:54 +02006300/* Per-port interrupt for link status changes */
6301static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
6302{
6303 struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
6304 struct net_device *dev = port->dev;
6305 bool event = false, link = false;
6306 u32 val;
6307
6308 mvpp22_gop_mask_irq(port);
6309
6310 if (port->gop_id == 0 &&
6311 port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
6312 val = readl(port->base + MVPP22_XLG_INT_STAT);
6313 if (val & MVPP22_XLG_INT_STAT_LINK) {
6314 event = true;
6315 val = readl(port->base + MVPP22_XLG_STATUS);
6316 if (val & MVPP22_XLG_STATUS_LINK_UP)
6317 link = true;
6318 }
6319 } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
6320 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
6321 val = readl(port->base + MVPP22_GMAC_INT_STAT);
6322 if (val & MVPP22_GMAC_INT_STAT_LINK) {
6323 event = true;
6324 val = readl(port->base + MVPP2_GMAC_STATUS0);
6325 if (val & MVPP2_GMAC_STATUS0_LINK_UP)
6326 link = true;
6327 }
6328 }
6329
6330 if (!netif_running(dev) || !event)
6331 goto handled;
6332
6333 if (link) {
6334 mvpp2_interrupts_enable(port);
6335
6336 mvpp2_egress_enable(port);
6337 mvpp2_ingress_enable(port);
6338 netif_carrier_on(dev);
6339 netif_tx_wake_all_queues(dev);
6340 } else {
6341 netif_tx_stop_all_queues(dev);
6342 netif_carrier_off(dev);
6343 mvpp2_ingress_disable(port);
6344 mvpp2_egress_disable(port);
6345
6346 mvpp2_interrupts_disable(port);
6347 }
6348
6349handled:
6350 mvpp22_gop_unmask_irq(port);
6351 return IRQ_HANDLED;
6352}
6353
Antoine Tenart65a2c092017-08-30 10:29:18 +02006354static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
6355 struct phy_device *phydev)
6356{
6357 u32 val;
6358
6359 if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
6360 port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
6361 port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
6362 port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
6363 port->phy_interface != PHY_INTERFACE_MODE_SGMII)
6364 return;
6365
6366 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6367 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
6368 MVPP2_GMAC_CONFIG_GMII_SPEED |
6369 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
6370 MVPP2_GMAC_AN_SPEED_EN |
6371 MVPP2_GMAC_AN_DUPLEX_EN);
6372
6373 if (phydev->duplex)
6374 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6375
6376 if (phydev->speed == SPEED_1000)
6377 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
6378 else if (phydev->speed == SPEED_100)
6379 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
6380
6381 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
Antoine Tenart65a2c092017-08-30 10:29:18 +02006382}
6383
Marcin Wojtas3f518502014-07-10 16:52:13 -03006384/* Adjust link */
6385static void mvpp2_link_event(struct net_device *dev)
6386{
6387 struct mvpp2_port *port = netdev_priv(dev);
Philippe Reynes8e072692016-06-28 00:08:11 +02006388 struct phy_device *phydev = dev->phydev;
Antoine Tenart89273bc2017-08-30 10:29:19 +02006389 bool link_reconfigured = false;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006390 u32 val;
6391
6392 if (phydev->link) {
Antoine Tenart89273bc2017-08-30 10:29:19 +02006393 if (port->phy_interface != phydev->interface && port->comphy) {
6394 /* disable current port for reconfiguration */
6395 mvpp2_interrupts_disable(port);
6396 netif_carrier_off(port->dev);
6397 mvpp2_port_disable(port);
6398 phy_power_off(port->comphy);
6399
6400 /* comphy reconfiguration */
6401 port->phy_interface = phydev->interface;
6402 mvpp22_comphy_init(port);
6403
6404 /* gop/mac reconfiguration */
6405 mvpp22_gop_init(port);
6406 mvpp2_port_mii_set(port);
6407
6408 link_reconfigured = true;
6409 }
6410
Marcin Wojtas3f518502014-07-10 16:52:13 -03006411 if ((port->speed != phydev->speed) ||
6412 (port->duplex != phydev->duplex)) {
Antoine Tenart65a2c092017-08-30 10:29:18 +02006413 mvpp2_gmac_set_autoneg(port, phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006414
6415 port->duplex = phydev->duplex;
6416 port->speed = phydev->speed;
6417 }
6418 }
6419
Antoine Tenart89273bc2017-08-30 10:29:19 +02006420 if (phydev->link != port->link || link_reconfigured) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006421 port->link = phydev->link;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006422
Marcin Wojtas3f518502014-07-10 16:52:13 -03006423 if (phydev->link) {
Antoine Tenart65a2c092017-08-30 10:29:18 +02006424 if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
6425 port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
6426 port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
6427 port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
6428 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
6429 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6430 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
6431 MVPP2_GMAC_FORCE_LINK_DOWN);
6432 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6433 }
Antoine Tenartf55744a2017-08-30 10:29:17 +02006434
6435 mvpp2_interrupts_enable(port);
6436 mvpp2_port_enable(port);
6437
Marcin Wojtas3f518502014-07-10 16:52:13 -03006438 mvpp2_egress_enable(port);
6439 mvpp2_ingress_enable(port);
Antoine Tenartf55744a2017-08-30 10:29:17 +02006440 netif_carrier_on(dev);
6441 netif_tx_wake_all_queues(dev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006442 } else {
Antoine Tenart968b2112017-08-30 10:29:16 +02006443 port->duplex = -1;
6444 port->speed = 0;
6445
Antoine Tenartf55744a2017-08-30 10:29:17 +02006446 netif_tx_stop_all_queues(dev);
6447 netif_carrier_off(dev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006448 mvpp2_ingress_disable(port);
6449 mvpp2_egress_disable(port);
Antoine Tenartf55744a2017-08-30 10:29:17 +02006450
6451 mvpp2_port_disable(port);
6452 mvpp2_interrupts_disable(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006453 }
Antoine Tenart968b2112017-08-30 10:29:16 +02006454
Marcin Wojtas3f518502014-07-10 16:52:13 -03006455 phy_print_status(phydev);
6456 }
6457}
6458
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006459static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
6460{
6461 ktime_t interval;
6462
6463 if (!port_pcpu->timer_scheduled) {
6464 port_pcpu->timer_scheduled = true;
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01006465 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006466 hrtimer_start(&port_pcpu->tx_done_timer, interval,
6467 HRTIMER_MODE_REL_PINNED);
6468 }
6469}
6470
6471static void mvpp2_tx_proc_cb(unsigned long data)
6472{
6473 struct net_device *dev = (struct net_device *)data;
6474 struct mvpp2_port *port = netdev_priv(dev);
6475 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
6476 unsigned int tx_todo, cause;
6477
6478 if (!netif_running(dev))
6479 return;
6480 port_pcpu->timer_scheduled = false;
6481
6482 /* Process all the Tx queues */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006483 cause = (1 << port->ntxqs) - 1;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006484 tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006485
6486 /* Set the timer in case not all the packets were processed */
6487 if (tx_todo)
6488 mvpp2_timer_set(port_pcpu);
6489}
6490
6491static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
6492{
6493 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
6494 struct mvpp2_port_pcpu,
6495 tx_done_timer);
6496
6497 tasklet_schedule(&port_pcpu->tx_done_tasklet);
6498
6499 return HRTIMER_NORESTART;
6500}
6501
Marcin Wojtas3f518502014-07-10 16:52:13 -03006502/* Main RX/TX processing routines */
6503
6504/* Display more error info */
6505static void mvpp2_rx_error(struct mvpp2_port *port,
6506 struct mvpp2_rx_desc *rx_desc)
6507{
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006508 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
6509 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006510
6511 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
6512 case MVPP2_RXD_ERR_CRC:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006513 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
6514 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006515 break;
6516 case MVPP2_RXD_ERR_OVERRUN:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006517 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
6518 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006519 break;
6520 case MVPP2_RXD_ERR_RESOURCE:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006521 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
6522 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006523 break;
6524 }
6525}
6526
6527/* Handle RX checksum offload */
6528static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
6529 struct sk_buff *skb)
6530{
6531 if (((status & MVPP2_RXD_L3_IP4) &&
6532 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
6533 (status & MVPP2_RXD_L3_IP6))
6534 if (((status & MVPP2_RXD_L4_UDP) ||
6535 (status & MVPP2_RXD_L4_TCP)) &&
6536 (status & MVPP2_RXD_L4_CSUM_OK)) {
6537 skb->csum = 0;
6538 skb->ip_summed = CHECKSUM_UNNECESSARY;
6539 return;
6540 }
6541
6542 skb->ip_summed = CHECKSUM_NONE;
6543}
6544
6545/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
6546static int mvpp2_rx_refill(struct mvpp2_port *port,
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02006547 struct mvpp2_bm_pool *bm_pool, int pool)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006548{
Thomas Petazzoni20396132017-03-07 16:53:00 +01006549 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01006550 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01006551 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006552
Marcin Wojtas3f518502014-07-10 16:52:13 -03006553 /* No recycle or too many buffers are in use, so allocate a new skb */
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01006554 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
6555 GFP_ATOMIC);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01006556 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006557 return -ENOMEM;
6558
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02006559 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Thomas Petazzoni7ef7e1d2017-02-21 11:28:07 +01006560
Marcin Wojtas3f518502014-07-10 16:52:13 -03006561 return 0;
6562}
6563
6564/* Handle tx checksum */
6565static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
6566{
6567 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6568 int ip_hdr_len = 0;
6569 u8 l4_proto;
6570
6571 if (skb->protocol == htons(ETH_P_IP)) {
6572 struct iphdr *ip4h = ip_hdr(skb);
6573
6574 /* Calculate IPv4 checksum and L4 checksum */
6575 ip_hdr_len = ip4h->ihl;
6576 l4_proto = ip4h->protocol;
6577 } else if (skb->protocol == htons(ETH_P_IPV6)) {
6578 struct ipv6hdr *ip6h = ipv6_hdr(skb);
6579
6580 /* Read l4_protocol from one of IPv6 extra headers */
6581 if (skb_network_header_len(skb) > 0)
6582 ip_hdr_len = (skb_network_header_len(skb) >> 2);
6583 l4_proto = ip6h->nexthdr;
6584 } else {
6585 return MVPP2_TXD_L4_CSUM_NOT;
6586 }
6587
6588 return mvpp2_txq_desc_csum(skb_network_offset(skb),
6589 skb->protocol, ip_hdr_len, l4_proto);
6590 }
6591
6592 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
6593}
6594
Marcin Wojtas3f518502014-07-10 16:52:13 -03006595/* Main rx processing */
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006596static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
6597 int rx_todo, struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006598{
6599 struct net_device *dev = port->dev;
Marcin Wojtasb5015852015-12-03 15:20:51 +01006600 int rx_received;
6601 int rx_done = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006602 u32 rcvd_pkts = 0;
6603 u32 rcvd_bytes = 0;
6604
6605 /* Get number of received packets and clamp the to-do */
6606 rx_received = mvpp2_rxq_received(port, rxq->id);
6607 if (rx_todo > rx_received)
6608 rx_todo = rx_received;
6609
Marcin Wojtasb5015852015-12-03 15:20:51 +01006610 while (rx_done < rx_todo) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006611 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
6612 struct mvpp2_bm_pool *bm_pool;
6613 struct sk_buff *skb;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01006614 unsigned int frag_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01006615 dma_addr_t dma_addr;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006616 phys_addr_t phys_addr;
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02006617 u32 rx_status;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006618 int pool, rx_bytes, err;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01006619 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006620
Marcin Wojtasb5015852015-12-03 15:20:51 +01006621 rx_done++;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006622 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
6623 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
6624 rx_bytes -= MVPP2_MH_SIZE;
6625 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
6626 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
6627 data = (void *)phys_to_virt(phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006628
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02006629 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
6630 MVPP2_RXD_BM_POOL_ID_OFFS;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006631 bm_pool = &port->priv->bm_pools[pool];
Marcin Wojtas3f518502014-07-10 16:52:13 -03006632
6633 /* In case of an error, release the requested buffer pointer
6634 * to the Buffer Manager. This request process is controlled
6635 * by the hardware, and the information about the buffer is
6636 * comprised by the RX descriptor.
6637 */
6638 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
Markus Elfring8a524882017-04-17 10:52:02 +02006639err_drop_frame:
Marcin Wojtas3f518502014-07-10 16:52:13 -03006640 dev->stats.rx_errors++;
6641 mvpp2_rx_error(port, rx_desc);
Marcin Wojtasb5015852015-12-03 15:20:51 +01006642 /* Return the buffer to the pool */
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02006643 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006644 continue;
6645 }
6646
Thomas Petazzoni0e037282017-02-21 11:28:12 +01006647 if (bm_pool->frag_size > PAGE_SIZE)
6648 frag_size = 0;
6649 else
6650 frag_size = bm_pool->frag_size;
6651
6652 skb = build_skb(data, frag_size);
6653 if (!skb) {
6654 netdev_warn(port->dev, "skb build failed\n");
6655 goto err_drop_frame;
6656 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006657
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02006658 err = mvpp2_rx_refill(port, bm_pool, pool);
Marcin Wojtasb5015852015-12-03 15:20:51 +01006659 if (err) {
6660 netdev_err(port->dev, "failed to refill BM pools\n");
6661 goto err_drop_frame;
6662 }
6663
Thomas Petazzoni20396132017-03-07 16:53:00 +01006664 dma_unmap_single(dev->dev.parent, dma_addr,
Marcin Wojtas4229d502015-12-03 15:20:50 +01006665 bm_pool->buf_size, DMA_FROM_DEVICE);
6666
Marcin Wojtas3f518502014-07-10 16:52:13 -03006667 rcvd_pkts++;
6668 rcvd_bytes += rx_bytes;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006669
Thomas Petazzoni0e037282017-02-21 11:28:12 +01006670 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006671 skb_put(skb, rx_bytes);
6672 skb->protocol = eth_type_trans(skb, dev);
6673 mvpp2_rx_csum(port, rx_status, skb);
6674
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006675 napi_gro_receive(napi, skb);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006676 }
6677
6678 if (rcvd_pkts) {
6679 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
6680
6681 u64_stats_update_begin(&stats->syncp);
6682 stats->rx_packets += rcvd_pkts;
6683 stats->rx_bytes += rcvd_bytes;
6684 u64_stats_update_end(&stats->syncp);
6685 }
6686
6687 /* Update Rx queue management counters */
6688 wmb();
Marcin Wojtasb5015852015-12-03 15:20:51 +01006689 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006690
6691 return rx_todo;
6692}
6693
6694static inline void
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006695tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006696 struct mvpp2_tx_desc *desc)
6697{
Antoine Tenart20920262017-10-23 15:24:30 +02006698 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
6699
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006700 dma_addr_t buf_dma_addr =
6701 mvpp2_txdesc_dma_addr_get(port, desc);
6702 size_t buf_sz =
6703 mvpp2_txdesc_size_get(port, desc);
Antoine Tenart20920262017-10-23 15:24:30 +02006704 if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
6705 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
6706 buf_sz, DMA_TO_DEVICE);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006707 mvpp2_txq_desc_put(txq);
6708}
6709
6710/* Handle tx fragmentation processing */
6711static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
6712 struct mvpp2_tx_queue *aggr_txq,
6713 struct mvpp2_tx_queue *txq)
6714{
6715 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
6716 struct mvpp2_tx_desc *tx_desc;
6717 int i;
Thomas Petazzoni20396132017-03-07 16:53:00 +01006718 dma_addr_t buf_dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006719
6720 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6721 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6722 void *addr = page_address(frag->page.p) + frag->page_offset;
6723
6724 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006725 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6726 mvpp2_txdesc_size_set(port, tx_desc, frag->size);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006727
Thomas Petazzoni20396132017-03-07 16:53:00 +01006728 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006729 frag->size,
6730 DMA_TO_DEVICE);
Thomas Petazzoni20396132017-03-07 16:53:00 +01006731 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006732 mvpp2_txq_desc_put(txq);
Markus Elfring32bae632017-04-17 11:36:34 +02006733 goto cleanup;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006734 }
6735
Antoine Tenart6eb5d372017-10-30 11:23:33 +01006736 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006737
6738 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
6739 /* Last descriptor */
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006740 mvpp2_txdesc_cmd_set(port, tx_desc,
6741 MVPP2_TXD_L_DESC);
6742 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006743 } else {
6744 /* Descriptor in the middle: Not First, Not Last */
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006745 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
6746 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006747 }
6748 }
6749
6750 return 0;
Markus Elfring32bae632017-04-17 11:36:34 +02006751cleanup:
Marcin Wojtas3f518502014-07-10 16:52:13 -03006752 /* Release all descriptors that were used to map fragments of
6753 * this packet, as well as the corresponding DMA mappings
6754 */
6755 for (i = i - 1; i >= 0; i--) {
6756 tx_desc = txq->descs + i;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006757 tx_desc_unmap_put(port, txq, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006758 }
6759
6760 return -ENOMEM;
6761}
6762
Antoine Ténart186cd4d2017-08-23 09:46:56 +02006763static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
6764 struct net_device *dev,
6765 struct mvpp2_tx_queue *txq,
6766 struct mvpp2_tx_queue *aggr_txq,
6767 struct mvpp2_txq_pcpu *txq_pcpu,
6768 int hdr_sz)
6769{
6770 struct mvpp2_port *port = netdev_priv(dev);
6771 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6772 dma_addr_t addr;
6773
6774 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6775 mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
6776
6777 addr = txq_pcpu->tso_headers_dma +
6778 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
Antoine Tenart6eb5d372017-10-30 11:23:33 +01006779 mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
Antoine Ténart186cd4d2017-08-23 09:46:56 +02006780
6781 mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
6782 MVPP2_TXD_F_DESC |
6783 MVPP2_TXD_PADDING_DISABLE);
6784 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6785}
6786
6787static inline int mvpp2_tso_put_data(struct sk_buff *skb,
6788 struct net_device *dev, struct tso_t *tso,
6789 struct mvpp2_tx_queue *txq,
6790 struct mvpp2_tx_queue *aggr_txq,
6791 struct mvpp2_txq_pcpu *txq_pcpu,
6792 int sz, bool left, bool last)
6793{
6794 struct mvpp2_port *port = netdev_priv(dev);
6795 struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
6796 dma_addr_t buf_dma_addr;
6797
6798 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6799 mvpp2_txdesc_size_set(port, tx_desc, sz);
6800
6801 buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
6802 DMA_TO_DEVICE);
6803 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
6804 mvpp2_txq_desc_put(txq);
6805 return -ENOMEM;
6806 }
6807
Antoine Tenart6eb5d372017-10-30 11:23:33 +01006808 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
Antoine Ténart186cd4d2017-08-23 09:46:56 +02006809
6810 if (!left) {
6811 mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
6812 if (last) {
6813 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
6814 return 0;
6815 }
6816 } else {
6817 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
6818 }
6819
6820 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
6821 return 0;
6822}
6823
6824static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
6825 struct mvpp2_tx_queue *txq,
6826 struct mvpp2_tx_queue *aggr_txq,
6827 struct mvpp2_txq_pcpu *txq_pcpu)
6828{
6829 struct mvpp2_port *port = netdev_priv(dev);
6830 struct tso_t tso;
6831 int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
6832 int i, len, descs = 0;
6833
6834 /* Check number of available descriptors */
6835 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
6836 tso_count_descs(skb)) ||
6837 mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
6838 tso_count_descs(skb)))
6839 return 0;
6840
6841 tso_start(skb, &tso);
6842 len = skb->len - hdr_sz;
6843 while (len > 0) {
6844 int left = min_t(int, skb_shinfo(skb)->gso_size, len);
6845 char *hdr = txq_pcpu->tso_headers +
6846 txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
6847
6848 len -= left;
6849 descs++;
6850
6851 tso_build_hdr(skb, hdr, &tso, left, len == 0);
6852 mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
6853
6854 while (left > 0) {
6855 int sz = min_t(int, tso.size, left);
6856 left -= sz;
6857 descs++;
6858
6859 if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
6860 txq_pcpu, sz, left, len == 0))
6861 goto release;
6862 tso_build_data(skb, &tso, sz);
6863 }
6864 }
6865
6866 return descs;
6867
6868release:
6869 for (i = descs - 1; i >= 0; i--) {
6870 struct mvpp2_tx_desc *tx_desc = txq->descs + i;
6871 tx_desc_unmap_put(port, txq, tx_desc);
6872 }
6873 return 0;
6874}
6875
Marcin Wojtas3f518502014-07-10 16:52:13 -03006876/* Main tx processing */
6877static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
6878{
6879 struct mvpp2_port *port = netdev_priv(dev);
6880 struct mvpp2_tx_queue *txq, *aggr_txq;
6881 struct mvpp2_txq_pcpu *txq_pcpu;
6882 struct mvpp2_tx_desc *tx_desc;
Thomas Petazzoni20396132017-03-07 16:53:00 +01006883 dma_addr_t buf_dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006884 int frags = 0;
6885 u16 txq_id;
6886 u32 tx_cmd;
6887
6888 txq_id = skb_get_queue_mapping(skb);
6889 txq = port->txqs[txq_id];
6890 txq_pcpu = this_cpu_ptr(txq->pcpu);
6891 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
6892
Antoine Ténart186cd4d2017-08-23 09:46:56 +02006893 if (skb_is_gso(skb)) {
6894 frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
6895 goto out;
6896 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006897 frags = skb_shinfo(skb)->nr_frags + 1;
6898
6899 /* Check number of available descriptors */
6900 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
6901 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
6902 txq_pcpu, frags)) {
6903 frags = 0;
6904 goto out;
6905 }
6906
6907 /* Get a descriptor for the first part of the packet */
6908 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006909 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
6910 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
Marcin Wojtas3f518502014-07-10 16:52:13 -03006911
Thomas Petazzoni20396132017-03-07 16:53:00 +01006912 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006913 skb_headlen(skb), DMA_TO_DEVICE);
Thomas Petazzoni20396132017-03-07 16:53:00 +01006914 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006915 mvpp2_txq_desc_put(txq);
6916 frags = 0;
6917 goto out;
6918 }
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006919
Antoine Tenart6eb5d372017-10-30 11:23:33 +01006920 mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006921
6922 tx_cmd = mvpp2_skb_tx_csum(port, skb);
6923
6924 if (frags == 1) {
6925 /* First and Last descriptor */
6926 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006927 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
6928 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006929 } else {
6930 /* First but not Last */
6931 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006932 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
6933 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006934
6935 /* Continue with other skb fragments */
6936 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01006937 tx_desc_unmap_put(port, txq, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006938 frags = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006939 }
6940 }
6941
Marcin Wojtas3f518502014-07-10 16:52:13 -03006942out:
6943 if (frags > 0) {
6944 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
Antoine Ténart186cd4d2017-08-23 09:46:56 +02006945 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
6946
6947 txq_pcpu->reserved_num -= frags;
6948 txq_pcpu->count += frags;
6949 aggr_txq->count += frags;
6950
6951 /* Enable transmit */
6952 wmb();
6953 mvpp2_aggr_txq_pend_desc_add(port, frags);
6954
Antoine Tenart1d17db02017-10-30 11:23:31 +01006955 if (txq_pcpu->count >= txq_pcpu->stop_threshold)
Antoine Ténart186cd4d2017-08-23 09:46:56 +02006956 netif_tx_stop_queue(nq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006957
6958 u64_stats_update_begin(&stats->syncp);
6959 stats->tx_packets++;
6960 stats->tx_bytes += skb->len;
6961 u64_stats_update_end(&stats->syncp);
6962 } else {
6963 dev->stats.tx_dropped++;
6964 dev_kfree_skb_any(skb);
6965 }
6966
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006967 /* Finalize TX processing */
Antoine Tenart082297e2017-10-23 15:24:31 +02006968 if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006969 mvpp2_txq_done(port, txq, txq_pcpu);
6970
6971 /* Set the timer in case not all frags were processed */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006972 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
6973 txq_pcpu->count > 0) {
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006974 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
6975
6976 mvpp2_timer_set(port_pcpu);
6977 }
6978
Marcin Wojtas3f518502014-07-10 16:52:13 -03006979 return NETDEV_TX_OK;
6980}
6981
6982static inline void mvpp2_cause_error(struct net_device *dev, int cause)
6983{
6984 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
6985 netdev_err(dev, "FCS error\n");
6986 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
6987 netdev_err(dev, "rx fifo overrun error\n");
6988 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
6989 netdev_err(dev, "tx fifo underrun error\n");
6990}
6991
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006992static int mvpp2_poll(struct napi_struct *napi, int budget)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006993{
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006994 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006995 int rx_done = 0;
6996 struct mvpp2_port *port = netdev_priv(napi->dev);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006997 struct mvpp2_queue_vector *qv;
Thomas Petazzonia7868412017-03-07 16:53:13 +01006998 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03006999
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007000 qv = container_of(napi, struct mvpp2_queue_vector, napi);
7001
Marcin Wojtas3f518502014-07-10 16:52:13 -03007002 /* Rx/Tx cause register
7003 *
7004 * Bits 0-15: each bit indicates received packets on the Rx queue
7005 * (bit 0 is for Rx queue 0).
7006 *
7007 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
7008 * (bit 16 is for Tx queue 0).
7009 *
7010 * Each CPU has its own Rx/Tx cause register
7011 */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007012 cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
Thomas Petazzonia7868412017-03-07 16:53:13 +01007013 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03007014
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007015 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007016 if (cause_misc) {
7017 mvpp2_cause_error(port->dev, cause_misc);
7018
7019 /* Clear the cause register */
7020 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01007021 mvpp2_percpu_write(port->priv, cpu,
7022 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
7023 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007024 }
7025
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007026 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
7027 if (cause_tx) {
7028 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
7029 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
7030 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007031
7032 /* Process RX packets */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007033 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
7034 cause_rx <<= qv->first_rxq;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007035 cause_rx |= qv->pending_cause_rx;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007036 while (cause_rx && budget > 0) {
7037 int count;
7038 struct mvpp2_rx_queue *rxq;
7039
7040 rxq = mvpp2_get_rx_queue(port, cause_rx);
7041 if (!rxq)
7042 break;
7043
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007044 count = mvpp2_rx(port, napi, budget, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007045 rx_done += count;
7046 budget -= count;
7047 if (budget > 0) {
7048 /* Clear the bit associated to this Rx queue
7049 * so that next iteration will continue from
7050 * the next Rx queue.
7051 */
7052 cause_rx &= ~(1 << rxq->logic_rxq);
7053 }
7054 }
7055
7056 if (budget > 0) {
7057 cause_rx = 0;
Eric Dumazet6ad20162017-01-30 08:22:01 -08007058 napi_complete_done(napi, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007059
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007060 mvpp2_qvec_interrupt_enable(qv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007061 }
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007062 qv->pending_cause_rx = cause_rx;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007063 return rx_done;
7064}
7065
7066/* Set hw internals when starting port */
7067static void mvpp2_start_dev(struct mvpp2_port *port)
7068{
Philippe Reynes8e072692016-06-28 00:08:11 +02007069 struct net_device *ndev = port->dev;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007070 int i;
Philippe Reynes8e072692016-06-28 00:08:11 +02007071
Stefan Chulski76eb1b12017-08-22 19:08:26 +02007072 if (port->gop_id == 0 &&
7073 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
7074 port->phy_interface == PHY_INTERFACE_MODE_10GKR))
7075 mvpp2_xlg_max_rx_size_set(port);
7076 else
7077 mvpp2_gmac_max_rx_size_set(port);
7078
Marcin Wojtas3f518502014-07-10 16:52:13 -03007079 mvpp2_txp_max_tx_size_set(port);
7080
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007081 for (i = 0; i < port->nqvecs; i++)
7082 napi_enable(&port->qvecs[i].napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007083
7084 /* Enable interrupts on all CPUs */
7085 mvpp2_interrupts_enable(port);
7086
Antoine Tenart542897d2017-08-30 10:29:15 +02007087 if (port->priv->hw_version == MVPP22) {
7088 mvpp22_comphy_init(port);
Antoine Ténartf84bf382017-08-22 19:08:27 +02007089 mvpp22_gop_init(port);
Antoine Tenart542897d2017-08-30 10:29:15 +02007090 }
Antoine Ténartf84bf382017-08-22 19:08:27 +02007091
Antoine Ténart2055d622017-08-22 19:08:23 +02007092 mvpp2_port_mii_set(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007093 mvpp2_port_enable(port);
Antoine Tenart5997c862017-09-01 11:04:53 +02007094 if (ndev->phydev)
7095 phy_start(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007096 netif_tx_start_all_queues(port->dev);
7097}
7098
7099/* Set hw internals when stopping port */
7100static void mvpp2_stop_dev(struct mvpp2_port *port)
7101{
Philippe Reynes8e072692016-06-28 00:08:11 +02007102 struct net_device *ndev = port->dev;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007103 int i;
Philippe Reynes8e072692016-06-28 00:08:11 +02007104
Marcin Wojtas3f518502014-07-10 16:52:13 -03007105 /* Stop new packets from arriving to RXQs */
7106 mvpp2_ingress_disable(port);
7107
7108 mdelay(10);
7109
7110 /* Disable interrupts on all CPUs */
7111 mvpp2_interrupts_disable(port);
7112
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007113 for (i = 0; i < port->nqvecs; i++)
7114 napi_disable(&port->qvecs[i].napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007115
7116 netif_carrier_off(port->dev);
7117 netif_tx_stop_all_queues(port->dev);
7118
7119 mvpp2_egress_disable(port);
7120 mvpp2_port_disable(port);
Antoine Tenart5997c862017-09-01 11:04:53 +02007121 if (ndev->phydev)
7122 phy_stop(ndev->phydev);
Antoine Tenart542897d2017-08-30 10:29:15 +02007123 phy_power_off(port->comphy);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007124}
7125
Marcin Wojtas3f518502014-07-10 16:52:13 -03007126static int mvpp2_check_ringparam_valid(struct net_device *dev,
7127 struct ethtool_ringparam *ring)
7128{
7129 u16 new_rx_pending = ring->rx_pending;
7130 u16 new_tx_pending = ring->tx_pending;
7131
7132 if (ring->rx_pending == 0 || ring->tx_pending == 0)
7133 return -EINVAL;
7134
Yan Markman7cf87e42017-12-11 09:13:26 +01007135 if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
7136 new_rx_pending = MVPP2_MAX_RXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007137 else if (!IS_ALIGNED(ring->rx_pending, 16))
7138 new_rx_pending = ALIGN(ring->rx_pending, 16);
7139
Yan Markman7cf87e42017-12-11 09:13:26 +01007140 if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
7141 new_tx_pending = MVPP2_MAX_TXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007142 else if (!IS_ALIGNED(ring->tx_pending, 32))
7143 new_tx_pending = ALIGN(ring->tx_pending, 32);
7144
Antoine Tenart76e583c2017-11-28 14:19:51 +01007145 /* The Tx ring size cannot be smaller than the minimum number of
7146 * descriptors needed for TSO.
7147 */
7148 if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
7149 new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
7150
Marcin Wojtas3f518502014-07-10 16:52:13 -03007151 if (ring->rx_pending != new_rx_pending) {
7152 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
7153 ring->rx_pending, new_rx_pending);
7154 ring->rx_pending = new_rx_pending;
7155 }
7156
7157 if (ring->tx_pending != new_tx_pending) {
7158 netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
7159 ring->tx_pending, new_tx_pending);
7160 ring->tx_pending = new_tx_pending;
7161 }
7162
7163 return 0;
7164}
7165
Thomas Petazzoni26975822017-03-07 16:53:14 +01007166static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007167{
7168 u32 mac_addr_l, mac_addr_m, mac_addr_h;
7169
7170 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
7171 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
7172 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
7173 addr[0] = (mac_addr_h >> 24) & 0xFF;
7174 addr[1] = (mac_addr_h >> 16) & 0xFF;
7175 addr[2] = (mac_addr_h >> 8) & 0xFF;
7176 addr[3] = mac_addr_h & 0xFF;
7177 addr[4] = mac_addr_m & 0xFF;
7178 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
7179}
7180
7181static int mvpp2_phy_connect(struct mvpp2_port *port)
7182{
7183 struct phy_device *phy_dev;
7184
Antoine Tenart5997c862017-09-01 11:04:53 +02007185 /* No PHY is attached */
7186 if (!port->phy_node)
7187 return 0;
7188
Marcin Wojtas3f518502014-07-10 16:52:13 -03007189 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
7190 port->phy_interface);
7191 if (!phy_dev) {
7192 netdev_err(port->dev, "cannot connect to phy\n");
7193 return -ENODEV;
7194 }
7195 phy_dev->supported &= PHY_GBIT_FEATURES;
7196 phy_dev->advertising = phy_dev->supported;
7197
Marcin Wojtas3f518502014-07-10 16:52:13 -03007198 port->link = 0;
7199 port->duplex = 0;
7200 port->speed = 0;
7201
7202 return 0;
7203}
7204
7205static void mvpp2_phy_disconnect(struct mvpp2_port *port)
7206{
Philippe Reynes8e072692016-06-28 00:08:11 +02007207 struct net_device *ndev = port->dev;
7208
Antoine Tenart5997c862017-09-01 11:04:53 +02007209 if (!ndev->phydev)
7210 return;
7211
Philippe Reynes8e072692016-06-28 00:08:11 +02007212 phy_disconnect(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007213}
7214
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007215static int mvpp2_irqs_init(struct mvpp2_port *port)
7216{
7217 int err, i;
7218
7219 for (i = 0; i < port->nqvecs; i++) {
7220 struct mvpp2_queue_vector *qv = port->qvecs + i;
7221
Marc Zyngier13c249a2017-11-04 12:33:47 +00007222 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
7223 irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
7224
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007225 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
7226 if (err)
7227 goto err;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007228
7229 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
7230 irq_set_affinity_hint(qv->irq,
7231 cpumask_of(qv->sw_thread_id));
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007232 }
7233
7234 return 0;
7235err:
7236 for (i = 0; i < port->nqvecs; i++) {
7237 struct mvpp2_queue_vector *qv = port->qvecs + i;
7238
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007239 irq_set_affinity_hint(qv->irq, NULL);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007240 free_irq(qv->irq, qv);
7241 }
7242
7243 return err;
7244}
7245
7246static void mvpp2_irqs_deinit(struct mvpp2_port *port)
7247{
7248 int i;
7249
7250 for (i = 0; i < port->nqvecs; i++) {
7251 struct mvpp2_queue_vector *qv = port->qvecs + i;
7252
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007253 irq_set_affinity_hint(qv->irq, NULL);
Marc Zyngier13c249a2017-11-04 12:33:47 +00007254 irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007255 free_irq(qv->irq, qv);
7256 }
7257}
7258
Antoine Tenart1d7d15d2017-10-30 11:23:30 +01007259static void mvpp22_init_rss(struct mvpp2_port *port)
7260{
7261 struct mvpp2 *priv = port->priv;
7262 int i;
7263
7264 /* Set the table width: replace the whole classifier Rx queue number
7265 * with the ones configured in RSS table entries.
7266 */
7267 mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
7268 mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
7269
7270 /* Loop through the classifier Rx Queues and map them to a RSS table.
7271 * Map them all to the first table (0) by default.
7272 */
7273 for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
7274 mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
7275 mvpp2_write(priv, MVPP22_RSS_TABLE,
7276 MVPP22_RSS_TABLE_POINTER(0));
7277 }
7278
7279 /* Configure the first table to evenly distribute the packets across
7280 * real Rx Queues. The table entries map a hash to an port Rx Queue.
7281 */
7282 for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
7283 u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
7284 MVPP22_RSS_INDEX_TABLE_ENTRY(i);
7285 mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
7286
7287 mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
7288 }
7289
7290}
7291
Marcin Wojtas3f518502014-07-10 16:52:13 -03007292static int mvpp2_open(struct net_device *dev)
7293{
7294 struct mvpp2_port *port = netdev_priv(dev);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007295 struct mvpp2 *priv = port->priv;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007296 unsigned char mac_bcast[ETH_ALEN] = {
7297 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
7298 int err;
7299
7300 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
7301 if (err) {
7302 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
7303 return err;
7304 }
7305 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
7306 dev->dev_addr, true);
7307 if (err) {
7308 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
7309 return err;
7310 }
7311 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
7312 if (err) {
7313 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
7314 return err;
7315 }
7316 err = mvpp2_prs_def_flow(port);
7317 if (err) {
7318 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
7319 return err;
7320 }
7321
7322 /* Allocate the Rx/Tx queues */
7323 err = mvpp2_setup_rxqs(port);
7324 if (err) {
7325 netdev_err(port->dev, "cannot allocate Rx queues\n");
7326 return err;
7327 }
7328
7329 err = mvpp2_setup_txqs(port);
7330 if (err) {
7331 netdev_err(port->dev, "cannot allocate Tx queues\n");
7332 goto err_cleanup_rxqs;
7333 }
7334
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007335 err = mvpp2_irqs_init(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007336 if (err) {
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007337 netdev_err(port->dev, "cannot init IRQs\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03007338 goto err_cleanup_txqs;
7339 }
7340
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007341 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) {
7342 err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
7343 dev->name, port);
7344 if (err) {
7345 netdev_err(port->dev, "cannot request link IRQ %d\n",
7346 port->link_irq);
7347 goto err_free_irq;
7348 }
7349
7350 mvpp22_gop_setup_irq(port);
7351 }
7352
Marcin Wojtas3f518502014-07-10 16:52:13 -03007353 /* In default link is down */
7354 netif_carrier_off(port->dev);
7355
7356 err = mvpp2_phy_connect(port);
7357 if (err < 0)
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007358 goto err_free_link_irq;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007359
7360 /* Unmask interrupts on all CPUs */
7361 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007362 mvpp2_shared_interrupt_mask_unmask(port, false);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007363
7364 mvpp2_start_dev(port);
7365
Antoine Tenart1d7d15d2017-10-30 11:23:30 +01007366 if (priv->hw_version == MVPP22)
7367 mvpp22_init_rss(port);
7368
Miquel Raynal118d6292017-11-06 22:56:53 +01007369 /* Start hardware statistics gathering */
Miquel Raynale5c500e2017-11-08 08:59:40 +01007370 queue_delayed_work(priv->stats_queue, &port->stats_work,
Miquel Raynal118d6292017-11-06 22:56:53 +01007371 MVPP2_MIB_COUNTERS_STATS_DELAY);
7372
Marcin Wojtas3f518502014-07-10 16:52:13 -03007373 return 0;
7374
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007375err_free_link_irq:
7376 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
7377 free_irq(port->link_irq, port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007378err_free_irq:
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007379 mvpp2_irqs_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007380err_cleanup_txqs:
7381 mvpp2_cleanup_txqs(port);
7382err_cleanup_rxqs:
7383 mvpp2_cleanup_rxqs(port);
7384 return err;
7385}
7386
7387static int mvpp2_stop(struct net_device *dev)
7388{
7389 struct mvpp2_port *port = netdev_priv(dev);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007390 struct mvpp2_port_pcpu *port_pcpu;
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007391 struct mvpp2 *priv = port->priv;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007392 int cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007393
7394 mvpp2_stop_dev(port);
7395 mvpp2_phy_disconnect(port);
7396
7397 /* Mask interrupts on all CPUs */
7398 on_each_cpu(mvpp2_interrupts_mask, port, 1);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007399 mvpp2_shared_interrupt_mask_unmask(port, true);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007400
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007401 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
7402 free_irq(port->link_irq, port);
7403
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007404 mvpp2_irqs_deinit(port);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007405 if (!port->has_tx_irqs) {
7406 for_each_present_cpu(cpu) {
7407 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007408
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007409 hrtimer_cancel(&port_pcpu->tx_done_timer);
7410 port_pcpu->timer_scheduled = false;
7411 tasklet_kill(&port_pcpu->tx_done_tasklet);
7412 }
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007413 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007414 mvpp2_cleanup_rxqs(port);
7415 mvpp2_cleanup_txqs(port);
7416
Miquel Raynale5c500e2017-11-08 08:59:40 +01007417 cancel_delayed_work_sync(&port->stats_work);
Miquel Raynal118d6292017-11-06 22:56:53 +01007418
Marcin Wojtas3f518502014-07-10 16:52:13 -03007419 return 0;
7420}
7421
7422static void mvpp2_set_rx_mode(struct net_device *dev)
7423{
7424 struct mvpp2_port *port = netdev_priv(dev);
7425 struct mvpp2 *priv = port->priv;
7426 struct netdev_hw_addr *ha;
7427 int id = port->id;
7428 bool allmulti = dev->flags & IFF_ALLMULTI;
7429
Mikulas Patocka7ac8ff92018-02-11 18:10:28 -05007430retry:
Marcin Wojtas3f518502014-07-10 16:52:13 -03007431 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
7432 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
7433 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
7434
7435 /* Remove all port->id's mcast enries */
7436 mvpp2_prs_mcast_del_all(priv, id);
7437
Mikulas Patocka7ac8ff92018-02-11 18:10:28 -05007438 if (!allmulti) {
7439 netdev_for_each_mc_addr(ha, dev) {
7440 if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
7441 allmulti = true;
7442 goto retry;
7443 }
7444 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007445 }
Maxime Chevallier56beda32018-02-28 10:14:13 +01007446
7447 /* Disable VLAN filtering in promiscuous mode */
7448 if (dev->flags & IFF_PROMISC)
7449 mvpp2_prs_vid_disable_filtering(port);
7450 else
7451 mvpp2_prs_vid_enable_filtering(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007452}
7453
7454static int mvpp2_set_mac_address(struct net_device *dev, void *p)
7455{
7456 struct mvpp2_port *port = netdev_priv(dev);
7457 const struct sockaddr *addr = p;
7458 int err;
7459
7460 if (!is_valid_ether_addr(addr->sa_data)) {
7461 err = -EADDRNOTAVAIL;
Markus Elfringc1175542017-04-17 11:10:47 +02007462 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007463 }
7464
7465 if (!netif_running(dev)) {
7466 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
7467 if (!err)
7468 return 0;
7469 /* Reconfigure parser to accept the original MAC address */
7470 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
7471 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02007472 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007473 }
7474
7475 mvpp2_stop_dev(port);
7476
7477 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
7478 if (!err)
7479 goto out_start;
7480
7481 /* Reconfigure parser accept the original MAC address */
7482 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
7483 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02007484 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007485out_start:
7486 mvpp2_start_dev(port);
7487 mvpp2_egress_enable(port);
7488 mvpp2_ingress_enable(port);
7489 return 0;
Markus Elfringc1175542017-04-17 11:10:47 +02007490log_error:
Markus Elfringdfd42402017-04-17 11:20:41 +02007491 netdev_err(dev, "failed to change MAC address\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03007492 return err;
7493}
7494
7495static int mvpp2_change_mtu(struct net_device *dev, int mtu)
7496{
7497 struct mvpp2_port *port = netdev_priv(dev);
7498 int err;
7499
Jarod Wilson57779872016-10-17 15:54:06 -04007500 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
7501 netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
7502 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
7503 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007504 }
7505
7506 if (!netif_running(dev)) {
7507 err = mvpp2_bm_update_mtu(dev, mtu);
7508 if (!err) {
7509 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
7510 return 0;
7511 }
7512
7513 /* Reconfigure BM to the original MTU */
7514 err = mvpp2_bm_update_mtu(dev, dev->mtu);
7515 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02007516 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007517 }
7518
7519 mvpp2_stop_dev(port);
7520
7521 err = mvpp2_bm_update_mtu(dev, mtu);
7522 if (!err) {
7523 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
7524 goto out_start;
7525 }
7526
7527 /* Reconfigure BM to the original MTU */
7528 err = mvpp2_bm_update_mtu(dev, dev->mtu);
7529 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02007530 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007531
7532out_start:
7533 mvpp2_start_dev(port);
7534 mvpp2_egress_enable(port);
7535 mvpp2_ingress_enable(port);
7536
7537 return 0;
Markus Elfringc1175542017-04-17 11:10:47 +02007538log_error:
Markus Elfringdfd42402017-04-17 11:20:41 +02007539 netdev_err(dev, "failed to change MTU\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03007540 return err;
7541}
7542
stephen hemmingerbc1f4472017-01-06 19:12:52 -08007543static void
Marcin Wojtas3f518502014-07-10 16:52:13 -03007544mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7545{
7546 struct mvpp2_port *port = netdev_priv(dev);
7547 unsigned int start;
7548 int cpu;
7549
7550 for_each_possible_cpu(cpu) {
7551 struct mvpp2_pcpu_stats *cpu_stats;
7552 u64 rx_packets;
7553 u64 rx_bytes;
7554 u64 tx_packets;
7555 u64 tx_bytes;
7556
7557 cpu_stats = per_cpu_ptr(port->stats, cpu);
7558 do {
7559 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
7560 rx_packets = cpu_stats->rx_packets;
7561 rx_bytes = cpu_stats->rx_bytes;
7562 tx_packets = cpu_stats->tx_packets;
7563 tx_bytes = cpu_stats->tx_bytes;
7564 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
7565
7566 stats->rx_packets += rx_packets;
7567 stats->rx_bytes += rx_bytes;
7568 stats->tx_packets += tx_packets;
7569 stats->tx_bytes += tx_bytes;
7570 }
7571
7572 stats->rx_errors = dev->stats.rx_errors;
7573 stats->rx_dropped = dev->stats.rx_dropped;
7574 stats->tx_dropped = dev->stats.tx_dropped;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007575}
7576
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007577static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7578{
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007579 int ret;
7580
Philippe Reynes8e072692016-06-28 00:08:11 +02007581 if (!dev->phydev)
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007582 return -ENOTSUPP;
7583
Philippe Reynes8e072692016-06-28 00:08:11 +02007584 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007585 if (!ret)
7586 mvpp2_link_event(dev);
7587
7588 return ret;
7589}
7590
Maxime Chevallier56beda32018-02-28 10:14:13 +01007591static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
7592{
7593 struct mvpp2_port *port = netdev_priv(dev);
7594 int ret;
7595
7596 ret = mvpp2_prs_vid_entry_add(port, vid);
7597 if (ret)
7598 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
7599 MVPP2_PRS_VLAN_FILT_MAX - 1);
7600 return ret;
7601}
7602
7603static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
7604{
7605 struct mvpp2_port *port = netdev_priv(dev);
7606
7607 mvpp2_prs_vid_entry_remove(port, vid);
7608 return 0;
7609}
7610
7611static int mvpp2_set_features(struct net_device *dev,
7612 netdev_features_t features)
7613{
7614 netdev_features_t changed = dev->features ^ features;
7615 struct mvpp2_port *port = netdev_priv(dev);
7616
7617 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
7618 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
7619 mvpp2_prs_vid_enable_filtering(port);
7620 } else {
7621 /* Invalidate all registered VID filters for this
7622 * port
7623 */
7624 mvpp2_prs_vid_remove_all(port);
7625
7626 mvpp2_prs_vid_disable_filtering(port);
7627 }
7628 }
7629
7630 return 0;
7631}
7632
Marcin Wojtas3f518502014-07-10 16:52:13 -03007633/* Ethtool methods */
7634
Marcin Wojtas3f518502014-07-10 16:52:13 -03007635/* Set interrupt coalescing for ethtools */
7636static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
7637 struct ethtool_coalesce *c)
7638{
7639 struct mvpp2_port *port = netdev_priv(dev);
7640 int queue;
7641
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007642 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007643 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
7644
7645 rxq->time_coal = c->rx_coalesce_usecs;
7646 rxq->pkts_coal = c->rx_max_coalesced_frames;
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01007647 mvpp2_rx_pkts_coal_set(port, rxq);
7648 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007649 }
7650
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007651 if (port->has_tx_irqs) {
7652 port->tx_time_coal = c->tx_coalesce_usecs;
7653 mvpp2_tx_time_coal_set(port);
7654 }
7655
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007656 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007657 struct mvpp2_tx_queue *txq = port->txqs[queue];
7658
7659 txq->done_pkts_coal = c->tx_max_coalesced_frames;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007660
7661 if (port->has_tx_irqs)
7662 mvpp2_tx_pkts_coal_set(port, txq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007663 }
7664
Marcin Wojtas3f518502014-07-10 16:52:13 -03007665 return 0;
7666}
7667
7668/* get coalescing for ethtools */
7669static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
7670 struct ethtool_coalesce *c)
7671{
7672 struct mvpp2_port *port = netdev_priv(dev);
7673
Antoine Tenart385c2842017-12-11 09:13:27 +01007674 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
7675 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
7676 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
Antoine Tenart24b28cc2017-12-11 09:13:28 +01007677 c->tx_coalesce_usecs = port->tx_time_coal;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007678 return 0;
7679}
7680
7681static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
7682 struct ethtool_drvinfo *drvinfo)
7683{
7684 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
7685 sizeof(drvinfo->driver));
7686 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
7687 sizeof(drvinfo->version));
7688 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
7689 sizeof(drvinfo->bus_info));
7690}
7691
7692static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
7693 struct ethtool_ringparam *ring)
7694{
7695 struct mvpp2_port *port = netdev_priv(dev);
7696
Yan Markman7cf87e42017-12-11 09:13:26 +01007697 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
7698 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007699 ring->rx_pending = port->rx_ring_size;
7700 ring->tx_pending = port->tx_ring_size;
7701}
7702
7703static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
7704 struct ethtool_ringparam *ring)
7705{
7706 struct mvpp2_port *port = netdev_priv(dev);
7707 u16 prev_rx_ring_size = port->rx_ring_size;
7708 u16 prev_tx_ring_size = port->tx_ring_size;
7709 int err;
7710
7711 err = mvpp2_check_ringparam_valid(dev, ring);
7712 if (err)
7713 return err;
7714
7715 if (!netif_running(dev)) {
7716 port->rx_ring_size = ring->rx_pending;
7717 port->tx_ring_size = ring->tx_pending;
7718 return 0;
7719 }
7720
7721 /* The interface is running, so we have to force a
7722 * reallocation of the queues
7723 */
7724 mvpp2_stop_dev(port);
7725 mvpp2_cleanup_rxqs(port);
7726 mvpp2_cleanup_txqs(port);
7727
7728 port->rx_ring_size = ring->rx_pending;
7729 port->tx_ring_size = ring->tx_pending;
7730
7731 err = mvpp2_setup_rxqs(port);
7732 if (err) {
7733 /* Reallocate Rx queues with the original ring size */
7734 port->rx_ring_size = prev_rx_ring_size;
7735 ring->rx_pending = prev_rx_ring_size;
7736 err = mvpp2_setup_rxqs(port);
7737 if (err)
7738 goto err_out;
7739 }
7740 err = mvpp2_setup_txqs(port);
7741 if (err) {
7742 /* Reallocate Tx queues with the original ring size */
7743 port->tx_ring_size = prev_tx_ring_size;
7744 ring->tx_pending = prev_tx_ring_size;
7745 err = mvpp2_setup_txqs(port);
7746 if (err)
7747 goto err_clean_rxqs;
7748 }
7749
7750 mvpp2_start_dev(port);
7751 mvpp2_egress_enable(port);
7752 mvpp2_ingress_enable(port);
7753
7754 return 0;
7755
7756err_clean_rxqs:
7757 mvpp2_cleanup_rxqs(port);
7758err_out:
Markus Elfringdfd42402017-04-17 11:20:41 +02007759 netdev_err(dev, "failed to change ring parameters");
Marcin Wojtas3f518502014-07-10 16:52:13 -03007760 return err;
7761}
7762
7763/* Device ops */
7764
7765static const struct net_device_ops mvpp2_netdev_ops = {
7766 .ndo_open = mvpp2_open,
7767 .ndo_stop = mvpp2_stop,
7768 .ndo_start_xmit = mvpp2_tx,
7769 .ndo_set_rx_mode = mvpp2_set_rx_mode,
7770 .ndo_set_mac_address = mvpp2_set_mac_address,
7771 .ndo_change_mtu = mvpp2_change_mtu,
7772 .ndo_get_stats64 = mvpp2_get_stats64,
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007773 .ndo_do_ioctl = mvpp2_ioctl,
Maxime Chevallier56beda32018-02-28 10:14:13 +01007774 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
7775 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
7776 .ndo_set_features = mvpp2_set_features,
Marcin Wojtas3f518502014-07-10 16:52:13 -03007777};
7778
7779static const struct ethtool_ops mvpp2_eth_tool_ops = {
Florian Fainelli00606c42016-11-15 11:19:48 -08007780 .nway_reset = phy_ethtool_nway_reset,
Marcin Wojtas3f518502014-07-10 16:52:13 -03007781 .get_link = ethtool_op_get_link,
Marcin Wojtas3f518502014-07-10 16:52:13 -03007782 .set_coalesce = mvpp2_ethtool_set_coalesce,
7783 .get_coalesce = mvpp2_ethtool_get_coalesce,
7784 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
7785 .get_ringparam = mvpp2_ethtool_get_ringparam,
7786 .set_ringparam = mvpp2_ethtool_set_ringparam,
Miquel Raynal118d6292017-11-06 22:56:53 +01007787 .get_strings = mvpp2_ethtool_get_strings,
7788 .get_ethtool_stats = mvpp2_ethtool_get_stats,
7789 .get_sset_count = mvpp2_ethtool_get_sset_count,
Philippe Reynesfb773e92016-06-28 00:08:12 +02007790 .get_link_ksettings = phy_ethtool_get_link_ksettings,
7791 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Marcin Wojtas3f518502014-07-10 16:52:13 -03007792};
7793
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007794/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
7795 * had a single IRQ defined per-port.
7796 */
7797static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
7798 struct device_node *port_node)
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007799{
7800 struct mvpp2_queue_vector *v = &port->qvecs[0];
7801
7802 v->first_rxq = 0;
7803 v->nrxqs = port->nrxqs;
7804 v->type = MVPP2_QUEUE_VECTOR_SHARED;
7805 v->sw_thread_id = 0;
7806 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
7807 v->port = port;
7808 v->irq = irq_of_parse_and_map(port_node, 0);
7809 if (v->irq <= 0)
7810 return -EINVAL;
7811 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7812 NAPI_POLL_WEIGHT);
7813
7814 port->nqvecs = 1;
7815
7816 return 0;
7817}
7818
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007819static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
7820 struct device_node *port_node)
7821{
7822 struct mvpp2_queue_vector *v;
7823 int i, ret;
7824
7825 port->nqvecs = num_possible_cpus();
7826 if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
7827 port->nqvecs += 1;
7828
7829 for (i = 0; i < port->nqvecs; i++) {
7830 char irqname[16];
7831
7832 v = port->qvecs + i;
7833
7834 v->port = port;
7835 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
7836 v->sw_thread_id = i;
7837 v->sw_thread_mask = BIT(i);
7838
7839 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
7840
7841 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
7842 v->first_rxq = i * MVPP2_DEFAULT_RXQ;
7843 v->nrxqs = MVPP2_DEFAULT_RXQ;
7844 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
7845 i == (port->nqvecs - 1)) {
7846 v->first_rxq = 0;
7847 v->nrxqs = port->nrxqs;
7848 v->type = MVPP2_QUEUE_VECTOR_SHARED;
7849 strncpy(irqname, "rx-shared", sizeof(irqname));
7850 }
7851
Marcin Wojtasa75edc72018-01-18 13:31:44 +01007852 if (port_node)
7853 v->irq = of_irq_get_byname(port_node, irqname);
7854 else
7855 v->irq = fwnode_irq_get(port->fwnode, i);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007856 if (v->irq <= 0) {
7857 ret = -EINVAL;
7858 goto err;
7859 }
7860
7861 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7862 NAPI_POLL_WEIGHT);
7863 }
7864
7865 return 0;
7866
7867err:
7868 for (i = 0; i < port->nqvecs; i++)
7869 irq_dispose_mapping(port->qvecs[i].irq);
7870 return ret;
7871}
7872
7873static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
7874 struct device_node *port_node)
7875{
7876 if (port->has_tx_irqs)
7877 return mvpp2_multi_queue_vectors_init(port, port_node);
7878 else
7879 return mvpp2_simple_queue_vectors_init(port, port_node);
7880}
7881
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007882static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
7883{
7884 int i;
7885
7886 for (i = 0; i < port->nqvecs; i++)
7887 irq_dispose_mapping(port->qvecs[i].irq);
7888}
7889
7890/* Configure Rx queue group interrupt for this port */
7891static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
7892{
7893 struct mvpp2 *priv = port->priv;
7894 u32 val;
7895 int i;
7896
7897 if (priv->hw_version == MVPP21) {
7898 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
7899 port->nrxqs);
7900 return;
7901 }
7902
7903 /* Handle the more complicated PPv2.2 case */
7904 for (i = 0; i < port->nqvecs; i++) {
7905 struct mvpp2_queue_vector *qv = port->qvecs + i;
7906
7907 if (!qv->nrxqs)
7908 continue;
7909
7910 val = qv->sw_thread_id;
7911 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
7912 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
7913
7914 val = qv->first_rxq;
7915 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
7916 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
7917 }
7918}
7919
Marcin Wojtas3f518502014-07-10 16:52:13 -03007920/* Initialize port HW */
7921static int mvpp2_port_init(struct mvpp2_port *port)
7922{
7923 struct device *dev = port->dev->dev.parent;
7924 struct mvpp2 *priv = port->priv;
7925 struct mvpp2_txq_pcpu *txq_pcpu;
7926 int queue, cpu, err;
7927
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007928 /* Checks for hardware constraints */
7929 if (port->first_rxq + port->nrxqs >
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01007930 MVPP2_MAX_PORTS * priv->max_port_rxqs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007931 return -EINVAL;
7932
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007933 if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
7934 (port->ntxqs > MVPP2_MAX_TXQ))
7935 return -EINVAL;
7936
Marcin Wojtas3f518502014-07-10 16:52:13 -03007937 /* Disable port */
7938 mvpp2_egress_disable(port);
7939 mvpp2_port_disable(port);
7940
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007941 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
7942
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007943 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03007944 GFP_KERNEL);
7945 if (!port->txqs)
7946 return -ENOMEM;
7947
7948 /* Associate physical Tx queues to this port and initialize.
7949 * The mapping is predefined.
7950 */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007951 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007952 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
7953 struct mvpp2_tx_queue *txq;
7954
7955 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
Christophe Jaillet177c8d12017-02-19 10:19:57 +01007956 if (!txq) {
7957 err = -ENOMEM;
7958 goto err_free_percpu;
7959 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007960
7961 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
7962 if (!txq->pcpu) {
7963 err = -ENOMEM;
7964 goto err_free_percpu;
7965 }
7966
7967 txq->id = queue_phy_id;
7968 txq->log_id = queue;
7969 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
7970 for_each_present_cpu(cpu) {
7971 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
7972 txq_pcpu->cpu = cpu;
7973 }
7974
7975 port->txqs[queue] = txq;
7976 }
7977
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007978 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03007979 GFP_KERNEL);
7980 if (!port->rxqs) {
7981 err = -ENOMEM;
7982 goto err_free_percpu;
7983 }
7984
7985 /* Allocate and initialize Rx queue for this port */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007986 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007987 struct mvpp2_rx_queue *rxq;
7988
7989 /* Map physical Rx queue to port's logical Rx queue */
7990 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08007991 if (!rxq) {
7992 err = -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007993 goto err_free_percpu;
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08007994 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007995 /* Map this Rx queue to a physical queue */
7996 rxq->id = port->first_rxq + queue;
7997 rxq->port = port->id;
7998 rxq->logic_rxq = queue;
7999
8000 port->rxqs[queue] = rxq;
8001 }
8002
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008003 mvpp2_rx_irqs_setup(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008004
8005 /* Create Rx descriptor rings */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008006 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008007 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
8008
8009 rxq->size = port->rx_ring_size;
8010 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
8011 rxq->time_coal = MVPP2_RX_COAL_USEC;
8012 }
8013
8014 mvpp2_ingress_disable(port);
8015
8016 /* Port default configuration */
8017 mvpp2_defaults_set(port);
8018
8019 /* Port's classifier configuration */
8020 mvpp2_cls_oversize_rxq_set(port);
8021 mvpp2_cls_port_config(port);
8022
8023 /* Provide an initial Rx packet size */
8024 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
8025
8026 /* Initialize pools for swf */
8027 err = mvpp2_swf_bm_pool_init(port);
8028 if (err)
8029 goto err_free_percpu;
8030
8031 return 0;
8032
8033err_free_percpu:
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008034 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008035 if (!port->txqs[queue])
8036 continue;
8037 free_percpu(port->txqs[queue]->pcpu);
8038 }
8039 return err;
8040}
8041
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008042/* Checks if the port DT description has the TX interrupts
8043 * described. On PPv2.1, there are no such interrupts. On PPv2.2,
8044 * there are available, but we need to keep support for old DTs.
8045 */
8046static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
8047 struct device_node *port_node)
8048{
8049 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
8050 "tx-cpu2", "tx-cpu3" };
8051 int ret, i;
8052
8053 if (priv->hw_version == MVPP21)
8054 return false;
8055
8056 for (i = 0; i < 5; i++) {
8057 ret = of_property_match_string(port_node, "interrupt-names",
8058 irqs[i]);
8059 if (ret < 0)
8060 return false;
8061 }
8062
8063 return true;
8064}
8065
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008066static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
Marcin Wojtas24812222018-01-18 13:31:43 +01008067 struct fwnode_handle *fwnode,
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008068 char **mac_from)
8069{
8070 struct mvpp2_port *port = netdev_priv(dev);
8071 char hw_mac_addr[ETH_ALEN] = {0};
Marcin Wojtas24812222018-01-18 13:31:43 +01008072 char fw_mac_addr[ETH_ALEN];
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008073
Marcin Wojtas24812222018-01-18 13:31:43 +01008074 if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
8075 *mac_from = "firmware node";
8076 ether_addr_copy(dev->dev_addr, fw_mac_addr);
Antoine Tenart688cbaf2017-09-02 11:06:49 +02008077 return;
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008078 }
Antoine Tenart688cbaf2017-09-02 11:06:49 +02008079
8080 if (priv->hw_version == MVPP21) {
8081 mvpp21_get_mac_address(port, hw_mac_addr);
8082 if (is_valid_ether_addr(hw_mac_addr)) {
8083 *mac_from = "hardware";
8084 ether_addr_copy(dev->dev_addr, hw_mac_addr);
8085 return;
8086 }
8087 }
8088
8089 *mac_from = "random";
8090 eth_hw_addr_random(dev);
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008091}
8092
Marcin Wojtas3f518502014-07-10 16:52:13 -03008093/* Ports initialization */
8094static int mvpp2_port_probe(struct platform_device *pdev,
Marcin Wojtas24812222018-01-18 13:31:43 +01008095 struct fwnode_handle *port_fwnode,
Marcin Wojtasbf147152018-01-18 13:31:42 +01008096 struct mvpp2 *priv)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008097{
8098 struct device_node *phy_node;
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008099 struct phy *comphy = NULL;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008100 struct mvpp2_port *port;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008101 struct mvpp2_port_pcpu *port_pcpu;
Marcin Wojtas24812222018-01-18 13:31:43 +01008102 struct device_node *port_node = to_of_node(port_fwnode);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008103 struct net_device *dev;
8104 struct resource *res;
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008105 char *mac_from = "";
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008106 unsigned int ntxqs, nrxqs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008107 bool has_tx_irqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008108 u32 id;
8109 int features;
8110 int phy_mode;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008111 int err, i, cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008112
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008113 if (port_node) {
8114 has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
8115 } else {
8116 has_tx_irqs = true;
8117 queue_mode = MVPP2_QDIST_MULTI_MODE;
8118 }
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008119
8120 if (!has_tx_irqs)
8121 queue_mode = MVPP2_QDIST_SINGLE_MODE;
8122
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008123 ntxqs = MVPP2_MAX_TXQ;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008124 if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
8125 nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
8126 else
8127 nrxqs = MVPP2_DEFAULT_RXQ;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008128
8129 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008130 if (!dev)
8131 return -ENOMEM;
8132
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008133 if (port_node)
8134 phy_node = of_parse_phandle(port_node, "phy", 0);
8135 else
8136 phy_node = NULL;
8137
Marcin Wojtas24812222018-01-18 13:31:43 +01008138 phy_mode = fwnode_get_phy_mode(port_fwnode);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008139 if (phy_mode < 0) {
8140 dev_err(&pdev->dev, "incorrect phy mode\n");
8141 err = phy_mode;
8142 goto err_free_netdev;
8143 }
8144
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008145 if (port_node) {
8146 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
8147 if (IS_ERR(comphy)) {
8148 if (PTR_ERR(comphy) == -EPROBE_DEFER) {
8149 err = -EPROBE_DEFER;
8150 goto err_free_netdev;
8151 }
8152 comphy = NULL;
Antoine Tenart542897d2017-08-30 10:29:15 +02008153 }
Antoine Tenart542897d2017-08-30 10:29:15 +02008154 }
8155
Marcin Wojtas24812222018-01-18 13:31:43 +01008156 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008157 err = -EINVAL;
8158 dev_err(&pdev->dev, "missing port-id value\n");
8159 goto err_free_netdev;
8160 }
8161
Yan Markman7cf87e42017-12-11 09:13:26 +01008162 dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008163 dev->watchdog_timeo = 5 * HZ;
8164 dev->netdev_ops = &mvpp2_netdev_ops;
8165 dev->ethtool_ops = &mvpp2_eth_tool_ops;
8166
8167 port = netdev_priv(dev);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008168 port->dev = dev;
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008169 port->fwnode = port_fwnode;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008170 port->ntxqs = ntxqs;
8171 port->nrxqs = nrxqs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008172 port->priv = priv;
8173 port->has_tx_irqs = has_tx_irqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008174
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008175 err = mvpp2_queue_vectors_init(port, port_node);
8176 if (err)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008177 goto err_free_netdev;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008178
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008179 if (port_node)
8180 port->link_irq = of_irq_get_byname(port_node, "link");
8181 else
8182 port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008183 if (port->link_irq == -EPROBE_DEFER) {
8184 err = -EPROBE_DEFER;
8185 goto err_deinit_qvecs;
8186 }
8187 if (port->link_irq <= 0)
8188 /* the link irq is optional */
8189 port->link_irq = 0;
8190
Marcin Wojtas24812222018-01-18 13:31:43 +01008191 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
Marcin Wojtas3f518502014-07-10 16:52:13 -03008192 port->flags |= MVPP2_F_LOOPBACK;
8193
Marcin Wojtas3f518502014-07-10 16:52:13 -03008194 port->id = id;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01008195 if (priv->hw_version == MVPP21)
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008196 port->first_rxq = port->id * port->nrxqs;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01008197 else
8198 port->first_rxq = port->id * priv->max_port_rxqs;
8199
Marcin Wojtas3f518502014-07-10 16:52:13 -03008200 port->phy_node = phy_node;
8201 port->phy_interface = phy_mode;
Antoine Tenart542897d2017-08-30 10:29:15 +02008202 port->comphy = comphy;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008203
Thomas Petazzonia7868412017-03-07 16:53:13 +01008204 if (priv->hw_version == MVPP21) {
8205 res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
8206 port->base = devm_ioremap_resource(&pdev->dev, res);
8207 if (IS_ERR(port->base)) {
8208 err = PTR_ERR(port->base);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008209 goto err_free_irq;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008210 }
Miquel Raynal118d6292017-11-06 22:56:53 +01008211
8212 port->stats_base = port->priv->lms_base +
8213 MVPP21_MIB_COUNTERS_OFFSET +
8214 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008215 } else {
Marcin Wojtas24812222018-01-18 13:31:43 +01008216 if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
8217 &port->gop_id)) {
Thomas Petazzonia7868412017-03-07 16:53:13 +01008218 err = -EINVAL;
8219 dev_err(&pdev->dev, "missing gop-port-id value\n");
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008220 goto err_deinit_qvecs;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008221 }
8222
8223 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
Miquel Raynal118d6292017-11-06 22:56:53 +01008224 port->stats_base = port->priv->iface_base +
8225 MVPP22_MIB_COUNTERS_OFFSET +
8226 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008227 }
8228
Miquel Raynal118d6292017-11-06 22:56:53 +01008229 /* Alloc per-cpu and ethtool stats */
Marcin Wojtas3f518502014-07-10 16:52:13 -03008230 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
8231 if (!port->stats) {
8232 err = -ENOMEM;
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008233 goto err_free_irq;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008234 }
8235
Miquel Raynal118d6292017-11-06 22:56:53 +01008236 port->ethtool_stats = devm_kcalloc(&pdev->dev,
8237 ARRAY_SIZE(mvpp2_ethtool_regs),
8238 sizeof(u64), GFP_KERNEL);
8239 if (!port->ethtool_stats) {
8240 err = -ENOMEM;
8241 goto err_free_stats;
8242 }
8243
Miquel Raynale5c500e2017-11-08 08:59:40 +01008244 mutex_init(&port->gather_stats_lock);
8245 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
8246
Marcin Wojtas24812222018-01-18 13:31:43 +01008247 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008248
Yan Markman7cf87e42017-12-11 09:13:26 +01008249 port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
8250 port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008251 SET_NETDEV_DEV(dev, &pdev->dev);
8252
8253 err = mvpp2_port_init(port);
8254 if (err < 0) {
8255 dev_err(&pdev->dev, "failed to init port %d\n", id);
8256 goto err_free_stats;
8257 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01008258
Thomas Petazzoni26975822017-03-07 16:53:14 +01008259 mvpp2_port_periodic_xon_disable(port);
8260
8261 if (priv->hw_version == MVPP21)
8262 mvpp2_port_fc_adv_enable(port);
8263
8264 mvpp2_port_reset(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008265
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008266 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
8267 if (!port->pcpu) {
8268 err = -ENOMEM;
8269 goto err_free_txq_pcpu;
8270 }
8271
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008272 if (!port->has_tx_irqs) {
8273 for_each_present_cpu(cpu) {
8274 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008275
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008276 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
8277 HRTIMER_MODE_REL_PINNED);
8278 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
8279 port_pcpu->timer_scheduled = false;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008280
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008281 tasklet_init(&port_pcpu->tx_done_tasklet,
8282 mvpp2_tx_proc_cb,
8283 (unsigned long)dev);
8284 }
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008285 }
8286
Antoine Ténart186cd4d2017-08-23 09:46:56 +02008287 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008288 dev->features = features | NETIF_F_RXCSUM;
Maxime Chevallier56beda32018-02-28 10:14:13 +01008289 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
8290 NETIF_F_HW_VLAN_CTAG_FILTER;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008291 dev->vlan_features |= features;
Antoine Tenart1d17db02017-10-30 11:23:31 +01008292 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008293
Jarod Wilson57779872016-10-17 15:54:06 -04008294 /* MTU range: 68 - 9676 */
8295 dev->min_mtu = ETH_MIN_MTU;
8296 /* 9676 == 9700 - 20 and rounding to 8 */
8297 dev->max_mtu = 9676;
8298
Marcin Wojtas3f518502014-07-10 16:52:13 -03008299 err = register_netdev(dev);
8300 if (err < 0) {
8301 dev_err(&pdev->dev, "failed to register netdev\n");
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008302 goto err_free_port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008303 }
8304 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
8305
Marcin Wojtasbf147152018-01-18 13:31:42 +01008306 priv->port_list[priv->port_count++] = port;
8307
Marcin Wojtas3f518502014-07-10 16:52:13 -03008308 return 0;
8309
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008310err_free_port_pcpu:
8311 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008312err_free_txq_pcpu:
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008313 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008314 free_percpu(port->txqs[i]->pcpu);
8315err_free_stats:
8316 free_percpu(port->stats);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008317err_free_irq:
8318 if (port->link_irq)
8319 irq_dispose_mapping(port->link_irq);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008320err_deinit_qvecs:
8321 mvpp2_queue_vectors_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008322err_free_netdev:
Peter Chenccb80392016-08-01 15:02:37 +08008323 of_node_put(phy_node);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008324 free_netdev(dev);
8325 return err;
8326}
8327
8328/* Ports removal routine */
8329static void mvpp2_port_remove(struct mvpp2_port *port)
8330{
8331 int i;
8332
8333 unregister_netdev(port->dev);
Peter Chenccb80392016-08-01 15:02:37 +08008334 of_node_put(port->phy_node);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008335 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008336 free_percpu(port->stats);
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008337 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008338 free_percpu(port->txqs[i]->pcpu);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008339 mvpp2_queue_vectors_deinit(port);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008340 if (port->link_irq)
8341 irq_dispose_mapping(port->link_irq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008342 free_netdev(port->dev);
8343}
8344
8345/* Initialize decoding windows */
8346static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
8347 struct mvpp2 *priv)
8348{
8349 u32 win_enable;
8350 int i;
8351
8352 for (i = 0; i < 6; i++) {
8353 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
8354 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
8355
8356 if (i < 4)
8357 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
8358 }
8359
8360 win_enable = 0;
8361
8362 for (i = 0; i < dram->num_cs; i++) {
8363 const struct mbus_dram_window *cs = dram->cs + i;
8364
8365 mvpp2_write(priv, MVPP2_WIN_BASE(i),
8366 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
8367 dram->mbus_dram_target_id);
8368
8369 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
8370 (cs->size - 1) & 0xffff0000);
8371
8372 win_enable |= (1 << i);
8373 }
8374
8375 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
8376}
8377
8378/* Initialize Rx FIFO's */
8379static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
8380{
8381 int port;
8382
8383 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
8384 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
Antoine Tenart2d1d7df2017-10-30 11:23:28 +01008385 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008386 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
Antoine Tenart2d1d7df2017-10-30 11:23:28 +01008387 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
8388 }
8389
8390 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
8391 MVPP2_RX_FIFO_PORT_MIN_PKT);
8392 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
8393}
8394
8395static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
8396{
8397 int port;
8398
8399 /* The FIFO size parameters are set depending on the maximum speed a
8400 * given port can handle:
8401 * - Port 0: 10Gbps
8402 * - Port 1: 2.5Gbps
8403 * - Ports 2 and 3: 1Gbps
8404 */
8405
8406 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
8407 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
8408 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
8409 MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
8410
8411 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
8412 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
8413 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
8414 MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
8415
8416 for (port = 2; port < MVPP2_MAX_PORTS; port++) {
8417 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
8418 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
8419 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
8420 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008421 }
8422
8423 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
8424 MVPP2_RX_FIFO_PORT_MIN_PKT);
8425 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
8426}
8427
Antoine Tenart7c10f972017-10-30 11:23:29 +01008428/* Initialize Tx FIFO's */
8429static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
8430{
8431 int port;
8432
8433 for (port = 0; port < MVPP2_MAX_PORTS; port++)
8434 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port),
8435 MVPP22_TX_FIFO_DATA_SIZE_3KB);
8436}
8437
Thomas Petazzoni6763ce32017-03-07 16:53:15 +01008438static void mvpp2_axi_init(struct mvpp2 *priv)
8439{
8440 u32 val, rdval, wrval;
8441
8442 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
8443
8444 /* AXI Bridge Configuration */
8445
8446 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
8447 << MVPP22_AXI_ATTR_CACHE_OFFS;
8448 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8449 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
8450
8451 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
8452 << MVPP22_AXI_ATTR_CACHE_OFFS;
8453 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8454 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
8455
8456 /* BM */
8457 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
8458 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
8459
8460 /* Descriptors */
8461 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
8462 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
8463 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
8464 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
8465
8466 /* Buffer Data */
8467 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
8468 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
8469
8470 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
8471 << MVPP22_AXI_CODE_CACHE_OFFS;
8472 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
8473 << MVPP22_AXI_CODE_DOMAIN_OFFS;
8474 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
8475 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
8476
8477 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
8478 << MVPP22_AXI_CODE_CACHE_OFFS;
8479 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8480 << MVPP22_AXI_CODE_DOMAIN_OFFS;
8481
8482 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
8483
8484 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
8485 << MVPP22_AXI_CODE_CACHE_OFFS;
8486 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8487 << MVPP22_AXI_CODE_DOMAIN_OFFS;
8488
8489 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
8490}
8491
Marcin Wojtas3f518502014-07-10 16:52:13 -03008492/* Initialize network controller common part HW */
8493static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
8494{
8495 const struct mbus_dram_target_info *dram_target_info;
8496 int err, i;
Marcin Wojtas08a23752014-07-21 13:48:12 -03008497 u32 val;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008498
Marcin Wojtas3f518502014-07-10 16:52:13 -03008499 /* MBUS windows configuration */
8500 dram_target_info = mv_mbus_dram_info();
8501 if (dram_target_info)
8502 mvpp2_conf_mbus_windows(dram_target_info, priv);
8503
Thomas Petazzoni6763ce32017-03-07 16:53:15 +01008504 if (priv->hw_version == MVPP22)
8505 mvpp2_axi_init(priv);
8506
Marcin Wojtas08a23752014-07-21 13:48:12 -03008507 /* Disable HW PHY polling */
Thomas Petazzoni26975822017-03-07 16:53:14 +01008508 if (priv->hw_version == MVPP21) {
8509 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
8510 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
8511 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
8512 } else {
8513 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
8514 val &= ~MVPP22_SMI_POLLING_EN;
8515 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
8516 }
Marcin Wojtas08a23752014-07-21 13:48:12 -03008517
Marcin Wojtas3f518502014-07-10 16:52:13 -03008518 /* Allocate and initialize aggregated TXQs */
8519 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
Markus Elfringd7ce3ce2017-04-17 08:48:23 +02008520 sizeof(*priv->aggr_txqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03008521 GFP_KERNEL);
8522 if (!priv->aggr_txqs)
8523 return -ENOMEM;
8524
8525 for_each_present_cpu(i) {
8526 priv->aggr_txqs[i].id = i;
8527 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
Antoine Ténart85affd72017-08-23 09:46:55 +02008528 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008529 if (err < 0)
8530 return err;
8531 }
8532
Antoine Tenart7c10f972017-10-30 11:23:29 +01008533 /* Fifo Init */
8534 if (priv->hw_version == MVPP21) {
Antoine Tenart2d1d7df2017-10-30 11:23:28 +01008535 mvpp2_rx_fifo_init(priv);
Antoine Tenart7c10f972017-10-30 11:23:29 +01008536 } else {
Antoine Tenart2d1d7df2017-10-30 11:23:28 +01008537 mvpp22_rx_fifo_init(priv);
Antoine Tenart7c10f972017-10-30 11:23:29 +01008538 mvpp22_tx_fifo_init(priv);
8539 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008540
Thomas Petazzoni26975822017-03-07 16:53:14 +01008541 if (priv->hw_version == MVPP21)
8542 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
8543 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008544
8545 /* Allow cache snoop when transmiting packets */
8546 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
8547
8548 /* Buffer Manager initialization */
8549 err = mvpp2_bm_init(pdev, priv);
8550 if (err < 0)
8551 return err;
8552
8553 /* Parser default initialization */
8554 err = mvpp2_prs_default_init(pdev, priv);
8555 if (err < 0)
8556 return err;
8557
8558 /* Classifier default initialization */
8559 mvpp2_cls_init(priv);
8560
8561 return 0;
8562}
8563
8564static int mvpp2_probe(struct platform_device *pdev)
8565{
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008566 const struct acpi_device_id *acpi_id;
Marcin Wojtas24812222018-01-18 13:31:43 +01008567 struct fwnode_handle *fwnode = pdev->dev.fwnode;
8568 struct fwnode_handle *port_fwnode;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008569 struct mvpp2 *priv;
8570 struct resource *res;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008571 void __iomem *base;
Miquel Raynal118d6292017-11-06 22:56:53 +01008572 int i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008573 int err;
8574
Markus Elfring0b92e592017-04-17 08:38:32 +02008575 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008576 if (!priv)
8577 return -ENOMEM;
8578
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008579 if (has_acpi_companion(&pdev->dev)) {
8580 acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
8581 &pdev->dev);
8582 priv->hw_version = (unsigned long)acpi_id->driver_data;
8583 } else {
8584 priv->hw_version =
8585 (unsigned long)of_device_get_match_data(&pdev->dev);
8586 }
Thomas Petazzonifaca9242017-03-07 16:53:06 +01008587
Marcin Wojtas3f518502014-07-10 16:52:13 -03008588 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01008589 base = devm_ioremap_resource(&pdev->dev, res);
8590 if (IS_ERR(base))
8591 return PTR_ERR(base);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008592
Thomas Petazzonia7868412017-03-07 16:53:13 +01008593 if (priv->hw_version == MVPP21) {
8594 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
8595 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
8596 if (IS_ERR(priv->lms_base))
8597 return PTR_ERR(priv->lms_base);
8598 } else {
8599 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008600 if (has_acpi_companion(&pdev->dev)) {
8601 /* In case the MDIO memory region is declared in
8602 * the ACPI, it can already appear as 'in-use'
8603 * in the OS. Because it is overlapped by second
8604 * region of the network controller, make
8605 * sure it is released, before requesting it again.
8606 * The care is taken by mvpp2 driver to avoid
8607 * concurrent access to this memory region.
8608 */
8609 release_resource(res);
8610 }
Thomas Petazzonia7868412017-03-07 16:53:13 +01008611 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
8612 if (IS_ERR(priv->iface_base))
8613 return PTR_ERR(priv->iface_base);
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008614 }
Antoine Ténartf84bf382017-08-22 19:08:27 +02008615
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008616 if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
Antoine Ténartf84bf382017-08-22 19:08:27 +02008617 priv->sysctrl_base =
8618 syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
8619 "marvell,system-controller");
8620 if (IS_ERR(priv->sysctrl_base))
8621 /* The system controller regmap is optional for dt
8622 * compatibility reasons. When not provided, the
8623 * configuration of the GoP relies on the
8624 * firmware/bootloader.
8625 */
8626 priv->sysctrl_base = NULL;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008627 }
8628
Stefan Chulski01d04932018-03-05 15:16:50 +01008629 mvpp2_setup_bm_pool();
8630
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02008631 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
Thomas Petazzonia7868412017-03-07 16:53:13 +01008632 u32 addr_space_sz;
8633
8634 addr_space_sz = (priv->hw_version == MVPP21 ?
8635 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02008636 priv->swth_base[i] = base + i * addr_space_sz;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008637 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008638
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01008639 if (priv->hw_version == MVPP21)
8640 priv->max_port_rxqs = 8;
8641 else
8642 priv->max_port_rxqs = 32;
8643
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008644 if (dev_of_node(&pdev->dev)) {
8645 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
8646 if (IS_ERR(priv->pp_clk))
8647 return PTR_ERR(priv->pp_clk);
8648 err = clk_prepare_enable(priv->pp_clk);
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008649 if (err < 0)
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008650 return err;
8651
8652 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
8653 if (IS_ERR(priv->gop_clk)) {
8654 err = PTR_ERR(priv->gop_clk);
8655 goto err_pp_clk;
8656 }
8657 err = clk_prepare_enable(priv->gop_clk);
8658 if (err < 0)
8659 goto err_pp_clk;
8660
8661 if (priv->hw_version == MVPP22) {
8662 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
8663 if (IS_ERR(priv->mg_clk)) {
8664 err = PTR_ERR(priv->mg_clk);
8665 goto err_gop_clk;
8666 }
8667
8668 err = clk_prepare_enable(priv->mg_clk);
8669 if (err < 0)
8670 goto err_gop_clk;
8671 }
Gregory CLEMENT4792ea02017-09-29 14:27:39 +02008672
8673 priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
8674 if (IS_ERR(priv->axi_clk)) {
8675 err = PTR_ERR(priv->axi_clk);
8676 if (err == -EPROBE_DEFER)
8677 goto err_gop_clk;
8678 priv->axi_clk = NULL;
8679 } else {
8680 err = clk_prepare_enable(priv->axi_clk);
8681 if (err < 0)
8682 goto err_gop_clk;
8683 }
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008684
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008685 /* Get system's tclk rate */
8686 priv->tclk = clk_get_rate(priv->pp_clk);
8687 } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
8688 &priv->tclk)) {
8689 dev_err(&pdev->dev, "missing clock-frequency value\n");
8690 return -EINVAL;
8691 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008692
Thomas Petazzoni2067e0a2017-03-07 16:53:19 +01008693 if (priv->hw_version == MVPP22) {
8694 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
8695 if (err)
8696 goto err_mg_clk;
8697 /* Sadly, the BM pools all share the same register to
8698 * store the high 32 bits of their address. So they
8699 * must all have the same high 32 bits, which forces
8700 * us to restrict coherent memory to DMA_BIT_MASK(32).
8701 */
8702 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
8703 if (err)
8704 goto err_mg_clk;
8705 }
8706
Marcin Wojtas3f518502014-07-10 16:52:13 -03008707 /* Initialize network controller */
8708 err = mvpp2_init(pdev, priv);
8709 if (err < 0) {
8710 dev_err(&pdev->dev, "failed to initialize controller\n");
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008711 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008712 }
8713
Marcin Wojtasbf147152018-01-18 13:31:42 +01008714 /* Initialize ports */
Marcin Wojtas24812222018-01-18 13:31:43 +01008715 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
8716 err = mvpp2_port_probe(pdev, port_fwnode, priv);
Marcin Wojtasbf147152018-01-18 13:31:42 +01008717 if (err < 0)
8718 goto err_port_probe;
8719 }
8720
Miquel Raynal118d6292017-11-06 22:56:53 +01008721 if (priv->port_count == 0) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008722 dev_err(&pdev->dev, "no ports enabled\n");
Wei Yongjun575a1932014-07-20 22:02:43 +08008723 err = -ENODEV;
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008724 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008725 }
8726
Miquel Raynal118d6292017-11-06 22:56:53 +01008727 /* Statistics must be gathered regularly because some of them (like
8728 * packets counters) are 32-bit registers and could overflow quite
8729 * quickly. For instance, a 10Gb link used at full bandwidth with the
8730 * smallest packets (64B) will overflow a 32-bit counter in less than
8731 * 30 seconds. Then, use a workqueue to fill 64-bit counters.
8732 */
Miquel Raynal118d6292017-11-06 22:56:53 +01008733 snprintf(priv->queue_name, sizeof(priv->queue_name),
8734 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
8735 priv->port_count > 1 ? "+" : "");
8736 priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
8737 if (!priv->stats_queue) {
8738 err = -ENOMEM;
Antoine Tenart26146b02017-11-28 14:19:49 +01008739 goto err_port_probe;
Miquel Raynal118d6292017-11-06 22:56:53 +01008740 }
8741
Marcin Wojtas3f518502014-07-10 16:52:13 -03008742 platform_set_drvdata(pdev, priv);
8743 return 0;
8744
Antoine Tenart26146b02017-11-28 14:19:49 +01008745err_port_probe:
8746 i = 0;
Marcin Wojtas24812222018-01-18 13:31:43 +01008747 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
Antoine Tenart26146b02017-11-28 14:19:49 +01008748 if (priv->port_list[i])
8749 mvpp2_port_remove(priv->port_list[i]);
8750 i++;
8751 }
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008752err_mg_clk:
Gregory CLEMENT4792ea02017-09-29 14:27:39 +02008753 clk_disable_unprepare(priv->axi_clk);
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008754 if (priv->hw_version == MVPP22)
8755 clk_disable_unprepare(priv->mg_clk);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008756err_gop_clk:
8757 clk_disable_unprepare(priv->gop_clk);
8758err_pp_clk:
8759 clk_disable_unprepare(priv->pp_clk);
8760 return err;
8761}
8762
8763static int mvpp2_remove(struct platform_device *pdev)
8764{
8765 struct mvpp2 *priv = platform_get_drvdata(pdev);
Marcin Wojtas24812222018-01-18 13:31:43 +01008766 struct fwnode_handle *fwnode = pdev->dev.fwnode;
8767 struct fwnode_handle *port_fwnode;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008768 int i = 0;
8769
Miquel Raynale5c500e2017-11-08 08:59:40 +01008770 flush_workqueue(priv->stats_queue);
Miquel Raynal118d6292017-11-06 22:56:53 +01008771 destroy_workqueue(priv->stats_queue);
Miquel Raynal118d6292017-11-06 22:56:53 +01008772
Marcin Wojtas24812222018-01-18 13:31:43 +01008773 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
Miquel Raynale5c500e2017-11-08 08:59:40 +01008774 if (priv->port_list[i]) {
8775 mutex_destroy(&priv->port_list[i]->gather_stats_lock);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008776 mvpp2_port_remove(priv->port_list[i]);
Miquel Raynale5c500e2017-11-08 08:59:40 +01008777 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008778 i++;
8779 }
8780
8781 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
8782 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
8783
8784 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
8785 }
8786
8787 for_each_present_cpu(i) {
8788 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
8789
8790 dma_free_coherent(&pdev->dev,
8791 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
8792 aggr_txq->descs,
Thomas Petazzoni20396132017-03-07 16:53:00 +01008793 aggr_txq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008794 }
8795
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008796 if (is_acpi_node(port_fwnode))
8797 return 0;
8798
Gregory CLEMENT4792ea02017-09-29 14:27:39 +02008799 clk_disable_unprepare(priv->axi_clk);
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008800 clk_disable_unprepare(priv->mg_clk);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008801 clk_disable_unprepare(priv->pp_clk);
8802 clk_disable_unprepare(priv->gop_clk);
8803
8804 return 0;
8805}
8806
8807static const struct of_device_id mvpp2_match[] = {
Thomas Petazzonifaca9242017-03-07 16:53:06 +01008808 {
8809 .compatible = "marvell,armada-375-pp2",
8810 .data = (void *)MVPP21,
8811 },
Thomas Petazzonifc5e1552017-03-07 16:53:20 +01008812 {
8813 .compatible = "marvell,armada-7k-pp22",
8814 .data = (void *)MVPP22,
8815 },
Marcin Wojtas3f518502014-07-10 16:52:13 -03008816 { }
8817};
8818MODULE_DEVICE_TABLE(of, mvpp2_match);
8819
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008820static const struct acpi_device_id mvpp2_acpi_match[] = {
8821 { "MRVL0110", MVPP22 },
8822 { },
8823};
8824MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
8825
Marcin Wojtas3f518502014-07-10 16:52:13 -03008826static struct platform_driver mvpp2_driver = {
8827 .probe = mvpp2_probe,
8828 .remove = mvpp2_remove,
8829 .driver = {
8830 .name = MVPP2_DRIVER_NAME,
8831 .of_match_table = mvpp2_match,
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008832 .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
Marcin Wojtas3f518502014-07-10 16:52:13 -03008833 },
8834};
8835
8836module_platform_driver(mvpp2_driver);
8837
8838MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
8839MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
Ezequiel Garciac6340992014-07-14 10:34:47 -03008840MODULE_LICENSE("GPL v2");