/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

/* Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64
#define MVPP22_TX_FIFO_SIZE_REG(port)		(0x8860 + 4 * (port))

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)

/* Top Registers */
#define MVPP2_MH_REG(port)			(0x5040 + 4 * (port))
#define MVPP2_DSA_EXTENDED			BIT(5)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* RSS Registers */
#define MVPP22_RSS_INDEX			0x1500
#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx)	(idx)
#define MVPP22_RSS_INDEX_TABLE(idx)		((idx) << 8)
#define MVPP22_RSS_INDEX_QUEUE(idx)		((idx) << 16)
#define MVPP22_RSS_TABLE_ENTRY			0x1508
#define MVPP22_RSS_TABLE			0x1510
#define MVPP22_RSS_TABLE_POINTER(p)		(p)
#define MVPP22_RSS_WIDTH			0x150c

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TXQ_THRESH_OFFSET			16
#define MVPP2_TXQ_THRESH_MASK			0x3fff
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_TX_THRESHOLD_REG(port)	(0x5140 + 4 * (port))
#define MVPP2_MAX_ISR_TX_THRESHOLD		0xfffff0

#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP2_MAX_ISR_RX_THRESHOLD		0xfffff0
#define MVPP21_ISR_RXQ_GROUP_REG(port)		(0x5400 + 4 * (port))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7

#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET	16
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP22_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP22_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_FLOW_CTRL_MASK		GENMASK(2, 1)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_INTERNAL_CLK_MASK		BIT(4)
#define MVPP2_GMAC_DISABLE_PADDING		BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_IN_BAND_AUTONEG		BIT(2)
#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS	BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_FLOW_CTRL_AUTONEG		BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_STATUS0			0x10
#define MVPP2_GMAC_STATUS0_LINK_UP		BIT(0)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP22_GMAC_INT_STAT			0x20
#define MVPP22_GMAC_INT_STAT_LINK		BIT(1)
#define MVPP22_GMAC_INT_MASK			0x24
#define MVPP22_GMAC_INT_MASK_LINK_STAT		BIT(1)
#define MVPP22_GMAC_CTRL_4_REG			0x90
#define MVPP22_CTRL4_EXT_PIN_GMII_SEL		BIT(0)
#define MVPP22_CTRL4_DP_CLK_SEL			BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS_DIS		BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE	BIT(7)
#define MVPP22_GMAC_INT_SUM_MASK		0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT	BIT(1)

/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define MVPP22_XLG_CTRL0_PORT_EN		BIT(0)
#define MVPP22_XLG_CTRL0_MAC_RESET_DIS		BIT(1)
#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN	BIT(7)
#define MVPP22_XLG_CTRL0_MIB_CNT_DIS		BIT(14)
#define MVPP22_XLG_CTRL1_REG			0x104
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS	0
#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK	0x1fff
#define MVPP22_XLG_STATUS			0x10c
#define MVPP22_XLG_STATUS_LINK_UP		BIT(0)
#define MVPP22_XLG_INT_STAT			0x114
#define MVPP22_XLG_INT_STAT_LINK		BIT(1)
#define MVPP22_XLG_INT_MASK			0x118
#define MVPP22_XLG_INT_MASK_LINK		BIT(1)
#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G	(1 << 13)
#define MVPP22_XLG_EXT_INT_MASK			0x15c
#define MVPP22_XLG_EXT_INT_MASK_XLG		BIT(1)
#define MVPP22_XLG_EXT_INT_MASK_GIG		BIT(2)
#define MVPP22_XLG_CTRL4_REG			0x184
#define MVPP22_XLG_CTRL4_FWD_FC			BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC		BIT(6)
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC	BIT(12)

/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG			0x1204
#define MVPP22_SMI_POLLING_EN			BIT(10)

#define MVPP22_GMAC_BASE(port)			(0x7000 + (port) * 0x1000 + 0xe00)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
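/* Illustrative example: with a 256-descriptor ring such as
 * MVPP2_AGGR_TXQ_SIZE, last_desc is 255, so
 * MVPP2_QUEUE_NEXT_DESC(q, 10) yields 11 while
 * MVPP2_QUEUE_NEXT_DESC(q, 255) wraps around to 0.
 */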

/* MPCS registers. PPv2.2 only */
#define MVPP22_MPCS_BASE(port)			(0x7000 + (port) * 0x1000)
#define MVPP22_MPCS_CTRL			0x14
#define MVPP22_MPCS_CTRL_FWD_ERR_CONN		BIT(10)
#define MVPP22_MPCS_CLK_RESET			0x14c
#define MAC_CLK_RESET_SD_TX			BIT(0)
#define MAC_CLK_RESET_SD_RX			BIT(1)
#define MAC_CLK_RESET_MAC			BIT(2)
#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n)	((n) << 4)
#define MVPP22_MPCS_CLK_RESET_DIV_SET		BIT(11)

/* XPCS registers. PPv2.2 only */
#define MVPP22_XPCS_BASE(port)			(0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0			0x0
#define MVPP22_XPCS_CFG0_PCS_MODE(n)		((n) << 3)
#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n)		((n) << 5)

/* System controller registers. Accessed through a regmap. */
#define GENCONF_SOFT_RESET1			0x1108
#define GENCONF_SOFT_RESET1_GOP			BIT(6)
#define GENCONF_PORT_CTRL0			0x1110
#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT	BIT(1)
#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE	BIT(29)
#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR	BIT(31)
#define GENCONF_PORT_CTRL1			0x1114
#define GENCONF_PORT_CTRL1_EN(p)		BIT(p)
#define GENCONF_PORT_CTRL1_RESET(p)		(BIT(p) << 28)
#define GENCONF_CTRL0				0x1120
#define GENCONF_CTRL0_PORT0_RGMII		BIT(0)
#define GENCONF_CTRL0_PORT1_RGMII_MII		BIT(1)
#define GENCONF_CTRL0_PORT1_RGMII		BIT(2)

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH		64
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS		1000000UL
#define MVPP2_TXDONE_COAL_USEC			1000
#define MVPP2_RX_COAL_PKTS			32
#define MVPP2_RX_COAL_USEC			64

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4 byte
 * boundary: the hardware skips them on its own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4
#define MVPP2_VLAN_TAG_EDSA_LEN		8
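/* Illustrative arithmetic: MVPP2_MH_SIZE (2) plus ETH_HLEN (14) is 16
 * bytes, so with the Marvell header prepended the IP header naturally
 * starts on a 4 byte boundary, as described above.
 */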

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO
 * skb. As we need a maximum of two descriptors per fragment (1 header, 1
 * data), multiply this value by two to count the maximum number of skb descs
 * needed.
 */
#define MVPP2_MAX_TSO_SEGS		300
#define MVPP2_MAX_SKB_DESCS		(MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
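/* Illustrative arithmetic: with the common MAX_SKB_FRAGS value of 17
 * (4 KB pages), MVPP2_MAX_SKB_DESCS evaluates to 300 * 2 + 17 = 617
 * descriptors reserved for a worst-case TSO skb.
 */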

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		4

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD_MAX		1024
#define MVPP2_MAX_RXD_DFLT		128

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD_MAX		2048
#define MVPP2_MAX_TXD_DFLT		1024

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB	0x8000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB	0x2000
#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB	0x1000
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB	0x200
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB	0x80
#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB	0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80

/* TX FIFO constants */
#define MVPP22_TX_FIFO_DATA_SIZE_10KB		0xa
#define MVPP22_TX_FIFO_DATA_SIZE_3KB		0x3

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, cache_line_size())

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
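/* Worked example (illustrative, assuming a 64 byte cache line and
 * NET_SKB_PAD of 64): for an MTU of 1500,
 * MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536,
 * MVPP2_RX_BUF_SIZE(1536) = 1536 + 64 = 1600, and MVPP2_RX_TOTAL_SIZE()
 * then adds the skb_shared_info area on top of that.
 */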

#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS		8
#define MVPP2_PRS_PORT_MASK		0xff
#define MVPP2_PRS_LU_MASK		0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)	\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
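/* Illustrative mapping: each 32-bit TCAM word carries two data bytes
 * in its low half and their enable bytes in its high half, so
 * MVPP2_PRS_TCAM_DATA_BYTE() maps offs 0, 1, 2, 3 to bytes 0, 1, 4, 5,
 * while MVPP2_PRS_TCAM_DATA_BYTE_EN() maps the same offsets to bytes
 * 2, 3, 6, 7.
 */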
#define MVPP2_PRS_TCAM_AI_BYTE		16
#define MVPP2_PRS_TCAM_PORT_BYTE	17
#define MVPP2_PRS_TCAM_LU_BYTE		20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)	((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD		5

#define MVPP2_PRS_VID_TCAM_BYTE		2

/* There is a TCAM range reserved for VLAN filtering entries; the range
 * size is 33:
 * 10 VLAN ID filter entries per port
 * 1 default VLAN filter entry per port
 * It is assumed that there are 3 ports to filter, not including the
 * loopback port
 */
#define MVPP2_PRS_VLAN_FILT_MAX		11
#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE	33

#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY	(MVPP2_PRS_VLAN_FILT_MAX - 2)
#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY	(MVPP2_PRS_VLAN_FILT_MAX - 1)
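/* Illustrative arithmetic: (10 + 1) entries per port times 3 filtered
 * ports gives the 33-entry MVPP2_PRS_VLAN_FILT_RANGE_SIZE above.
 */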

/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1

/* VLAN filtering range */
#define MVPP2_PE_VID_FILT_RANGE_END	(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_VID_FILT_RANGE_START	(MVPP2_PE_VID_FILT_RANGE_END - \
					 MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PE_VID_FILT_RANGE_START - 1)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 21)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 20)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_VID_FLTR_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT	(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

#define MVPP2_PRS_VID_PORT_FIRST(port)	(MVPP2_PE_VID_FILT_RANGE_START + \
					 ((port) * MVPP2_PRS_VLAN_FILT_MAX))
#define MVPP2_PRS_VID_PORT_LAST(port)	(MVPP2_PRS_VID_PORT_FIRST(port) \
					 + MVPP2_PRS_VLAN_FILT_MAX_ENTRY)
/* Index of default vid filter for given port */
#define MVPP2_PRS_VID_PORT_DFLT(port)	(MVPP2_PRS_VID_PORT_FIRST(port) \
					 + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY)
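/* Worked example (illustrative): with MVPP2_PRS_TCAM_SRAM_SIZE = 256,
 * the VID filter range spans entries 193..225. Port 0 then owns
 * MVPP2_PRS_VID_PORT_FIRST(0) = 193 through MVPP2_PRS_VID_PORT_LAST(0)
 * = 202, with its default filter at MVPP2_PRS_VID_PORT_DFLT(0) = 203;
 * port 2's default filter lands exactly on MVPP2_PE_VID_FILT_RANGE_END
 * (225).
 */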

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_IP_FRAG_TRUE		BIT(17)
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
#define MVPP2_PRS_EDSA_VID_AI_BIT		BIT(0)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_VID,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64
#define MVPP2_CLS_RX_QUEUES		256

/* RSS constants */
#define MVPP22_RSS_TABLE_ENTRIES	32

/* BM constants */
#define MVPP2_BM_POOLS_NUM		8
#define MVPP2_BM_LONG_BUF_NUM		1024
#define MVPP2_BM_SHORT_BUF_NUM		2048
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
#define MVPP2_BM_SWF_SHORT_POOL		3

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
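/* Why 512 holds (illustrative): MVPP2_RX_MAX_PKT_SIZE(512) subtracts
 * NET_SKB_PAD and MVPP2_SKB_SHINFO_SIZE, and those two terms are added
 * back by MVPP2_RX_BUF_SIZE() and MVPP2_RX_TOTAL_SIZE(), so
 * MVPP2_RX_TOTAL_SIZE(MVPP2_RX_BUF_SIZE(MVPP2_BM_SHORT_PKT_SIZE))
 * comes back to exactly 512 bytes per short buffer.
 */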

#define MVPP21_ADDR_SPACE_SZ		0
#define MVPP22_ADDR_SPACE_SZ		SZ_64K

#define MVPP2_MAX_THREADS		8
#define MVPP2_MAX_QVECS			MVPP2_MAX_THREADS

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* GMAC MIB Counters register definitions */
#define MVPP21_MIB_COUNTERS_OFFSET		0x1000
#define MVPP21_MIB_COUNTERS_PORT_SZ		0x400
#define MVPP22_MIB_COUNTERS_OFFSET		0x0
#define MVPP22_MIB_COUNTERS_PORT_SZ		0x100

#define MVPP2_MIB_GOOD_OCTETS_RCVD		0x0
#define MVPP2_MIB_BAD_OCTETS_RCVD		0x8
#define MVPP2_MIB_CRC_ERRORS_SENT		0xc
#define MVPP2_MIB_UNICAST_FRAMES_RCVD		0x10
#define MVPP2_MIB_BROADCAST_FRAMES_RCVD		0x18
#define MVPP2_MIB_MULTICAST_FRAMES_RCVD		0x1c
#define MVPP2_MIB_FRAMES_64_OCTETS		0x20
#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS	0x24
#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS	0x28
#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS	0x2c
#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS	0x30
#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS	0x34
#define MVPP2_MIB_GOOD_OCTETS_SENT		0x38
#define MVPP2_MIB_UNICAST_FRAMES_SENT		0x40
#define MVPP2_MIB_MULTICAST_FRAMES_SENT		0x48
#define MVPP2_MIB_BROADCAST_FRAMES_SENT		0x4c
#define MVPP2_MIB_FC_SENT			0x54
#define MVPP2_MIB_FC_RCVD			0x58
#define MVPP2_MIB_RX_FIFO_OVERRUN		0x5c
#define MVPP2_MIB_UNDERSIZE_RCVD		0x60
#define MVPP2_MIB_FRAGMENTS_RCVD		0x64
#define MVPP2_MIB_OVERSIZE_RCVD			0x68
#define MVPP2_MIB_JABBER_RCVD			0x6c
#define MVPP2_MIB_MAC_RCV_ERROR			0x70
#define MVPP2_MIB_BAD_CRC_EVENT			0x74
#define MVPP2_MIB_COLLISION			0x78
#define MVPP2_MIB_LATE_COLLISION		0x7c

#define MVPP2_MIB_COUNTERS_STATS_DELAY		(1 * HZ)

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *lms_base;
	void __iomem *iface_base;

	/* On PPv2.2, each "software thread" can access the base
	 * registers through a separate address space, each 64 KB apart
	 * from each other. Typically, such address spaces will be
	 * used per CPU.
	 */
	void __iomem *swth_base[MVPP2_MAX_THREADS];

	/* On PPv2.2, some port control registers are located in the system
	 * controller space. These registers are accessible through a regmap.
	 */
	struct regmap *sysctrl_base;

	/* Common clocks */
	struct clk *pp_clk;
	struct clk *gop_clk;
	struct clk *mg_clk;
	struct clk *axi_clk;

	/* List of pointers to port structures */
	int port_count;
	struct mvpp2_port *port_list[MVPP2_MAX_PORTS];

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	/* Workqueue to gather hardware statistics */
	char queue_name[30];
	struct workqueue_struct *stats_queue;
};

struct mvpp2_pcpu_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-CPU port control */
struct mvpp2_port_pcpu {
	struct hrtimer tx_done_timer;
	bool timer_scheduled;
	/* Tasklet for egress finalization */
	struct tasklet_struct tx_done_tasklet;
};

struct mvpp2_queue_vector {
	int irq;
	struct napi_struct napi;
	enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
	int sw_thread_id;
	u16 sw_thread_mask;
	int first_rxq;
	int nrxqs;
	u32 pending_cause_rx;
	struct mvpp2_port *port;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int link_irq;

	struct mvpp2 *priv;

	/* Firmware node associated to the port */
	struct fwnode_handle *fwnode;

	/* Per-port registers' base address */
	void __iomem *base;
	void __iomem *stats_base;

	struct mvpp2_rx_queue **rxqs;
	unsigned int nrxqs;
	struct mvpp2_tx_queue **txqs;
	unsigned int ntxqs;
	struct net_device *dev;

	int pkt_size;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;
	u64 *ethtool_stats;

	/* Per-port work and its lock to gather hardware statistics */
	struct mutex gather_stats_lock;
	struct delayed_work stats_work;

	phy_interface_t phy_interface;
	struct device_node *phy_node;
	struct phy *comphy;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
	unsigned int nqvecs;
	bool has_tx_irqs;

	u32 tx_time_coal;
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};
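/* Note (illustrative): on PPv2.2 the buffer DMA address and the cookie
 * share their u64 fields with PTP/key-hash/misc information; the
 * driver's descriptor accessor helpers mask the relevant bits apart,
 * which is why these structures are only manipulated through the
 * opaque wrappers below.
 */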

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

struct mvpp2_txq_pcpu_buf {
	/* Transmitted SKB */
	struct sk_buff *skb;

	/* Physical address of transmitted buffer */
	dma_addr_t dma;

	/* Size transmitted */
	size_t size;
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	int wake_threshold;
	int stop_threshold;
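	/* Illustrative note on the two thresholds above: the netdev
	 * queue is stopped once the ring can no longer hold a
	 * worst-case MVPP2_MAX_SKB_DESCS skb, and woken again after
	 * enough descriptors have been reclaimed on the TX-done path.
	 */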
1146
Marcin Wojtas3f518502014-07-10 16:52:13 -03001147 /* Number of Tx DMA descriptors reserved for each CPU */
1148 int reserved_num;
1149
Thomas Petazzoni83544912016-12-21 11:28:49 +01001150 /* Infos about transmitted buffers */
	struct mvpp2_txq_pcpu_buf *buffs;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* DMA buffer for TSO headers */
	char *tso_headers;
	dma_addr_t tso_headers_dma;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* BPPE size in bytes */
	int size_bytes;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;
	int frag_size;

	/* BPPE virtual base address */
	u32 *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

#define IS_TSO_HEADER(txq_pcpu, addr) \
	((addr) >= (txq_pcpu)->tso_headers_dma && \
	 (addr) < (txq_pcpu)->tso_headers_dma + \
		  (txq_pcpu)->size * TSO_HEADER_SIZE)
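
/* Illustrative note (not in the original source): the TSO header region is
 * one long coherent allocation per TX queue, so the completion path must
 * not dma_unmap() addresses that fall inside it. A cleanup loop would
 * typically use the macro like this (sketch; "dev" and "tx_buf" are
 * placeholders for the device pointer and an entry of txq_pcpu->buffs):
 *
 *	if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
 *		dma_unmap_single(dev, tx_buf->dma, tx_buf->size,
 *				 DMA_TO_DEVICE);
 */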

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_SINGLE_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

/* These accessors should be used to access:
 *
 * - per-CPU registers, where each CPU has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific CPU
 *   window, because they are related to an access to a per-CPU
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[cpu] + offset);
}

static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
			     u32 offset)
{
	return readl(priv->swth_base[cpu] + offset);
}
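
/* Illustrative note (not in the original source): a typical use of the
 * per-CPU window accessors pairs the indirect "select" write with the
 * dependent register access on the same CPU, e.g. reading the number of
 * sent packets for a TX queue (sketch):
 *
 *	cpu = get_cpu();
 *	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
 *	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_SENT_REG);
 *	put_cpu();
 *
 * Both accesses must go through the same CPU window, otherwise the
 * select/access pairing is lost.
 */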

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.buf_dma_addr;
	else
		return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = addr;
		tx_desc->pp21.packet_offset = offset;
	} else {
		u64 val = (u64)addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.data_size;
	else
		return tx_desc->pp22.data_size;
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
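
/* Illustrative note (not in the original source): the physical TXQ space
 * is laid out as "TCONTs first, then one block of MVPP2_MAX_TXQ queues per
 * port". Assuming, purely for the sake of the arithmetic, MVPP2_MAX_TCONT
 * = 6 and MVPP2_MAX_TXQ = 8 (check the definitions earlier in this file):
 *
 *	mvpp2_txq_phys(1, 3) = (6 + 1) * 8 + 3 = 59
 */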

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
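
/* Illustrative note (not in the original source): the parser TCAM/SRAM is
 * only reachable indirectly - software first writes the entry index to
 * MVPP2_PRS_TCAM_IDX_REG / MVPP2_PRS_SRAM_IDX_REG, then moves the entry
 * one 32-bit word at a time through the DATA registers. A minimal
 * read-modify-write of one entry therefore looks like (sketch):
 *
 *	pe.index = tid;
 *	if (mvpp2_prs_hw_read(priv, &pe) == MVPP2_PRS_TCAM_ENTRY_INVALID)
 *		memset(&pe, 0, sizeof(pe));	// start from a clean entry
 *	// ...modify pe.tcam / pe.sram through the helpers below...
 *	mvpp2_prs_hw_write(priv, &pe);
 */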

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
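
/* Illustrative note (not in the original source): the port byte of a TCAM
 * entry stores the port map inverted, because each enable bit acts as a
 * mask where 0 means "this port matches". Setting a map of
 * BIT(0) | BIT(2) therefore stores ~0x05 & MVPP2_PRS_PORT_MASK in the
 * enable byte, and mvpp2_prs_tcam_port_map_get() undoes the inversion to
 * return 0x05 again.
 */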

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
	u16 tcam_data;

	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
	if (tcam_data != data)
		return false;
	return true;
}

/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
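
/* Illustrative note (not in the original source): ethertypes are matched
 * big-endian, most significant byte first. Matching ETH_P_IP (0x0800) at
 * the start of the TCAM data therefore sets byte 0 to 0x08 and byte 1 to
 * 0x00, each with a full 0xff enable mask:
 *
 *	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
 */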
1700
Maxime Chevallier56beda32018-02-28 10:14:13 +01001701/* Set vid in tcam sw entry */
1702static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
1703 unsigned short vid)
1704{
1705 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
1706 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
1707}
1708
Marcin Wojtas3f518502014-07-10 16:52:13 -03001709/* Set bits in sram sw entry */
1710static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1711 int val)
1712{
1713 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1714}
1715
1716/* Clear bits in sram sw entry */
1717static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1718 int val)
1719{
1720 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1721}
1722
1723/* Update ri bits in sram sw entry */
1724static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1725 unsigned int bits, unsigned int mask)
1726{
1727 unsigned int i;
1728
1729 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1730 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1731
1732 if (!(mask & BIT(i)))
1733 continue;
1734
1735 if (bits & BIT(i))
1736 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1737 else
1738 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1739
1740 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1741 }
1742}
1743
1744/* Obtain ri bits from sram sw entry */
1745static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1746{
1747 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1748}
1749
1750/* Update ai bits in sram sw entry */
1751static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1752 unsigned int bits, unsigned int mask)
1753{
1754 unsigned int i;
1755 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1756
1757 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1758
1759 if (!(mask & BIT(i)))
1760 continue;
1761
1762 if (bits & BIT(i))
1763 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1764 else
1765 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1766
1767 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1768 }
1769}
1770
1771/* Read ai bits from sram sw entry */
1772static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1773{
1774 u8 bits;
1775 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1776 int ai_en_off = ai_off + 1;
1777 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1778
1779 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1780 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1781
1782 return bits;
1783}
1784
1785/* In sram sw entry set lookup ID field of the tcam key to be used in the next
1786 * lookup interation
1787 */
1788static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1789 unsigned int lu)
1790{
1791 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1792
1793 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1794 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1795 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1796}
1797
1798/* In the sram sw entry set sign and value of the next lookup offset
1799 * and the offset value generated to the classifier
1800 */
1801static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1802 unsigned int op)
1803{
1804 /* Set sign */
1805 if (shift < 0) {
1806 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1807 shift = 0 - shift;
1808 } else {
1809 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1810 }
1811
1812 /* Set value */
1813 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1814 (unsigned char)shift;
1815
1816 /* Reset and set operation */
1817 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1818 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1819 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1820
1821 /* Set base offset as current */
1822 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1823}
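
/* Illustrative note (not in the original source): the shift is signed, so
 * parser stages can also move the header extraction point backwards. The
 * IPv6 multicast entry later in this file uses a negative shift to step
 * back to the IPv6 next-header field:
 *
 *	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 */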

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
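
/* Illustrative note (not in the original source): the bounds may be given
 * in either order - the helper swaps them if needed and always returns the
 * lowest free index in the range. Callers that care about entry priority
 * (lower TCAM indices are matched first) enforce ordering themselves, as
 * the VLAN code below does when it compares the new tid against the
 * existing single/double VLAN tids.
 */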

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
					MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
					MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Set ai bits for next iteration */
			if (extend)
				mvpp2_prs_sram_ai_update(&pe, 1,
						MVPP2_PRS_SRAM_AI_MASK);
			else
				mvpp2_prs_sram_ai_update(&pe, 0,
						MVPP2_PRS_SRAM_AI_MASK);

			/* If packet is tagged continue check vid filtering */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
		} else {
			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
			mvpp2_prs_sram_shift_set(&pe, shift,
					MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 bytes reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
					MVPP2_ETH_TYPE_LEN + 2 + 3,
					MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
					MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Search for existing single/triple vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
						   unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;

		mvpp2_prs_hw_read(priv, pe);
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		if (tid <= tid_aux) {
			ret = -EINVAL;
			goto free_pe;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		mvpp2_prs_match_etype(pe, 0, tpid);

		/* VLAN tag detected, proceed with VID filtering */
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VID);

		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);

	mvpp2_prs_hw_write(priv, pe);
free_pe:
	kfree(pe);

	return ret;
}

/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}

/* Search for existing double vlan entry */
static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
							  unsigned short tpid1,
							  unsigned short tpid2)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);

		match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
			&& mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;

	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (!pe) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0) {
			ret = ai;
			goto free_pe;
		}

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			pe->index = tid_aux;
			mvpp2_prs_hw_read(priv, pe);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		if (tid >= tid_aux) {
			ret = -ERANGE;
			goto free_pe;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Shift 4 bytes - skip outer vlan tag */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(priv, pe);
free_pe:
	kfree(pe);
	return ret;
}
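
/* Illustrative note (not in the original source): the two ordering checks
 * in the VLAN add paths keep the VLAN portion of the TCAM consistent -
 * every double-VLAN entry must sit at a lower index (matched earlier) than
 * any single/triple-VLAN entry, so an outer+inner tag pair is classified
 * as "double" before the outer tag alone could match as "single".
 * mvpp2_prs_vlan_add() returns -EINVAL and mvpp2_prs_double_vlan_add()
 * returns -ERANGE when a new entry cannot preserve that order.
 */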

/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
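
/* Illustrative note (not in the original source): each L4 protocol gets a
 * pair of IPv4 entries. The first additionally requires the MF flag and
 * fragment offset to be zero (bytes 2 and 3 of the header checked against
 * 0x00; the DF bit is left out of the mask) and reports the exact L4
 * protocol; the second leaves those bytes unmasked and tags whatever
 * still matches as a fragment via MVPP2_PRS_RI_IP_FRAG_TRUE.
 */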

/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
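
/* Illustrative note (not in the original source): a port would typically
 * be initialized so that parsing starts at the Marvell Header lookup with
 * the first-loop offset at 0, e.g. (sketch; "max_lookups" stands in for
 * whatever loop limit the driver's init path actually uses):
 *
 *	mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH, max_lookups, 0);
 */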
2675
2676/* Default flow entries initialization for all ports */
2677static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2678{
2679 struct mvpp2_prs_entry pe;
2680 int port;
2681
2682 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002683 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002684 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2685 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2686
2687 /* Mask all ports */
2688 mvpp2_prs_tcam_port_map_set(&pe, 0);
2689
2690 /* Set flow ID*/
2691 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2692 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2693
2694 /* Update shadow table and hw entry */
2695 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2696 mvpp2_prs_hw_write(priv, &pe);
2697 }
2698}
2699
2700/* Set default entry for Marvell Header field */
2701static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2702{
2703 struct mvpp2_prs_entry pe;
2704
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002705 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002706
2707 pe.index = MVPP2_PE_MH_DEFAULT;
2708 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2709 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2710 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2711 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2712
2713 /* Unmask all ports */
2714 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2715
2716 /* Update shadow table and hw entry */
2717 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2718 mvpp2_prs_hw_write(priv, &pe);
2719}
2720
2721/* Set default entires (place holder) for promiscuous, non-promiscuous and
2722 * multicast MAC addresses
2723 */
2724static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2725{
2726 struct mvpp2_prs_entry pe;
2727
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002728 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002729
2730 /* Non-promiscuous mode for all ports - DROP unknown packets */
2731 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2732 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2733
2734 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2735 MVPP2_PRS_RI_DROP_MASK);
2736 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2737 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2738
2739 /* Unmask all ports */
2740 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2741
2742 /* Update shadow table and hw entry */
2743 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2744 mvpp2_prs_hw_write(priv, &pe);
2745
2746 /* place holders only - no ports */
2747 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2748 mvpp2_prs_mac_promisc_set(priv, 0, false);
Antoine Tenart20746d72017-10-24 11:41:27 +02002749 mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
2750 mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
Marcin Wojtas3f518502014-07-10 16:52:13 -03002751}
2752
2753/* Set default entries for various types of dsa packets */
2754static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2755{
2756 struct mvpp2_prs_entry pe;
2757
2758 /* None tagged EDSA entry - place holder */
2759 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2760 MVPP2_PRS_EDSA);
2761
2762 /* Tagged EDSA entry - place holder */
2763 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2764
2765 /* None tagged DSA entry - place holder */
2766 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2767 MVPP2_PRS_DSA);
2768
2769 /* Tagged DSA entry - place holder */
2770 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2771
2772 /* None tagged EDSA ethertype entry - place holder*/
2773 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2774 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2775
2776 /* Tagged EDSA ethertype entry - place holder*/
2777 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2778 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2779
2780 /* None tagged DSA ethertype entry */
2781 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2782 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2783
2784 /* Tagged DSA ethertype entry */
2785 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2786 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2787
2788 /* Set default entry, in case DSA or EDSA tag not found */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002789 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002790 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2791 pe.index = MVPP2_PE_DSA_DEFAULT;
2792 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2793
2794 /* Shift 0 bytes */
2795 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2796 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2797
2798 /* Clear all sram ai bits for next iteration */
2799 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2800
2801 /* Unmask all ports */
2802 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2803
2804 mvpp2_prs_hw_write(priv, &pe);
2805}
2806
Maxime Chevallier56beda32018-02-28 10:14:13 +01002807/* Initialize parser entries for VID filtering */
2808static void mvpp2_prs_vid_init(struct mvpp2 *priv)
2809{
2810 struct mvpp2_prs_entry pe;
2811
2812 memset(&pe, 0, sizeof(pe));
2813
2814 /* Set default vid entry */
2815 pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
2816 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2817
2818 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
2819
2820 /* Skip VLAN header - Set offset to 4 bytes */
2821 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
2822 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2823
2824 /* Clear all ai bits for next iteration */
2825 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2826
2827 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2828
2829 /* Unmask all ports */
2830 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2831
2832 /* Update shadow table and hw entry */
2833 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2834 mvpp2_prs_hw_write(priv, &pe);
2835
2836 /* Set default vid entry for extended DSA*/
2837 memset(&pe, 0, sizeof(pe));
2838
2839 /* Set default vid entry */
2840 pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
2841 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2842
2843 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
2844 MVPP2_PRS_EDSA_VID_AI_BIT);
2845
2846 /* Skip VLAN header - Set offset to 8 bytes */
2847 mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
2848 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2849
2850 /* Clear all ai bits for next iteration */
2851 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2852
2853 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2854
2855 /* Unmask all ports */
2856 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2857
2858 /* Update shadow table and hw entry */
2859 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2860 mvpp2_prs_hw_write(priv, &pe);
2861}
2862
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

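	/* Note: this "with options" entry deliberately reuses the TCAM/SRAM
	 * contents still held in pe from the entry above; only the IHL match
	 * bytes and the result-info words are rewritten below.
	 */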
	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88A8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
					      MVPP2_PRS_DBL_VLANS_MAX,
					      sizeof(bool), GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

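/* Note: as with the plain IPv4 ethertypes, the "with options" PPPoE entry
 * is written first and the "without options" entry reuses the same pe
 * contents, overriding only the IHL match and the result-info words.
 */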
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

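/* Note: IPv4 parsing is effectively two-pass, keyed on
 * MVPP2_PRS_IPV4_DIP_AI_BIT: the protocol entries run with the ai bit clear
 * and set it in SRAM, and the address entries then require the bit in TCAM,
 * as programmed below.
 */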
/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

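/* Note: the hop-limit entry added below matches a hop limit of zero (the
 * TCAM byte covered by MVPP2_PRS_IPV6_HOP_MASK must equal 0x00) and sets
 * the drop bit in result info, so such packets never reach flowid
 * generation.
 */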
/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is a similar case to 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relative to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

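/* Note: in the VID lookups below, a return value of 0 means "not found":
 * the VID filter range starts at MVPP2_PE_VID_FILT_RANGE_START, well above
 * index 0, so a real match can never be entry 0.
 */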
/* Find tcam entry with matched pair <vid, port> */
static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
				    u16 mask)
{
	unsigned char byte[2], enable[2];
	struct mvpp2_prs_entry pe;
	u16 rvid, rmask;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_VID */
	for (tid = MVPP2_PE_VID_FILT_RANGE_START;
	     tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
			continue;

		pe.index = tid;

		mvpp2_prs_hw_read(priv, &pe);
		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);

		rvid = ((byte[0] & 0xf) << 8) + byte[1];
		rmask = ((enable[0] & 0xf) << 8) + enable[1];

		if (rvid != vid || rmask != mask)
			continue;

		return tid;
	}

	return 0;
}

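/* Note: each port owns a private window of MVPP2_PRS_VLAN_FILT_MAX TCAM
 * entries starting at MVPP2_PE_VID_FILT_RANGE_START (see vid_start below),
 * so VID filters of different ports never compete for the same slots.
 */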
/* Write parser entry for VID filtering */
static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
{
	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
	unsigned int mask = 0xfff, reg_val, shift;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	int tid;

	/* Scan TCAM and see if an entry with this <vid, port> already exists */
	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);

	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	/* No such entry */
	if (!tid) {
		memset(&pe, 0, sizeof(pe));

		/* Go through all entries from first to last in vlan range */
		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
						vid_start +
						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);

		/* There isn't room for a new VID filter */
		if (tid < 0)
			return tid;

		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);
	}

	/* Enable the current port */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set match on VID */
	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Remove parser entry for VID filtering */
static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	/* Scan TCAM and see if an entry with this <vid, port> already exists */
	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);

	/* No such entry */
	if (!tid)
		return;

	mvpp2_prs_hw_inv(priv, tid);
	priv->prs_shadow[tid].valid = false;
}

/* Remove all existing VID filters on this port */
static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
		if (priv->prs_shadow[tid].valid) {
			mvpp2_prs_hw_inv(priv, tid);
			priv->prs_shadow[tid].valid = false;
		}
	}
}

/* Remove VID filtering entry for this port */
static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;

	/* Invalidate the guard entry */
	mvpp2_prs_hw_inv(priv, tid);

	priv->prs_shadow[tid].valid = false;
}

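/* Note: the guard entry lives at the port's default index, placed after the
 * port's specific VID entries; assuming TCAM lookup favours the lowest
 * matching index, it only fires (and drops) when no individual VID filter
 * matched first.
 */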
/* Add guard entry that drops packets when no VID is matched on this port */
static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;
	unsigned int reg_val, shift;
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[tid].valid)
		return;

	memset(&pe, 0, sizeof(pe));

	pe.index = tid;

	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Drop VLAN packets that don't belong to any VIDs on this port */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}

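/* Note: the comparison below is mask-aware on both sides: an entry matches
 * the requested <DA, mask> pair only if the per-byte enable masks agree and
 * the masked address bytes are equal.
 */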
/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all the entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}

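/* Note: MAC DA entries are shared between ports through the TCAM port map:
 * accepting the same DA on another port just sets one more bit in the map,
 * and the entry is invalidated once the last port is removed from it.
 */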
/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
			     MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all the entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (is_multicast_ether_addr(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}

static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}

/* Delete all port's multicast simple (not range) entries */
static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
{
	struct mvpp2_prs_entry pe;
	int index, tid;

	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		/* Only simple mac entries */
		pe.index = tid;
		mvpp2_prs_hw_read(priv, &pe);

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
			/* Delete this entry */
			mvpp2_prs_mac_da_accept(priv, port, da, false);
	}
}

static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}

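/* Note: flow entries are allocated from the end of the free TCAM range
 * backwards (last to first), keeping them away from the per-protocol
 * entries, which grow from the start of the range.
 */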
/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists */
	if (!pe) {
		/* Go through all the entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}

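/* Note: a classifier lookup-table slot is addressed by the (way, lkpid)
 * pair written to MVPP2_CLS_LKP_INDEX_REG before touching the data
 * register, which is why the init loop below programs both ways for every
 * lkpid.
 */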
/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}

static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

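/* Note: mvpp2_frag_alloc()/mvpp2_frag_free() above switch between the page
 * fragment allocator and plain kmalloc() depending on whether the pool's
 * frag_size fits in a page; the two helpers must stay in sync so a buffer
 * is always returned to the allocator it came from.
 */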
/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	int cpu = get_cpu();

	*dma_addr = mvpp2_percpu_read(priv, cpu,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

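/* Note: on this hardware the "virtual address" allocation register really
 * carries a physical-address cookie (see the comment in mvpp2_bm_pool_put()
 * below), which is why the free loop converts it back with phys_to_virt().
 */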
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	int cpu = get_cpu();

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_percpu_write(port->priv, cpu,
				   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_percpu_write(port->priv, cpu,
			   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_percpu_write(port->priv, cpu,
			   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	put_cpu();
}

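/* Note: mvpp2_bm_bufs_add() returns the number of buffers actually added,
 * which can be fewer than requested if an allocation fails; callers such
 * as mvpp2_bm_pool_use() compare it against the requested count.
 */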
Marcin Wojtas3f518502014-07-10 16:52:13 -03004452/* Allocate buffers for the pool */
4453static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
4454 struct mvpp2_bm_pool *bm_pool, int buf_num)
4455{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004456 int i, buf_size, total_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01004457 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004458 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004459 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004460
4461 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
4462 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
4463
4464 if (buf_num < 0 ||
4465 (buf_num + bm_pool->buf_num > bm_pool->size)) {
4466 netdev_err(port->dev,
4467 "cannot allocate %d buffers for pool %d\n",
4468 buf_num, bm_pool->id);
4469 return 0;
4470 }
4471
Marcin Wojtas3f518502014-07-10 16:52:13 -03004472 for (i = 0; i < buf_num; i++) {
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004473 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4474 &phys_addr, GFP_KERNEL);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004475 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004476 break;
4477
Thomas Petazzoni20396132017-03-07 16:53:00 +01004478 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004479 phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004480 }
4481
4482 /* Update BM driver with number of buffers added to pool */
4483 bm_pool->buf_num += i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004484
4485 netdev_dbg(port->dev,
4486 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
4487 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4488 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4489
4490 netdev_dbg(port->dev,
4491 "%s pool %d: %d of %d buffers added\n",
4492 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4493 bm_pool->id, i, buf_num);
4494 return i;
4495}
4496
4497/* Notify the driver that BM pool is being used as a specific type and return the
4498 * pool pointer on success
4499 */
4500static struct mvpp2_bm_pool *
4501mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
4502 int pkt_size)
4503{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004504 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4505 int num;
4506
4507 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
4508 netdev_err(port->dev, "mixing pool types is forbidden\n");
4509 return NULL;
4510 }
4511
Marcin Wojtas3f518502014-07-10 16:52:13 -03004512 if (new_pool->type == MVPP2_BM_FREE)
4513 new_pool->type = type;
4514
4515 /* Allocate buffers in case BM pool is used as long pool, but packet
4516 * size doesn't match MTU or BM pool hasn't been used yet
4517 */
4518 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
4519 (new_pool->pkt_size == 0)) {
4520 int pkts_num;
4521
4522 /* Set default buffer number or free all the buffers in case
4523 * the pool is not empty
4524 */
4525 pkts_num = new_pool->buf_num;
4526 if (pkts_num == 0)
4527 pkts_num = type == MVPP2_BM_SWF_LONG ?
4528 MVPP2_BM_LONG_BUF_NUM :
4529 MVPP2_BM_SHORT_BUF_NUM;
4530 else
Marcin Wojtas4229d502015-12-03 15:20:50 +01004531 mvpp2_bm_bufs_free(port->dev->dev.parent,
4532 port->priv, new_pool);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004533
4534 new_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004535 new_pool->frag_size =
4536 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4537 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004538
4539 /* Allocate buffers for this pool */
4540 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4541 if (num != pkts_num) {
4542 WARN(1, "pool %d: %d of %d allocated\n",
4543 new_pool->id, num, pkts_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004544 return NULL;
4545 }
4546 }
4547
4548 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4549 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4550
Marcin Wojtas3f518502014-07-10 16:52:13 -03004551 return new_pool;
4552}
4553
4554/* Initialize pools for swf */
4555static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4556{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004557 int rxq;
4558
4559 if (!port->pool_long) {
4560 port->pool_long =
4561 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
4562 MVPP2_BM_SWF_LONG,
4563 port->pkt_size);
4564 if (!port->pool_long)
4565 return -ENOMEM;
4566
Marcin Wojtas3f518502014-07-10 16:52:13 -03004567 port->pool_long->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004568
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004569 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004570 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4571 }
4572
4573 if (!port->pool_short) {
4574 port->pool_short =
4575 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
4576 MVPP2_BM_SWF_SHORT,
4577 MVPP2_BM_SHORT_PKT_SIZE);
4578 if (!port->pool_short)
4579 return -ENOMEM;
4580
Marcin Wojtas3f518502014-07-10 16:52:13 -03004581 port->pool_short->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004582
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004583 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004584 mvpp2_rxq_short_pool_set(port, rxq,
4585 port->pool_short->id);
4586 }
4587
4588 return 0;
4589}
4590
4591static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4592{
4593 struct mvpp2_port *port = netdev_priv(dev);
4594 struct mvpp2_bm_pool *port_pool = port->pool_long;
4595 int num, pkts_num = port_pool->buf_num;
4596 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4597
4598 /* Update BM pool with new buffer size */
Marcin Wojtas4229d502015-12-03 15:20:50 +01004599 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03004600 if (port_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004601 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
4602 return -EIO;
4603 }
4604
4605 port_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004606 port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4607 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004608 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
4609 if (num != pkts_num) {
4610 WARN(1, "pool %d: %d of %d allocated\n",
4611 port_pool->id, num, pkts_num);
4612 return -EIO;
4613 }
4614
4615 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
4616 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
4617 dev->mtu = mtu;
4618 netdev_update_features(dev);
4619 return 0;
4620}
4621
4622static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4623{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004624 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004625
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004626 for (i = 0; i < port->nqvecs; i++)
4627 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4628
Marcin Wojtas3f518502014-07-10 16:52:13 -03004629 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004630 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004631}
4632
4633static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4634{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004635 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004636
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004637 for (i = 0; i < port->nqvecs; i++)
4638 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4639
Marcin Wojtas3f518502014-07-10 16:52:13 -03004640 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004641 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
4642}
4643
4644static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
4645{
4646 struct mvpp2_port *port = qvec->port;
4647
4648 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4649 MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
4650}
4651
4652static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
4653{
4654 struct mvpp2_port *port = qvec->port;
4655
4656 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4657 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004658}
4659
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004660/* Mask the current CPU's Rx/Tx interrupts.
4661 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4662 * using smp_processor_id() is OK.
4663 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004664static void mvpp2_interrupts_mask(void *arg)
4665{
4666 struct mvpp2_port *port = arg;
4667
Thomas Petazzonia7868412017-03-07 16:53:13 +01004668 mvpp2_percpu_write(port->priv, smp_processor_id(),
4669 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004670}
4671
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004672/* Unmask the current CPU's Rx/Tx interrupts.
4673 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4674 * using smp_processor_id() is OK.
4675 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004676static void mvpp2_interrupts_unmask(void *arg)
4677{
4678 struct mvpp2_port *port = arg;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004679 u32 val;
4680
4681 val = MVPP2_CAUSE_MISC_SUM_MASK |
4682 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4683 if (port->has_tx_irqs)
4684 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004685
Thomas Petazzonia7868412017-03-07 16:53:13 +01004686 mvpp2_percpu_write(port->priv, smp_processor_id(),
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004687 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4688}
4689
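/* Usage sketch (hedged): the two helpers above are intended to run on
 * every CPU via on_each_cpu(), e.g. around a queue reconfiguration:
 *
 *	on_each_cpu(mvpp2_interrupts_mask, port, 1);
 *	...
 *	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
 *
 * on_each_cpu() runs the callback with migration disabled, which is
 * what makes smp_processor_id() safe inside them.
 */
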
4690static void
4691mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
4692{
4693 u32 val;
4694 int i;
4695
4696 if (port->priv->hw_version != MVPP22)
4697 return;
4698
4699 if (mask)
4700 val = 0;
4701 else
4702 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4703
4704 for (i = 0; i < port->nqvecs; i++) {
4705 struct mvpp2_queue_vector *v = port->qvecs + i;
4706
4707 if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
4708 continue;
4709
4710 mvpp2_percpu_write(port->priv, v->sw_thread_id,
4711 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4712 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004713}
4714
4715/* Port configuration routines */
4716
Antoine Ténartf84bf382017-08-22 19:08:27 +02004717static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
4718{
4719 struct mvpp2 *priv = port->priv;
4720 u32 val;
4721
4722 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4723 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
4724 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4725
4726 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4727 if (port->gop_id == 2)
4728 val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
4729 else if (port->gop_id == 3)
4730 val |= GENCONF_CTRL0_PORT1_RGMII_MII;
4731 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4732}
4733
4734static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
4735{
4736 struct mvpp2 *priv = port->priv;
4737 u32 val;
4738
4739 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4740 val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
4741 GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
4742 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4743
4744 if (port->gop_id > 1) {
4745 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
4746 if (port->gop_id == 2)
4747 val &= ~GENCONF_CTRL0_PORT0_RGMII;
4748 else if (port->gop_id == 3)
4749 val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
4750 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
4751 }
4752}
4753
4754static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
4755{
4756 struct mvpp2 *priv = port->priv;
4757 void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
4758 void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
4759 u32 val;
4760
4761 /* XPCS */
4762 val = readl(xpcs + MVPP22_XPCS_CFG0);
4763 val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
4764 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
4765 val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
4766 writel(val, xpcs + MVPP22_XPCS_CFG0);
4767
4768 /* MPCS */
4769 val = readl(mpcs + MVPP22_MPCS_CTRL);
4770 val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
4771 writel(val, mpcs + MVPP22_MPCS_CTRL);
4772
4773 val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
4774 val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
4775 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
4776 val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
4777 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4778
4779 val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
4780 val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
4781 writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
4782}
4783
4784static int mvpp22_gop_init(struct mvpp2_port *port)
4785{
4786 struct mvpp2 *priv = port->priv;
4787 u32 val;
4788
4789 if (!priv->sysctrl_base)
4790 return 0;
4791
4792 switch (port->phy_interface) {
4793 case PHY_INTERFACE_MODE_RGMII:
4794 case PHY_INTERFACE_MODE_RGMII_ID:
4795 case PHY_INTERFACE_MODE_RGMII_RXID:
4796 case PHY_INTERFACE_MODE_RGMII_TXID:
4797 if (port->gop_id == 0)
4798 goto invalid_conf;
4799 mvpp22_gop_init_rgmii(port);
4800 break;
4801 case PHY_INTERFACE_MODE_SGMII:
4802 mvpp22_gop_init_sgmii(port);
4803 break;
4804 case PHY_INTERFACE_MODE_10GKR:
4805 if (port->gop_id != 0)
4806 goto invalid_conf;
4807 mvpp22_gop_init_10gkr(port);
4808 break;
4809 default:
4810 goto unsupported_conf;
4811 }
4812
4813 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
4814 val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
4815 GENCONF_PORT_CTRL1_EN(port->gop_id);
4816 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
4817
4818 regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
4819 val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
4820 regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
4821
4822 regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
4823 val |= GENCONF_SOFT_RESET1_GOP;
4824 regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
4825
4826unsupported_conf:
4827 return 0;
4828
4829invalid_conf:
4830 netdev_err(port->dev, "Invalid port configuration\n");
4831 return -EINVAL;
4832}
4833
Antoine Tenartfd3651b2017-09-01 11:04:54 +02004834static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
4835{
4836 u32 val;
4837
4838 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4839 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4840 /* Enable the GMAC link status irq for this port */
4841 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4842 val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4843 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4844 }
4845
4846 if (port->gop_id == 0) {
4847 /* Enable the XLG/GIG irqs for this port */
4848 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4849 if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4850 val |= MVPP22_XLG_EXT_INT_MASK_XLG;
4851 else
4852 val |= MVPP22_XLG_EXT_INT_MASK_GIG;
4853 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4854 }
4855}
4856
4857static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
4858{
4859 u32 val;
4860
4861 if (port->gop_id == 0) {
4862 val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
4863 val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
4864 MVPP22_XLG_EXT_INT_MASK_GIG);
4865 writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
4866 }
4867
4868 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4869 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4870 val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
4871 val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
4872 writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
4873 }
4874}
4875
4876static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
4877{
4878 u32 val;
4879
4880 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
4881 port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4882 val = readl(port->base + MVPP22_GMAC_INT_MASK);
4883 val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
4884 writel(val, port->base + MVPP22_GMAC_INT_MASK);
4885 }
4886
4887 if (port->gop_id == 0) {
4888 val = readl(port->base + MVPP22_XLG_INT_MASK);
4889 val |= MVPP22_XLG_INT_MASK_LINK;
4890 writel(val, port->base + MVPP22_XLG_INT_MASK);
4891 }
4892
4893 mvpp22_gop_unmask_irq(port);
4894}
4895
Antoine Tenart542897d2017-08-30 10:29:15 +02004896static int mvpp22_comphy_init(struct mvpp2_port *port)
4897{
4898 enum phy_mode mode;
4899 int ret;
4900
4901 if (!port->comphy)
4902 return 0;
4903
4904 switch (port->phy_interface) {
4905 case PHY_INTERFACE_MODE_SGMII:
4906 mode = PHY_MODE_SGMII;
4907 break;
4908 case PHY_INTERFACE_MODE_10GKR:
4909 mode = PHY_MODE_10GKR;
4910 break;
4911 default:
4912 return -EINVAL;
4913 }
4914
4915 ret = phy_set_mode(port->comphy, mode);
4916 if (ret)
4917 return ret;
4918
4919 return phy_power_on(port->comphy);
4920}
4921
Antoine Ténart39193572017-08-22 19:08:24 +02004922static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
4923{
4924 u32 val;
4925
4926 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4927 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4928 val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
4929 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4930 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4931 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
Antoine Tenart1df22702017-09-01 11:04:52 +02004932 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
Antoine Ténart39193572017-08-22 19:08:24 +02004933 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4934 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
4935 MVPP22_CTRL4_SYNC_BYPASS_DIS |
4936 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4937 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4938 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
Antoine Ténart39193572017-08-22 19:08:24 +02004939 }
4940
4941 /* The port is connected to a copper PHY */
4942 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4943 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
4944 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4945
4946 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4947 val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
4948 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
4949 MVPP2_GMAC_AN_DUPLEX_EN;
4950 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4951 val |= MVPP2_GMAC_IN_BAND_AUTONEG;
4952 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4953}
4954
4955static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
4956{
4957 u32 val;
4958
4959 /* Force link down */
4960 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4961 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
4962 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
4963 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4964
4965 /* Set the GMAC in a reset state */
4966 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4967 val |= MVPP2_GMAC_PORT_RESET_MASK;
4968 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4969
4970 /* Configure the PCS and in-band AN */
4971 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4972 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4973 val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
Antoine Tenart1df22702017-09-01 11:04:52 +02004974 } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
Antoine Ténart39193572017-08-22 19:08:24 +02004975 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
Antoine Ténart39193572017-08-22 19:08:24 +02004976 }
4977 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4978
4979 mvpp2_port_mii_gmac_configure_mode(port);
4980
4981 /* Unset the GMAC reset state */
4982 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4983 val &= ~MVPP2_GMAC_PORT_RESET_MASK;
4984 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4985
4986 /* Stop forcing link down */
4987 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4988 val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
4989 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4990}
4991
Antoine Ténart77321952017-08-22 19:08:25 +02004992static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port)
4993{
4994 u32 val;
4995
4996 if (port->gop_id != 0)
4997 return;
4998
4999 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5000 val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
5001 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5002
5003 val = readl(port->base + MVPP22_XLG_CTRL4_REG);
5004 val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
5005 val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
5006 writel(val, port->base + MVPP22_XLG_CTRL4_REG);
5007}
5008
Thomas Petazzoni26975822017-03-07 16:53:14 +01005009static void mvpp22_port_mii_set(struct mvpp2_port *port)
5010{
5011 u32 val;
5012
Thomas Petazzoni26975822017-03-07 16:53:14 +01005013 /* Only GOP port 0 has an XLG MAC */
5014 if (port->gop_id == 0) {
5015 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
5016 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
Antoine Ténart725757a2017-06-12 16:01:39 +02005017
5018 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5019 port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5020 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
5021 else
5022 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
5023
Thomas Petazzoni26975822017-03-07 16:53:14 +01005024 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
5025 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01005026}
5027
Marcin Wojtas3f518502014-07-10 16:52:13 -03005028static void mvpp2_port_mii_set(struct mvpp2_port *port)
5029{
Thomas Petazzoni26975822017-03-07 16:53:14 +01005030 if (port->priv->hw_version == MVPP22)
5031 mvpp22_port_mii_set(port);
5032
Antoine Tenart1df22702017-09-01 11:04:52 +02005033 if (phy_interface_mode_is_rgmii(port->phy_interface) ||
Antoine Ténart39193572017-08-22 19:08:24 +02005034 port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5035 mvpp2_port_mii_gmac_configure(port);
Antoine Ténart77321952017-08-22 19:08:25 +02005036 else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
5037 mvpp2_port_mii_xlg_configure(port);
Marcin Wojtas08a23752014-07-21 13:48:12 -03005038}
5039
5040static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
5041{
5042 u32 val;
5043
5044 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5045 val |= MVPP2_GMAC_FC_ADV_EN;
5046 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005047}
5048
5049static void mvpp2_port_enable(struct mvpp2_port *port)
5050{
5051 u32 val;
5052
Antoine Ténart725757a2017-06-12 16:01:39 +02005053 /* Only GOP port 0 has an XLG MAC */
5054 if (port->gop_id == 0 &&
5055 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5056 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
5057 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5058 val |= MVPP22_XLG_CTRL0_PORT_EN |
5059 MVPP22_XLG_CTRL0_MAC_RESET_DIS;
5060 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
5061 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5062 } else {
5063 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5064 val |= MVPP2_GMAC_PORT_EN_MASK;
5065 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
5066 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5067 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005068}
5069
5070static void mvpp2_port_disable(struct mvpp2_port *port)
5071{
5072 u32 val;
5073
Antoine Ténart725757a2017-06-12 16:01:39 +02005074 /* Only GOP port 0 has an XLG MAC */
5075 if (port->gop_id == 0 &&
5076 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
5077 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
5078 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5079 val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
5080 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
5081 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5082 } else {
5083 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5084 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
5085 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5086 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005087}
5088
5089/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
5090static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
5091{
5092 u32 val;
5093
5094 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
5095 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
5096 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5097}
5098
5099/* Configure loopback port */
5100static void mvpp2_port_loopback_set(struct mvpp2_port *port)
5101{
5102 u32 val;
5103
5104 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
5105
5106 if (port->speed == 1000)
5107 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
5108 else
5109 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
5110
5111 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
5112 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
5113 else
5114 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
5115
5116 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
5117}
5118
Miquel Raynal118d6292017-11-06 22:56:53 +01005119struct mvpp2_ethtool_counter {
5120 unsigned int offset;
5121 const char string[ETH_GSTRING_LEN];
5122 bool reg_is_64b;
5123};
5124
5125static u64 mvpp2_read_count(struct mvpp2_port *port,
5126 const struct mvpp2_ethtool_counter *counter)
5127{
5128 u64 val;
5129
5130 val = readl(port->stats_base + counter->offset);
5131 if (counter->reg_is_64b)
5132 val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
5133
5134 return val;
5135}
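
/* Read-order sketch: a 64-bit MIB counter is exposed as two 32-bit
 * halves, the low word at 'offset' and the high word at 'offset + 4'.
 * With a low read of 0x89abcdef and a high read of 0x12:
 *
 *	val  = 0x89abcdef;
 *	val += (u64)0x12 << 32;		(val == 0x1289abcdef)
 *
 * Nothing here latches the pair, so a counter rolling over between the
 * two reads could in principle be observed torn; whether the hardware
 * latches on the low read is an assumption not encoded in this driver.
 */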
5136
5137/* Due to the fact that software statistics and hardware statistics are, by
5138 * design, incremented at different moments in the chain of packet processing,
5139 * it is very likely that incoming packets could have been dropped after being
5140 * counted by hardware but before reaching software statistics (most probably
5141 * multicast packets); in the opposite direction, during transmission, FCS
5142 * bytes are added in between, and TSO skbs are split with header bytes added.
5143 * Hence, statistics gathered from userspace with ifconfig (software) and
5144 * ethtool (hardware) cannot be compared.
5145 */
5146static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
5147 { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
5148 { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
5149 { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
5150 { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
5151 { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
5152 { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
5153 { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
5154 { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
5155 { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
5156 { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
5157 { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
5158 { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
5159 { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
5160 { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
5161 { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
5162 { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
5163 { MVPP2_MIB_FC_SENT, "fc_sent" },
5164 { MVPP2_MIB_FC_RCVD, "fc_received" },
5165 { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
5166 { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
5167 { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
5168 { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
5169 { MVPP2_MIB_JABBER_RCVD, "jabber_received" },
5170 { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
5171 { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
5172 { MVPP2_MIB_COLLISION, "collision" },
5173 { MVPP2_MIB_LATE_COLLISION, "late_collision" },
5174};
5175
5176static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
5177 u8 *data)
5178{
5179 if (sset == ETH_SS_STATS) {
5180 int i;
5181
5182 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5183 memcpy(data + i * ETH_GSTRING_LEN,
5184 &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
5185 }
5186}
5187
5188static void mvpp2_gather_hw_statistics(struct work_struct *work)
5189{
5190 struct delayed_work *del_work = to_delayed_work(work);
Miquel Raynale5c500e2017-11-08 08:59:40 +01005191 struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
5192 stats_work);
Miquel Raynal118d6292017-11-06 22:56:53 +01005193 u64 *pstats;
Miquel Raynale5c500e2017-11-08 08:59:40 +01005194 int i;
Miquel Raynal118d6292017-11-06 22:56:53 +01005195
Miquel Raynale5c500e2017-11-08 08:59:40 +01005196 mutex_lock(&port->gather_stats_lock);
Miquel Raynal118d6292017-11-06 22:56:53 +01005197
Miquel Raynale5c500e2017-11-08 08:59:40 +01005198 pstats = port->ethtool_stats;
5199 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5200 *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
Miquel Raynal118d6292017-11-06 22:56:53 +01005201
5202 /* No need to read the counters again right after this function if it
5203 * was called asynchronously by the user (i.e. via ethtool).
5204 */
Miquel Raynale5c500e2017-11-08 08:59:40 +01005205 cancel_delayed_work(&port->stats_work);
5206 queue_delayed_work(port->priv->stats_queue, &port->stats_work,
Miquel Raynal118d6292017-11-06 22:56:53 +01005207 MVPP2_MIB_COUNTERS_STATS_DELAY);
5208
Miquel Raynale5c500e2017-11-08 08:59:40 +01005209 mutex_unlock(&port->gather_stats_lock);
Miquel Raynal118d6292017-11-06 22:56:53 +01005210}
5211
5212static void mvpp2_ethtool_get_stats(struct net_device *dev,
5213 struct ethtool_stats *stats, u64 *data)
5214{
5215 struct mvpp2_port *port = netdev_priv(dev);
5216
Miquel Raynale5c500e2017-11-08 08:59:40 +01005217 /* Update statistics for the given port, then take the lock to avoid
5218 * concurrent accesses on the ethtool_stats structure during its copy.
5219 */
5220 mvpp2_gather_hw_statistics(&port->stats_work.work);
Miquel Raynal118d6292017-11-06 22:56:53 +01005221
Miquel Raynale5c500e2017-11-08 08:59:40 +01005222 mutex_lock(&port->gather_stats_lock);
Miquel Raynal118d6292017-11-06 22:56:53 +01005223 memcpy(data, port->ethtool_stats,
5224 sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
Miquel Raynale5c500e2017-11-08 08:59:40 +01005225 mutex_unlock(&port->gather_stats_lock);
Miquel Raynal118d6292017-11-06 22:56:53 +01005226}
5227
5228static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
5229{
5230 if (sset == ETH_SS_STATS)
5231 return ARRAY_SIZE(mvpp2_ethtool_regs);
5232
5233 return -EOPNOTSUPP;
5234}
5235
Marcin Wojtas3f518502014-07-10 16:52:13 -03005236static void mvpp2_port_reset(struct mvpp2_port *port)
5237{
5238 u32 val;
Miquel Raynal118d6292017-11-06 22:56:53 +01005239 unsigned int i;
5240
5241 /* Read the GOP statistics to reset the hardware counters */
5242 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
5243 mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005244
5245 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5246 ~MVPP2_GMAC_PORT_RESET_MASK;
5247 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
5248
5249 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
5250 MVPP2_GMAC_PORT_RESET_MASK)
5251 continue;
5252}
5253
5254/* Change maximum receive size of the port */
5255static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
5256{
5257 u32 val;
5258
5259 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5260 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
5261 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
5262 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
5263 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
5264}
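
/* Worked example (assuming MVPP2_MH_SIZE == 2, the Marvell header):
 * with pkt_size == 1520 the field is programmed with
 * (1520 - 2) / 2 == 759, i.e. the GMAC expresses its maximum receive
 * size in 2-byte units.
 */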
5265
Stefan Chulski76eb1b12017-08-22 19:08:26 +02005266/* Change maximum receive size of the port */
5267static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
5268{
5269 u32 val;
5270
5271 val = readl(port->base + MVPP22_XLG_CTRL1_REG);
5272 val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
5273 val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
Antoine Ténartec15ecd2017-08-25 15:24:46 +02005274 MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
Stefan Chulski76eb1b12017-08-22 19:08:26 +02005275 writel(val, port->base + MVPP22_XLG_CTRL1_REG);
5276}
5277
Marcin Wojtas3f518502014-07-10 16:52:13 -03005278/* Set defaults to the MVPP2 port */
5279static void mvpp2_defaults_set(struct mvpp2_port *port)
5280{
5281 int tx_port_num, val, queue, ptxq, lrxq;
5282
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01005283 if (port->priv->hw_version == MVPP21) {
5284 /* Configure port to loopback if needed */
5285 if (port->flags & MVPP2_F_LOOPBACK)
5286 mvpp2_port_loopback_set(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005287
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01005288 /* Update TX FIFO MIN Threshold */
5289 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5290 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
5291 /* Min. TX threshold must be less than minimal packet length */
5292 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
5293 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
5294 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005295
5296 /* Disable Legacy WRR, Disable EJP, Release from reset */
5297 tx_port_num = mvpp2_egress_port(port);
5298 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
5299 tx_port_num);
5300 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
5301
5302 /* Close bandwidth for all queues */
5303 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
5304 ptxq = mvpp2_txq_phys(port->id, queue);
5305 mvpp2_write(port->priv,
5306 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
5307 }
5308
5309 /* Set refill period to 1 usec, refill tokens
5310 * and bucket size to maximum
5311 */
5312 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
5313 port->priv->tclk / USEC_PER_SEC);
5314 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
5315 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
5316 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
5317 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
5318 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
5319 val = MVPP2_TXP_TOKEN_SIZE_MAX;
5320 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5321
5322 /* Set MaximumLowLatencyPacketSize value to 256 */
5323 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
5324 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
5325 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
5326
5327 /* Enable Rx cache snoop */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005328 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005329 queue = port->rxqs[lrxq]->id;
5330 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5331 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
5332 MVPP2_SNOOP_BUF_HDR_MASK;
5333 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5334 }
5335
5336 /* At default, mask all interrupts to all present cpus */
5337 mvpp2_interrupts_disable(port);
5338}
5339
5340/* Enable/disable receiving packets */
5341static void mvpp2_ingress_enable(struct mvpp2_port *port)
5342{
5343 u32 val;
5344 int lrxq, queue;
5345
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005346 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005347 queue = port->rxqs[lrxq]->id;
5348 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5349 val &= ~MVPP2_RXQ_DISABLE_MASK;
5350 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5351 }
5352}
5353
5354static void mvpp2_ingress_disable(struct mvpp2_port *port)
5355{
5356 u32 val;
5357 int lrxq, queue;
5358
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005359 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005360 queue = port->rxqs[lrxq]->id;
5361 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
5362 val |= MVPP2_RXQ_DISABLE_MASK;
5363 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
5364 }
5365}
5366
5367/* Enable transmit via physical egress queue
5368 * - HW starts taking descriptors from DRAM
5369 */
5370static void mvpp2_egress_enable(struct mvpp2_port *port)
5371{
5372 u32 qmap;
5373 int queue;
5374 int tx_port_num = mvpp2_egress_port(port);
5375
5376 /* Enable all initialized TXs. */
5377 qmap = 0;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005378 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005379 struct mvpp2_tx_queue *txq = port->txqs[queue];
5380
Markus Elfringdbbb2f02017-04-17 14:07:52 +02005381 if (txq->descs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005382 qmap |= (1 << queue);
5383 }
5384
5385 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5386 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
5387}
5388
5389/* Disable transmit via physical egress queue
5390 * - HW doesn't take descriptors from DRAM
5391 */
5392static void mvpp2_egress_disable(struct mvpp2_port *port)
5393{
5394 u32 reg_data;
5395 int delay;
5396 int tx_port_num = mvpp2_egress_port(port);
5397
5398 /* Issue stop command for active channels only */
5399 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5400 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
5401 MVPP2_TXP_SCHED_ENQ_MASK;
5402 if (reg_data != 0)
5403 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
5404 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
5405
5406 /* Wait for all Tx activity to terminate. */
5407 delay = 0;
5408 do {
5409 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
5410 netdev_warn(port->dev,
5411 "Tx stop timed out, status=0x%08x\n",
5412 reg_data);
5413 break;
5414 }
5415 mdelay(1);
5416 delay++;
5417
5418 /* Check the port TX Command register to verify that
5419 * all Tx queues are stopped
5420 */
5421 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
5422 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
5423}
5424
5425/* Rx descriptors helper methods */
5426
5427/* Get number of Rx descriptors occupied by received packets */
5428static inline int
5429mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
5430{
5431 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
5432
5433 return val & MVPP2_RXQ_OCCUPIED_MASK;
5434}
5435
5436/* Update Rx queue status with the number of occupied and available
5437 * Rx descriptor slots.
5438 */
5439static inline void
5440mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
5441 int used_count, int free_count)
5442{
5443 /* Decrement the number of used descriptors and increment
5444 * the number of free descriptors.
5445 */
5446 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
5447
5448 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
5449}
5450
5451/* Get pointer to next RX descriptor to be processed by SW */
5452static inline struct mvpp2_rx_desc *
5453mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
5454{
5455 int rx_desc = rxq->next_desc_to_proc;
5456
5457 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
5458 prefetch(rxq->descs + rxq->next_desc_to_proc);
5459 return rxq->descs + rx_desc;
5460}
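
/* Ring-walk sketch: assuming MVPP2_QUEUE_NEXT_DESC() wraps at
 * rxq->last_desc, a 4-entry ring yields next_desc_to_proc = 0, 1, 2,
 * 3, 0, ... and the prefetch() warms the cache line of the descriptor
 * the *next* call will return.
 */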
5461
5462/* Set rx queue offset */
5463static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
5464 int prxq, int offset)
5465{
5466 u32 val;
5467
5468 /* Convert offset from bytes to units of 32 bytes */
5469 offset = offset >> 5;
5470
5471 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
5472 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
5473
5474 /* Offset is in units of 32 bytes */
5475 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
5476 MVPP2_RXQ_PACKET_OFFSET_MASK);
5477
5478 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
5479}
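
/* Worked example: the driver passes NET_SKB_PAD here (see
 * mvpp2_rxq_init()); with NET_SKB_PAD == 64, a common value, this
 * becomes 64 >> 5 == 2, so packet data starts 2 * 32 == 64 bytes into
 * the buffer. Offsets that are not a multiple of 32 bytes are rounded
 * down by the shift.
 */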
5480
Marcin Wojtas3f518502014-07-10 16:52:13 -03005481/* Tx descriptors helper methods */
5482
Marcin Wojtas3f518502014-07-10 16:52:13 -03005483/* Get pointer to next Tx descriptor to be processed (send) by HW */
5484static struct mvpp2_tx_desc *
5485mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
5486{
5487 int tx_desc = txq->next_desc_to_proc;
5488
5489 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
5490 return txq->descs + tx_desc;
5491}
5492
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005493/* Update HW with number of aggregated Tx descriptors to be sent
5494 *
5495 * Called only from mvpp2_tx(), so migration is disabled, using
5496 * smp_processor_id() is OK.
5497 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03005498static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
5499{
5500 /* aggregated access - relevant TXQ number is written in TX desc */
Thomas Petazzonia7868412017-03-07 16:53:13 +01005501 mvpp2_percpu_write(port->priv, smp_processor_id(),
5502 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005503}
5504
5505
5506/* Check if there are enough free descriptors in aggregated txq.
5507 * If not, update the number of occupied descriptors and repeat the check.
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005508 *
5509 * Called only from mvpp2_tx(), so migration is disabled, using
5510 * smp_processor_id() is OK.
Marcin Wojtas3f518502014-07-10 16:52:13 -03005511 */
5512static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
5513 struct mvpp2_tx_queue *aggr_txq, int num)
5514{
Antoine Tenart02856a32017-10-30 11:23:32 +01005515 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005516 /* Update number of occupied aggregated Tx descriptors */
5517 int cpu = smp_processor_id();
5518 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
5519
5520 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
5521 }
5522
Antoine Tenart02856a32017-10-30 11:23:32 +01005523 if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005524 return -ENOMEM;
5525
5526 return 0;
5527}
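
/* Caller sketch (hedged): in the Tx path a non-zero return is treated
 * as "no room in the aggregated ring" and the frame is dropped rather
 * than queued, roughly:
 *
 *	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags))
 *		goto drop;	(hypothetical label: free the skb and
 *				 bump the drop counter there)
 */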
5528
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005529/* Reserved Tx descriptors allocation request
5530 *
5531 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
5532 * only by mvpp2_tx(), so migration is disabled, using
5533 * smp_processor_id() is OK.
5534 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03005535static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
5536 struct mvpp2_tx_queue *txq, int num)
5537{
5538 u32 val;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005539 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005540
5541 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005542 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005543
Thomas Petazzonia7868412017-03-07 16:53:13 +01005544 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005545
5546 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
5547}
5548
5549/* Check if there are enough reserved descriptors for transmission.
5550 * If not, request chunk of reserved descriptors and check again.
5551 */
5552static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
5553 struct mvpp2_tx_queue *txq,
5554 struct mvpp2_txq_pcpu *txq_pcpu,
5555 int num)
5556{
5557 int req, cpu, desc_count;
5558
5559 if (txq_pcpu->reserved_num >= num)
5560 return 0;
5561
5562 /* Not enough descriptors reserved! Update the reserved descriptor
5563 * count and check again.
5564 */
5565
5566 desc_count = 0;
5567 /* Compute total of used descriptors */
5568 for_each_present_cpu(cpu) {
5569 struct mvpp2_txq_pcpu *txq_pcpu_aux;
5570
5571 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
5572 desc_count += txq_pcpu_aux->count;
5573 desc_count += txq_pcpu_aux->reserved_num;
5574 }
5575
5576 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
5577 desc_count += req;
5578
5579 if (desc_count >
5580 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
5581 return -ENOMEM;
5582
5583 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
5584
5585 /* OK, the descriptor count has been updated: check again. */
5586 if (txq_pcpu->reserved_num < num)
5587 return -ENOMEM;
5588 return 0;
5589}
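
/* Worked example (assuming MVPP2_CPU_DESC_CHUNK == 64 on a 4-CPU
 * system with txq->size == 1024): the request is refused once the sum
 * of all per-CPU counts and reservations plus the new chunk would
 * exceed 1024 - 4 * 64 == 768, so every CPU always keeps a chunk-sized
 * margin it can still claim.
 */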
5590
5591/* Release the last allocated Tx descriptor. Useful to handle DMA
5592 * mapping failures in the Tx path.
5593 */
5594static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
5595{
5596 if (txq->next_desc_to_proc == 0)
5597 txq->next_desc_to_proc = txq->last_desc - 1;
5598 else
5599 txq->next_desc_to_proc--;
5600}
5601
5602/* Set Tx descriptors fields relevant for CSUM calculation */
5603static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
5604 int ip_hdr_len, int l4_proto)
5605{
5606 u32 command;
5607
5608 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
5609 * G_L4_chk, L4_type required only for checksum calculation
5610 */
5611 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
5612 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
5613 command |= MVPP2_TXD_IP_CSUM_DISABLE;
5614
5615 if (l3_proto == swab16(ETH_P_IP)) {
5616 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
5617 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
5618 } else {
5619 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
5620 }
5621
5622 if (l4_proto == IPPROTO_TCP) {
5623 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
5624 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5625 } else if (l4_proto == IPPROTO_UDP) {
5626 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
5627 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
5628 } else {
5629 command |= MVPP2_TXD_L4_CSUM_NOT;
5630 }
5631
5632 return command;
5633}
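
/* Call sketch (hedged): for a TCP/IPv4 frame whose IP header starts
 * right after the Ethernet header, a caller would build the command
 * along the lines of
 *
 *	cmd = mvpp2_txq_desc_csum(ETH_HLEN, swab16(ETH_P_IP),
 *				  ip_hdr(skb)->ihl, IPPROTO_TCP);
 *
 * clearing both MVPP2_TXD_IP_CSUM_DISABLE and MVPP2_TXD_L4_CSUM_FRAG
 * so the hardware generates the IPv4 and TCP checksums. That
 * ip_hdr_len is given in 32-bit words (ihl) is an assumption about
 * the IP_hdrlen descriptor field.
 */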
5634
5635/* Get number of sent descriptors and decrement counter.
5636 * The number of sent descriptors is returned.
5637 * Per-CPU access
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005638 *
5639 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
5640 * (migration disabled) and from the TX completion tasklet (migration
5641 * disabled) so using smp_processor_id() is OK.
Marcin Wojtas3f518502014-07-10 16:52:13 -03005642 */
5643static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
5644 struct mvpp2_tx_queue *txq)
5645{
5646 u32 val;
5647
5648 /* Reading status reg resets transmitted descriptor counter */
Thomas Petazzonia7868412017-03-07 16:53:13 +01005649 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
5650 MVPP2_TXQ_SENT_REG(txq->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005651
5652 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
5653 MVPP2_TRANSMITTED_COUNT_OFFSET;
5654}
5655
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02005656/* Called through on_each_cpu(), so runs on all CPUs, with migration
5657 * disabled, therefore using smp_processor_id() is OK.
5658 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03005659static void mvpp2_txq_sent_counter_clear(void *arg)
5660{
5661 struct mvpp2_port *port = arg;
5662 int queue;
5663
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005664 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005665 int id = port->txqs[queue]->id;
5666
Thomas Petazzonia7868412017-03-07 16:53:13 +01005667 mvpp2_percpu_read(port->priv, smp_processor_id(),
5668 MVPP2_TXQ_SENT_REG(id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005669 }
5670}
5671
5672/* Set max sizes for Tx queues */
5673static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
5674{
5675 u32 val, size, mtu;
5676 int txq, tx_port_num;
5677
5678 mtu = port->pkt_size * 8;
5679 if (mtu > MVPP2_TXP_MTU_MAX)
5680 mtu = MVPP2_TXP_MTU_MAX;
5681
5682 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
5683 mtu = 3 * mtu;
5684
5685 /* Indirect access to registers */
5686 tx_port_num = mvpp2_egress_port(port);
5687 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5688
5689 /* Set MTU */
5690 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
5691 val &= ~MVPP2_TXP_MTU_MAX;
5692 val |= mtu;
5693 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
5694
5695 /* TXP token size and all TXQs token size must be larger than MTU */
5696 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
5697 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
5698 if (size < mtu) {
5699 size = mtu;
5700 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
5701 val |= size;
5702 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
5703 }
5704
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005705 for (txq = 0; txq < port->ntxqs; txq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005706 val = mvpp2_read(port->priv,
5707 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
5708 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
5709
5710 if (size < mtu) {
5711 size = mtu;
5712 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
5713 val |= size;
5714 mvpp2_write(port->priv,
5715 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
5716 val);
5717 }
5718 }
5719}
5720
5721/* Set the number of packets that will be received before an Rx
5722 * interrupt is generated by HW.
5723 */
5724static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01005725 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005726{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005727 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005728
Thomas Petazzonif8b0d5f2017-02-21 11:28:03 +01005729 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
5730 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005731
Thomas Petazzonia7868412017-03-07 16:53:13 +01005732 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5733 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
5734 rxq->pkts_coal);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005735
5736 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005737}
5738
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005739/* For some reason in the LSP this is done on each CPU. Why? */
5740static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
5741 struct mvpp2_tx_queue *txq)
5742{
5743 int cpu = get_cpu();
5744 u32 val;
5745
5746 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
5747 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
5748
5749 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
5750 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5751 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
5752
5753 put_cpu();
5754}
5755
Thomas Petazzoniab426762017-02-21 11:28:04 +01005756static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
5757{
5758 u64 tmp = (u64)clk_hz * usec;
5759
5760 do_div(tmp, USEC_PER_SEC);
5761
5762 return tmp > U32_MAX ? U32_MAX : tmp;
5763}
5764
5765static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
5766{
5767 u64 tmp = (u64)cycles * USEC_PER_SEC;
5768
5769 do_div(tmp, clk_hz);
5770
5771 return tmp > U32_MAX ? U32_MAX : tmp;
5772}
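
/* Worked example (tclk == 250 MHz is used purely as an illustration):
 *
 *	mvpp2_usec_to_cycles(100, 250000000)   == 25000
 *	mvpp2_cycles_to_usec(25000, 250000000) == 100
 *
 * The u64 intermediate plus do_div() avoids overflow on 32-bit hosts,
 * and out-of-range results are clamped to U32_MAX.
 */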
5773
Marcin Wojtas3f518502014-07-10 16:52:13 -03005774/* Set the time delay in usec before Rx interrupt */
5775static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01005776 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005777{
Thomas Petazzoniab426762017-02-21 11:28:04 +01005778 unsigned long freq = port->priv->tclk;
5779 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005780
Thomas Petazzoniab426762017-02-21 11:28:04 +01005781 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
5782 rxq->time_coal =
5783 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
5784
5785 /* re-evaluate to get actual register value */
5786 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
5787 }
5788
Marcin Wojtas3f518502014-07-10 16:52:13 -03005789 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005790}
5791
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005792static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
5793{
5794 unsigned long freq = port->priv->tclk;
5795 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5796
5797 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
5798 port->tx_time_coal =
5799 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
5800
5801 /* re-evaluate to get actual register value */
5802 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5803 }
5804
5805 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
5806}
5807
Marcin Wojtas3f518502014-07-10 16:52:13 -03005808/* Free Tx queue skbuffs */
5809static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
5810 struct mvpp2_tx_queue *txq,
5811 struct mvpp2_txq_pcpu *txq_pcpu, int num)
5812{
5813 int i;
5814
5815 for (i = 0; i < num; i++) {
Thomas Petazzoni83544912016-12-21 11:28:49 +01005816 struct mvpp2_txq_pcpu_buf *tx_buf =
5817 txq_pcpu->buffs + txq_pcpu->txq_get_index;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005818
Antoine Tenart20920262017-10-23 15:24:30 +02005819 if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
5820 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
5821 tx_buf->size, DMA_TO_DEVICE);
Thomas Petazzoni36fb7432017-02-21 11:28:05 +01005822 if (tx_buf->skb)
5823 dev_kfree_skb_any(tx_buf->skb);
5824
5825 mvpp2_txq_inc_get(txq_pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005826 }
5827}
5828
5829static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
5830 u32 cause)
5831{
5832 int queue = fls(cause) - 1;
5833
5834 return port->rxqs[queue];
5835}
5836
5837static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
5838 u32 cause)
5839{
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005840 int queue = fls(cause) - 1;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005841
5842 return port->txqs[queue];
5843}
5844
5845/* Handle end of transmission */
5846static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
5847 struct mvpp2_txq_pcpu *txq_pcpu)
5848{
5849 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
5850 int tx_done;
5851
5852 if (txq_pcpu->cpu != smp_processor_id())
5853 netdev_err(port->dev, "wrong cpu at the end of Tx processing\n");
5854
5855 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
5856 if (!tx_done)
5857 return;
5858 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
5859
5860 txq_pcpu->count -= tx_done;
5861
5862 if (netif_tx_queue_stopped(nq))
Antoine Tenart1d17db02017-10-30 11:23:31 +01005863 if (txq_pcpu->count <= txq_pcpu->wake_threshold)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005864 netif_tx_wake_queue(nq);
5865}
5866
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005867static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
5868 int cpu)
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005869{
5870 struct mvpp2_tx_queue *txq;
5871 struct mvpp2_txq_pcpu *txq_pcpu;
5872 unsigned int tx_todo = 0;
5873
5874 while (cause) {
5875 txq = mvpp2_get_tx_queue(port, cause);
5876 if (!txq)
5877 break;
5878
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005879 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005880
5881 if (txq_pcpu->count) {
5882 mvpp2_txq_done(port, txq, txq_pcpu);
5883 tx_todo += txq_pcpu->count;
5884 }
5885
5886 cause &= ~(1 << txq->log_id);
5887 }
5888 return tx_todo;
5889}
5890
Marcin Wojtas3f518502014-07-10 16:52:13 -03005891/* Rx/Tx queue initialization/cleanup methods */
5892
5893/* Allocate and initialize descriptors for aggr TXQ */
5894static int mvpp2_aggr_txq_init(struct platform_device *pdev,
Antoine Ténart85affd72017-08-23 09:46:55 +02005895 struct mvpp2_tx_queue *aggr_txq, int cpu,
Marcin Wojtas3f518502014-07-10 16:52:13 -03005896 struct mvpp2 *priv)
5897{
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005898 u32 txq_dma;
5899
Marcin Wojtas3f518502014-07-10 16:52:13 -03005900 /* Allocate memory for TX descriptors */
Yan Markmana154f8e2017-11-30 10:49:46 +01005901 aggr_txq->descs = dma_zalloc_coherent(&pdev->dev,
Antoine Ténart85affd72017-08-23 09:46:55 +02005902 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005903 &aggr_txq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005904 if (!aggr_txq->descs)
5905 return -ENOMEM;
5906
Antoine Tenart02856a32017-10-30 11:23:32 +01005907 aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005908
5909 /* Aggr TXQ no reset WA */
5910 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
5911 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
5912
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005913 /* Set Tx descriptors queue starting address - indirect
5914 * access
5915 */
5916 if (priv->hw_version == MVPP21)
5917 txq_dma = aggr_txq->descs_dma;
5918 else
5919 txq_dma = aggr_txq->descs_dma >>
5920 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
5921
5922 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
Antoine Ténart85affd72017-08-23 09:46:55 +02005923 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu),
5924 MVPP2_AGGR_TXQ_SIZE);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005925
5926 return 0;
5927}
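
/* Address sketch (hedged): on PPv2.2 the base-address register is
 * 32 bits wide but must describe a 40-bit physical address, hence the
 * shift above. Assuming MVPP22_AGGR_TXQ_DESC_ADDR_OFFS == 8, a ring at
 * 0x123456700 is programmed as 0x123456700 >> 8 == 0x01234567; the low
 * 8 bits are implicitly zero, which the coherent allocation's
 * alignment is expected to guarantee.
 */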
5928
5929/* Create a specified Rx queue */
5930static int mvpp2_rxq_init(struct mvpp2_port *port,
5931 struct mvpp2_rx_queue *rxq)
5932
5933{
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005934 u32 rxq_dma;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005935 int cpu;
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005936
Marcin Wojtas3f518502014-07-10 16:52:13 -03005937 rxq->size = port->rx_ring_size;
5938
5939 /* Allocate memory for RX descriptors */
5940 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
5941 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005942 &rxq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005943 if (!rxq->descs)
5944 return -ENOMEM;
5945
Marcin Wojtas3f518502014-07-10 16:52:13 -03005946 rxq->last_desc = rxq->size - 1;
5947
5948 /* Zero occupied and non-occupied counters - direct access */
5949 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5950
5951 /* Set Rx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_bm_pool_put(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	int cpu;

	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs,
				  rxq->descs_dma);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
	put_cpu();
}

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT.
	 * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
	 */
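	/* For example (assuming MVPP2_MAX_TXQ == 8): port 1, logical txq 2
	 * is assigned prefetch descriptors 160..175.
	 */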
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			return -ENOMEM;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
		txq_pcpu->tso_headers = NULL;

		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;

		txq_pcpu->tso_headers =
			dma_alloc_coherent(port->dev->dev.parent,
					   txq_pcpu->size * TSO_HEADER_SIZE,
					   &txq_pcpu->tso_headers_dma,
					   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
			return -ENOMEM;
	}

	return 0;
}

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int cpu;

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		kfree(txq_pcpu->buffs);

		if (txq_pcpu->tso_headers)
			dma_free_coherent(port->dev->dev.parent,
					  txq_pcpu->size * TSO_HEADER_SIZE,
					  txq_pcpu->tso_headers,
					  txq_pcpu->tso_headers_dma);

		txq_pcpu->tso_headers = NULL;
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
	put_cpu();
}

/* Drain and clean up a Tx queue */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	cpu = get_cpu();
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_percpu_read(port->priv, cpu,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
	put_cpu();

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < port->nrxqs; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	if (port->has_tx_irqs) {
		mvpp2_tx_time_coal_set(port);
		for (queue = 0; queue < port->ntxqs; queue++) {
			txq = port->txqs[queue];
			mvpp2_tx_pkts_coal_set(port, txq);
		}
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_queue_vector *qv = dev_id;

	mvpp2_qvec_interrupt_disable(qv);

	napi_schedule(&qv->napi);

	return IRQ_HANDLED;
}

/* Per-port interrupt for link status changes */
static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
	struct net_device *dev = port->dev;
	bool event = false, link = false;
	u32 val;

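	/* Mask the GOP interrupt while the latched link-status bits are
	 * read and handled; it is unmasked again before returning.
	 */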
	mvpp22_gop_mask_irq(port);

	if (port->gop_id == 0 &&
	    port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP22_XLG_INT_STAT);
		if (val & MVPP22_XLG_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP22_XLG_STATUS);
			if (val & MVPP22_XLG_STATUS_LINK_UP)
				link = true;
		}
	} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
		   port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_STAT);
		if (val & MVPP22_GMAC_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP2_GMAC_STATUS0);
			if (val & MVPP2_GMAC_STATUS0_LINK_UP)
				link = true;
		}
	}

	if (!netif_running(dev) || !event)
		goto handled;

	if (link) {
		mvpp2_interrupts_enable(port);

		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		mvpp2_ingress_disable(port);
		mvpp2_egress_disable(port);

		mvpp2_interrupts_disable(port);
	}

handled:
	mvpp22_gop_unmask_irq(port);
	return IRQ_HANDLED;
}

static void mvpp2_gmac_set_autoneg(struct mvpp2_port *port,
				   struct phy_device *phydev)
{
	u32 val;

	if (port->phy_interface != PHY_INTERFACE_MODE_RGMII &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_ID &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_RXID &&
	    port->phy_interface != PHY_INTERFACE_MODE_RGMII_TXID &&
	    port->phy_interface != PHY_INTERFACE_MODE_SGMII)
		return;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
		 MVPP2_GMAC_CONFIG_GMII_SPEED |
		 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		 MVPP2_GMAC_AN_SPEED_EN |
		 MVPP2_GMAC_AN_DUPLEX_EN);

	if (phydev->duplex)
		val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

	if (phydev->speed == SPEED_1000)
		val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
	else if (phydev->speed == SPEED_100)
		val |= MVPP2_GMAC_CONFIG_MII_SPEED;

	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Adjust link */
static void mvpp2_link_event(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool link_reconfigured = false;
	u32 val;

	if (phydev->link) {
		if (port->phy_interface != phydev->interface && port->comphy) {
			/* disable current port for reconfiguration */
			mvpp2_interrupts_disable(port);
			netif_carrier_off(port->dev);
			mvpp2_port_disable(port);
			phy_power_off(port->comphy);

			/* comphy reconfiguration */
			port->phy_interface = phydev->interface;
			mvpp22_comphy_init(port);

			/* gop/mac reconfiguration */
			mvpp22_gop_init(port);
			mvpp2_port_mii_set(port);

			link_reconfigured = true;
		}

		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			mvpp2_gmac_set_autoneg(port, phydev);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link || link_reconfigured) {
		port->link = phydev->link;

		if (phydev->link) {
			if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
			    port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
			    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
				val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
				val |= (MVPP2_GMAC_FORCE_LINK_PASS |
					MVPP2_GMAC_FORCE_LINK_DOWN);
				writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			}

			mvpp2_interrupts_enable(port);
			mvpp2_port_enable(port);

			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
			netif_carrier_on(dev);
			netif_tx_wake_all_queues(dev);
		} else {
			port->duplex = -1;
			port->speed = 0;

			netif_tx_stop_all_queues(dev);
			netif_carrier_off(dev);
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);

			mvpp2_port_disable(port);
			mvpp2_interrupts_disable(port);
		}

		phy_print_status(phydev);
	}
}

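/* On ports without TX interrupts, TX completion is driven by the deferred
 * hrtimer/tasklet pair below: mvpp2_timer_set() arms the timer once, the
 * timer schedules the tasklet, and the tasklet reclaims sent descriptors,
 * re-arming the timer if work remains.
 */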
static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}

static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
	unsigned int tx_todo, cause;

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}

static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}

/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		    (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool, int pool)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
			      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

	return 0;
}

/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}

/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the number to process */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
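		/* The BM "cookie" holds the buffer's physical address; the
		 * buffers are assumed to come from the kernel linear
		 * mapping, so phys_to_virt() yields a valid virtual address.
		 */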
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This release process is controlled
		 * by the hardware, and the information about the buffer is
		 * carried in the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
			continue;
		}

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_todo;
}

static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
		  struct mvpp2_tx_desc *desc)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);

	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
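	/* TSO headers sit in a per-CPU coherent buffer and were never
	 * streaming-mapped, so they must not be unmapped here.
	 */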
	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
				 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}

/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, frag->size);

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      frag->size,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;
cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}

static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
			     MVPP2_TXD_F_DESC |
			     MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}

static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
	return 0;
}

static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct tso_t tso;
	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i, len, descs = 0;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
				      tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	tso_start(skb, &tso);
	len = skb->len - hdr_sz;
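	/* Emit one TSO segment per outer-loop iteration: a header descriptor
	 * built in the per-CPU TSO header area, followed by data descriptors
	 * covering up to gso_size bytes of payload each.
	 */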
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}

/* Main tx processing */
static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	int frags = 0;
	u16 txq_id;
	u32 tx_cmd;

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = this_cpu_ptr(txq->pcpu);
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
					     txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
		}
	}

out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq_pcpu->reserved_num -= frags;
		txq_pcpu->count += frags;
		aggr_txq->count += frags;

		/* Enable transmit */
		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);

		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
	    txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);

		mvpp2_timer_set(port_pcpu);
	}

	return NETDEV_TX_OK;
}

static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}

static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	int cpu = smp_processor_id();

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
	cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
					MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_percpu_write(port->priv, cpu,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	if (cause_tx) {
		cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
		mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
	}

	/* Process RX packets */
	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;
	int i;

	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	/* Enable interrupts on all CPUs */
	mvpp2_interrupts_enable(port);

	if (port->priv->hw_version == MVPP22) {
		mvpp22_comphy_init(port);
		mvpp22_gop_init(port);
	}

	mvpp2_port_mii_set(port);
	mvpp2_port_enable(port);
	if (ndev->phydev)
		phy_start(ndev->phydev);
	netif_tx_start_all_queues(port->dev);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;
	int i;

	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mdelay(10);

	/* Disable interrupts on all CPUs */
	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	netif_carrier_off(port->dev);
	netif_tx_stop_all_queues(port->dev);

	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);
	if (ndev->phydev)
		phy_stop(ndev->phydev);
	phy_power_off(port->comphy);
}

static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
		new_rx_pending = MVPP2_MAX_RXD_MAX;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);
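	/* e.g. a request for 100 Rx descriptors is rounded up to 112 */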

	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
		new_tx_pending = MVPP2_MAX_TXD_MAX;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	/* The Tx ring size cannot be smaller than the minimum number of
	 * descriptors needed for TSO.
	 */
	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}

static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}

static int mvpp2_phy_connect(struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	/* No PHY is attached */
	if (!port->phy_node)
		return 0;

	phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
				 port->phy_interface);
	if (!phy_dev) {
		netdev_err(port->dev, "cannot connect to phy\n");
		return -ENODEV;
	}
	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	port->link = 0;
	port->duplex = 0;
	port->speed = 0;

	return 0;
}

static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
	struct net_device *ndev = port->dev;

	if (!ndev->phydev)
		return;

	phy_disconnect(ndev->phydev);
}

static int mvpp2_irqs_init(struct mvpp2_port *port)
{
	int err, i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

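		/* Private (per-CPU) vectors are pinned to their CPU: IRQ
		 * balancing is disabled and the affinity hint is set below.
		 */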
		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);

		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
		if (err)
			goto err;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
			irq_set_affinity_hint(qv->irq,
					      cpumask_of(qv->sw_thread_id));
	}

	return 0;
err:
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		free_irq(qv->irq, qv);
	}

	return err;
}

static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
		free_irq(qv->irq, qv);
	}
}

static void mvpp22_init_rss(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int i;

	/* Set the table width: replace the whole classifier Rx queue number
	 * with the ones configured in RSS table entries.
	 */
	mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0));
	mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);

	/* Loop through the classifier Rx Queues and map them to a RSS table.
	 * Map them all to the first table (0) by default.
	 */
	for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) {
		mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i));
		mvpp2_write(priv, MVPP22_RSS_TABLE,
			    MVPP22_RSS_TABLE_POINTER(0));
	}

	/* Configure the first table to evenly distribute the packets across
	 * the real Rx queues. Each table entry maps a hash value to a port
	 * Rx queue.
	 */
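	/* With nrxqs == 4, for instance, the table entries map hashes to
	 * queues 0, 1, 2, 3, 0, 1, ... in round-robin order.
	 */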
	for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
		u32 sel = MVPP22_RSS_INDEX_TABLE(0) |
			  MVPP22_RSS_INDEX_TABLE_ENTRY(i);
		mvpp2_write(priv, MVPP22_RSS_INDEX, sel);

		mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs);
	}
}

static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq) {
		err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev, "cannot request link IRQ %d\n",
				   port->link_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);
	}

	/* The link is down by default */
7358 netif_carrier_off(port->dev);
7359
7360 err = mvpp2_phy_connect(port);
7361 if (err < 0)
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007362 goto err_free_link_irq;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007363
7364 /* Unmask interrupts on all CPUs */
7365 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007366 mvpp2_shared_interrupt_mask_unmask(port, false);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007367
7368 mvpp2_start_dev(port);
7369
Antoine Tenart1d7d15d2017-10-30 11:23:30 +01007370 if (priv->hw_version == MVPP22)
7371 mvpp22_init_rss(port);
7372
Miquel Raynal118d6292017-11-06 22:56:53 +01007373 /* Start hardware statistics gathering */
Miquel Raynale5c500e2017-11-08 08:59:40 +01007374 queue_delayed_work(priv->stats_queue, &port->stats_work,
Miquel Raynal118d6292017-11-06 22:56:53 +01007375 MVPP2_MIB_COUNTERS_STATS_DELAY);
7376
Marcin Wojtas3f518502014-07-10 16:52:13 -03007377 return 0;
7378
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007379err_free_link_irq:
7380 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
7381 free_irq(port->link_irq, port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007382err_free_irq:
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007383 mvpp2_irqs_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007384err_cleanup_txqs:
7385 mvpp2_cleanup_txqs(port);
7386err_cleanup_rxqs:
7387 mvpp2_cleanup_rxqs(port);
7388 return err;
7389}
7390
7391static int mvpp2_stop(struct net_device *dev)
7392{
7393 struct mvpp2_port *port = netdev_priv(dev);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007394 struct mvpp2_port_pcpu *port_pcpu;
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007395 struct mvpp2 *priv = port->priv;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007396 int cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007397
7398 mvpp2_stop_dev(port);
7399 mvpp2_phy_disconnect(port);
7400
7401 /* Mask interrupts on all CPUs */
7402 on_each_cpu(mvpp2_interrupts_mask, port, 1);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007403 mvpp2_shared_interrupt_mask_unmask(port, true);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007404
Antoine Tenartfd3651b2017-09-01 11:04:54 +02007405 if (priv->hw_version == MVPP22 && !port->phy_node && port->link_irq)
7406 free_irq(port->link_irq, port);
7407
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007408 mvpp2_irqs_deinit(port);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007409 if (!port->has_tx_irqs) {
7410 for_each_present_cpu(cpu) {
7411 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007412
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007413 hrtimer_cancel(&port_pcpu->tx_done_timer);
7414 port_pcpu->timer_scheduled = false;
7415 tasklet_kill(&port_pcpu->tx_done_tasklet);
7416 }
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007417 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007418 mvpp2_cleanup_rxqs(port);
7419 mvpp2_cleanup_txqs(port);
7420
Miquel Raynale5c500e2017-11-08 08:59:40 +01007421 cancel_delayed_work_sync(&port->stats_work);
Miquel Raynal118d6292017-11-06 22:56:53 +01007422
Marcin Wojtas3f518502014-07-10 16:52:13 -03007423 return 0;
7424}
7425
7426static void mvpp2_set_rx_mode(struct net_device *dev)
7427{
7428 struct mvpp2_port *port = netdev_priv(dev);
7429 struct mvpp2 *priv = port->priv;
7430 struct netdev_hw_addr *ha;
7431 int id = port->id;
7432 bool allmulti = dev->flags & IFF_ALLMULTI;
7433
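	/* If the parser rejects one of the multicast addresses below (e.g.
	 * because it ran out of entries), fall back to allmulti and redo
	 * the whole configuration.
	 */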
Mikulas Patocka7ac8ff92018-02-11 18:10:28 -05007434retry:
Marcin Wojtas3f518502014-07-10 16:52:13 -03007435 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
7436 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
7437 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
7438
7439	/* Remove all port->id's mcast entries */
7440 mvpp2_prs_mcast_del_all(priv, id);
7441
Mikulas Patocka7ac8ff92018-02-11 18:10:28 -05007442 if (!allmulti) {
7443 netdev_for_each_mc_addr(ha, dev) {
7444 if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
7445 allmulti = true;
7446 goto retry;
7447 }
7448 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007449 }
Maxime Chevallier56beda32018-02-28 10:14:13 +01007450
7451 /* Disable VLAN filtering in promiscuous mode */
7452 if (dev->flags & IFF_PROMISC)
7453 mvpp2_prs_vid_disable_filtering(port);
7454 else
7455 mvpp2_prs_vid_enable_filtering(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007456}
7457
7458static int mvpp2_set_mac_address(struct net_device *dev, void *p)
7459{
7460 struct mvpp2_port *port = netdev_priv(dev);
7461 const struct sockaddr *addr = p;
7462 int err;
7463
7464 if (!is_valid_ether_addr(addr->sa_data)) {
7465 err = -EADDRNOTAVAIL;
Markus Elfringc1175542017-04-17 11:10:47 +02007466 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007467 }
7468
7469 if (!netif_running(dev)) {
7470 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
7471 if (!err)
7472 return 0;
7473 /* Reconfigure parser to accept the original MAC address */
7474 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
7475 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02007476 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007477 }
7478
7479 mvpp2_stop_dev(port);
7480
7481 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
7482 if (!err)
7483 goto out_start;
7484
7485	/* Reconfigure parser to accept the original MAC address */
7486 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
7487 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02007488 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007489out_start:
7490 mvpp2_start_dev(port);
7491 mvpp2_egress_enable(port);
7492 mvpp2_ingress_enable(port);
7493 return 0;
Markus Elfringc1175542017-04-17 11:10:47 +02007494log_error:
Markus Elfringdfd42402017-04-17 11:20:41 +02007495 netdev_err(dev, "failed to change MAC address\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03007496 return err;
7497}
7498
7499static int mvpp2_change_mtu(struct net_device *dev, int mtu)
7500{
7501 struct mvpp2_port *port = netdev_priv(dev);
7502 int err;
7503
Jarod Wilson57779872016-10-17 15:54:06 -04007504 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
7505		netdev_info(dev, "illegal MTU value %d, rounding to %d\n", mtu,
7506 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
7507 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007508 }
7509
7510 if (!netif_running(dev)) {
7511 err = mvpp2_bm_update_mtu(dev, mtu);
7512 if (!err) {
7513 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
7514 return 0;
7515 }
7516
7517 /* Reconfigure BM to the original MTU */
7518 err = mvpp2_bm_update_mtu(dev, dev->mtu);
7519 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02007520 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007521 }
7522
7523 mvpp2_stop_dev(port);
7524
7525 err = mvpp2_bm_update_mtu(dev, mtu);
7526 if (!err) {
7527 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
7528 goto out_start;
7529 }
7530
7531 /* Reconfigure BM to the original MTU */
7532 err = mvpp2_bm_update_mtu(dev, dev->mtu);
7533 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02007534 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007535
7536out_start:
7537 mvpp2_start_dev(port);
7538 mvpp2_egress_enable(port);
7539 mvpp2_ingress_enable(port);
7540
7541 return 0;
Markus Elfringc1175542017-04-17 11:10:47 +02007542log_error:
Markus Elfringdfd42402017-04-17 11:20:41 +02007543 netdev_err(dev, "failed to change MTU\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03007544 return err;
7545}
7546
stephen hemmingerbc1f4472017-01-06 19:12:52 -08007547static void
Marcin Wojtas3f518502014-07-10 16:52:13 -03007548mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7549{
7550 struct mvpp2_port *port = netdev_priv(dev);
7551 unsigned int start;
7552 int cpu;
7553
7554 for_each_possible_cpu(cpu) {
7555 struct mvpp2_pcpu_stats *cpu_stats;
7556 u64 rx_packets;
7557 u64 rx_bytes;
7558 u64 tx_packets;
7559 u64 tx_bytes;
7560
7561 cpu_stats = per_cpu_ptr(port->stats, cpu);
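		/* The u64_stats seqcount retry loop guarantees tear-free
		 * 64-bit counter reads even on 32-bit systems.
		 */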
7562 do {
7563 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
7564 rx_packets = cpu_stats->rx_packets;
7565 rx_bytes = cpu_stats->rx_bytes;
7566 tx_packets = cpu_stats->tx_packets;
7567 tx_bytes = cpu_stats->tx_bytes;
7568 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
7569
7570 stats->rx_packets += rx_packets;
7571 stats->rx_bytes += rx_bytes;
7572 stats->tx_packets += tx_packets;
7573 stats->tx_bytes += tx_bytes;
7574 }
7575
7576 stats->rx_errors = dev->stats.rx_errors;
7577 stats->rx_dropped = dev->stats.rx_dropped;
7578 stats->tx_dropped = dev->stats.tx_dropped;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007579}
7580
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007581static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7582{
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007583 int ret;
7584
Philippe Reynes8e072692016-06-28 00:08:11 +02007585 if (!dev->phydev)
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007586 return -ENOTSUPP;
7587
Philippe Reynes8e072692016-06-28 00:08:11 +02007588 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007589 if (!ret)
7590 mvpp2_link_event(dev);
7591
7592 return ret;
7593}
7594
Maxime Chevallier56beda32018-02-28 10:14:13 +01007595static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
7596{
7597 struct mvpp2_port *port = netdev_priv(dev);
7598 int ret;
7599
7600 ret = mvpp2_prs_vid_entry_add(port, vid);
7601 if (ret)
7602 netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
7603 MVPP2_PRS_VLAN_FILT_MAX - 1);
7604 return ret;
7605}
7606
7607static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
7608{
7609 struct mvpp2_port *port = netdev_priv(dev);
7610
7611 mvpp2_prs_vid_entry_remove(port, vid);
7612 return 0;
7613}
7614
7615static int mvpp2_set_features(struct net_device *dev,
7616 netdev_features_t features)
7617{
7618 netdev_features_t changed = dev->features ^ features;
7619 struct mvpp2_port *port = netdev_priv(dev);
7620
7621 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
7622 if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
7623 mvpp2_prs_vid_enable_filtering(port);
7624 } else {
7625 /* Invalidate all registered VID filters for this
7626 * port
7627 */
7628 mvpp2_prs_vid_remove_all(port);
7629
7630 mvpp2_prs_vid_disable_filtering(port);
7631 }
7632 }
7633
7634 return 0;
7635}
7636
Marcin Wojtas3f518502014-07-10 16:52:13 -03007637/* Ethtool methods */
7638
Marcin Wojtas3f518502014-07-10 16:52:13 -03007639/* Set interrupt coalescing for ethtool */
7640static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
7641 struct ethtool_coalesce *c)
7642{
7643 struct mvpp2_port *port = netdev_priv(dev);
7644 int queue;
7645
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007646 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007647 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
7648
7649 rxq->time_coal = c->rx_coalesce_usecs;
7650 rxq->pkts_coal = c->rx_max_coalesced_frames;
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01007651 mvpp2_rx_pkts_coal_set(port, rxq);
7652 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007653 }
7654
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007655 if (port->has_tx_irqs) {
7656 port->tx_time_coal = c->tx_coalesce_usecs;
7657 mvpp2_tx_time_coal_set(port);
7658 }
7659
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007660 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007661 struct mvpp2_tx_queue *txq = port->txqs[queue];
7662
7663 txq->done_pkts_coal = c->tx_max_coalesced_frames;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007664
7665 if (port->has_tx_irqs)
7666 mvpp2_tx_pkts_coal_set(port, txq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007667 }
7668
Marcin Wojtas3f518502014-07-10 16:52:13 -03007669 return 0;
7670}
7671
7672/* Get interrupt coalescing for ethtool */
7673static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
7674 struct ethtool_coalesce *c)
7675{
7676 struct mvpp2_port *port = netdev_priv(dev);
7677
Antoine Tenart385c2842017-12-11 09:13:27 +01007678 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
7679 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
7680 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
Antoine Tenart24b28cc2017-12-11 09:13:28 +01007681 c->tx_coalesce_usecs = port->tx_time_coal;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007682 return 0;
7683}
7684
7685static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
7686 struct ethtool_drvinfo *drvinfo)
7687{
7688 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
7689 sizeof(drvinfo->driver));
7690 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
7691 sizeof(drvinfo->version));
7692 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
7693 sizeof(drvinfo->bus_info));
7694}
7695
7696static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
7697 struct ethtool_ringparam *ring)
7698{
7699 struct mvpp2_port *port = netdev_priv(dev);
7700
Yan Markman7cf87e42017-12-11 09:13:26 +01007701 ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
7702 ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007703 ring->rx_pending = port->rx_ring_size;
7704 ring->tx_pending = port->tx_ring_size;
7705}
7706
7707static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
7708 struct ethtool_ringparam *ring)
7709{
7710 struct mvpp2_port *port = netdev_priv(dev);
7711 u16 prev_rx_ring_size = port->rx_ring_size;
7712 u16 prev_tx_ring_size = port->tx_ring_size;
7713 int err;
7714
7715 err = mvpp2_check_ringparam_valid(dev, ring);
7716 if (err)
7717 return err;
7718
7719 if (!netif_running(dev)) {
7720 port->rx_ring_size = ring->rx_pending;
7721 port->tx_ring_size = ring->tx_pending;
7722 return 0;
7723 }
7724
7725 /* The interface is running, so we have to force a
7726 * reallocation of the queues
7727 */
7728 mvpp2_stop_dev(port);
7729 mvpp2_cleanup_rxqs(port);
7730 mvpp2_cleanup_txqs(port);
7731
7732 port->rx_ring_size = ring->rx_pending;
7733 port->tx_ring_size = ring->tx_pending;
7734
7735 err = mvpp2_setup_rxqs(port);
7736 if (err) {
7737 /* Reallocate Rx queues with the original ring size */
7738 port->rx_ring_size = prev_rx_ring_size;
7739 ring->rx_pending = prev_rx_ring_size;
7740 err = mvpp2_setup_rxqs(port);
7741 if (err)
7742 goto err_out;
7743 }
7744 err = mvpp2_setup_txqs(port);
7745 if (err) {
7746 /* Reallocate Tx queues with the original ring size */
7747 port->tx_ring_size = prev_tx_ring_size;
7748 ring->tx_pending = prev_tx_ring_size;
7749 err = mvpp2_setup_txqs(port);
7750 if (err)
7751 goto err_clean_rxqs;
7752 }
7753
7754 mvpp2_start_dev(port);
7755 mvpp2_egress_enable(port);
7756 mvpp2_ingress_enable(port);
7757
7758 return 0;
7759
7760err_clean_rxqs:
7761 mvpp2_cleanup_rxqs(port);
7762err_out:
Markus Elfringdfd42402017-04-17 11:20:41 +02007763	netdev_err(dev, "failed to change ring parameters\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03007764 return err;
7765}
7766
7767/* Device ops */
7768
7769static const struct net_device_ops mvpp2_netdev_ops = {
7770 .ndo_open = mvpp2_open,
7771 .ndo_stop = mvpp2_stop,
7772 .ndo_start_xmit = mvpp2_tx,
7773 .ndo_set_rx_mode = mvpp2_set_rx_mode,
7774 .ndo_set_mac_address = mvpp2_set_mac_address,
7775 .ndo_change_mtu = mvpp2_change_mtu,
7776 .ndo_get_stats64 = mvpp2_get_stats64,
Thomas Petazzonibd695a52014-07-27 23:21:36 +02007777 .ndo_do_ioctl = mvpp2_ioctl,
Maxime Chevallier56beda32018-02-28 10:14:13 +01007778 .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
7779 .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
7780 .ndo_set_features = mvpp2_set_features,
Marcin Wojtas3f518502014-07-10 16:52:13 -03007781};
7782
7783static const struct ethtool_ops mvpp2_eth_tool_ops = {
Florian Fainelli00606c42016-11-15 11:19:48 -08007784 .nway_reset = phy_ethtool_nway_reset,
Marcin Wojtas3f518502014-07-10 16:52:13 -03007785 .get_link = ethtool_op_get_link,
Marcin Wojtas3f518502014-07-10 16:52:13 -03007786 .set_coalesce = mvpp2_ethtool_set_coalesce,
7787 .get_coalesce = mvpp2_ethtool_get_coalesce,
7788 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
7789 .get_ringparam = mvpp2_ethtool_get_ringparam,
7790 .set_ringparam = mvpp2_ethtool_set_ringparam,
Miquel Raynal118d6292017-11-06 22:56:53 +01007791 .get_strings = mvpp2_ethtool_get_strings,
7792 .get_ethtool_stats = mvpp2_ethtool_get_stats,
7793 .get_sset_count = mvpp2_ethtool_get_sset_count,
Philippe Reynesfb773e92016-06-28 00:08:12 +02007794 .get_link_ksettings = phy_ethtool_get_link_ksettings,
7795 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Marcin Wojtas3f518502014-07-10 16:52:13 -03007796};
7797
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007798/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
7799 * had a single IRQ defined per-port.
7800 */
7801static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
7802 struct device_node *port_node)
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007803{
7804 struct mvpp2_queue_vector *v = &port->qvecs[0];
7805
7806 v->first_rxq = 0;
7807 v->nrxqs = port->nrxqs;
7808 v->type = MVPP2_QUEUE_VECTOR_SHARED;
7809 v->sw_thread_id = 0;
7810 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
7811 v->port = port;
7812 v->irq = irq_of_parse_and_map(port_node, 0);
7813 if (v->irq <= 0)
7814 return -EINVAL;
7815 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7816 NAPI_POLL_WEIGHT);
7817
7818 port->nqvecs = 1;
7819
7820 return 0;
7821}
7822
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007823static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
7824 struct device_node *port_node)
7825{
7826 struct mvpp2_queue_vector *v;
7827 int i, ret;
7828
7829 port->nqvecs = num_possible_cpus();
7830 if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
7831 port->nqvecs += 1;
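	/* One private vector per possible CPU; in single-queue mode an
	 * extra, shared vector at the end handles all the Rx queues.
	 */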
7832
7833 for (i = 0; i < port->nqvecs; i++) {
7834 char irqname[16];
7835
7836 v = port->qvecs + i;
7837
7838 v->port = port;
7839 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
7840 v->sw_thread_id = i;
7841 v->sw_thread_mask = BIT(i);
7842
7843 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
7844
7845 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
7846 v->first_rxq = i * MVPP2_DEFAULT_RXQ;
7847 v->nrxqs = MVPP2_DEFAULT_RXQ;
7848 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
7849 i == (port->nqvecs - 1)) {
7850 v->first_rxq = 0;
7851 v->nrxqs = port->nrxqs;
7852 v->type = MVPP2_QUEUE_VECTOR_SHARED;
7853 strncpy(irqname, "rx-shared", sizeof(irqname));
7854 }
7855
Marcin Wojtasa75edc72018-01-18 13:31:44 +01007856 if (port_node)
7857 v->irq = of_irq_get_byname(port_node, irqname);
7858 else
7859 v->irq = fwnode_irq_get(port->fwnode, i);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007860 if (v->irq <= 0) {
7861 ret = -EINVAL;
7862 goto err;
7863 }
7864
7865 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
7866 NAPI_POLL_WEIGHT);
7867 }
7868
7869 return 0;
7870
7871err:
7872 for (i = 0; i < port->nqvecs; i++)
7873 irq_dispose_mapping(port->qvecs[i].irq);
7874 return ret;
7875}
7876
7877static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
7878 struct device_node *port_node)
7879{
7880 if (port->has_tx_irqs)
7881 return mvpp2_multi_queue_vectors_init(port, port_node);
7882 else
7883 return mvpp2_simple_queue_vectors_init(port, port_node);
7884}
7885
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007886static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
7887{
7888 int i;
7889
7890 for (i = 0; i < port->nqvecs; i++)
7891 irq_dispose_mapping(port->qvecs[i].irq);
7892}
7893
7894/* Configure Rx queue group interrupt for this port */
7895static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
7896{
7897 struct mvpp2 *priv = port->priv;
7898 u32 val;
7899 int i;
7900
7901 if (priv->hw_version == MVPP21) {
7902 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
7903 port->nrxqs);
7904 return;
7905 }
7906
7907 /* Handle the more complicated PPv2.2 case */
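	/* Each queue vector maps to one Rx queue group: select the
	 * (port, sw thread) group through the index register, then program
	 * its first Rx queue and the number of queues it spans.
	 */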
7908 for (i = 0; i < port->nqvecs; i++) {
7909 struct mvpp2_queue_vector *qv = port->qvecs + i;
7910
7911 if (!qv->nrxqs)
7912 continue;
7913
7914 val = qv->sw_thread_id;
7915 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
7916 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
7917
7918 val = qv->first_rxq;
7919 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
7920 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
7921 }
7922}
7923
Marcin Wojtas3f518502014-07-10 16:52:13 -03007924/* Initialize port HW */
7925static int mvpp2_port_init(struct mvpp2_port *port)
7926{
7927 struct device *dev = port->dev->dev.parent;
7928 struct mvpp2 *priv = port->priv;
7929 struct mvpp2_txq_pcpu *txq_pcpu;
7930 int queue, cpu, err;
7931
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007932 /* Checks for hardware constraints */
7933 if (port->first_rxq + port->nrxqs >
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01007934 MVPP2_MAX_PORTS * priv->max_port_rxqs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007935 return -EINVAL;
7936
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007937 if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
7938 (port->ntxqs > MVPP2_MAX_TXQ))
7939 return -EINVAL;
7940
Marcin Wojtas3f518502014-07-10 16:52:13 -03007941 /* Disable port */
7942 mvpp2_egress_disable(port);
7943 mvpp2_port_disable(port);
7944
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007945 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
7946
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007947 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03007948 GFP_KERNEL);
7949 if (!port->txqs)
7950 return -ENOMEM;
7951
7952	/* Associate physical Tx queues with this port and initialize them.
7953 * The mapping is predefined.
7954 */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007955 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007956 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
7957 struct mvpp2_tx_queue *txq;
7958
7959 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
Christophe Jaillet177c8d12017-02-19 10:19:57 +01007960 if (!txq) {
7961 err = -ENOMEM;
7962 goto err_free_percpu;
7963 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007964
7965 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
7966 if (!txq->pcpu) {
7967 err = -ENOMEM;
7968 goto err_free_percpu;
7969 }
7970
7971 txq->id = queue_phy_id;
7972 txq->log_id = queue;
7973 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
7974 for_each_present_cpu(cpu) {
7975 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
7976 txq_pcpu->cpu = cpu;
7977 }
7978
7979 port->txqs[queue] = txq;
7980 }
7981
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007982 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03007983 GFP_KERNEL);
7984 if (!port->rxqs) {
7985 err = -ENOMEM;
7986 goto err_free_percpu;
7987 }
7988
7989	/* Allocate and initialize Rx queues for this port */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007990 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03007991 struct mvpp2_rx_queue *rxq;
7992
7993 /* Map physical Rx queue to port's logical Rx queue */
7994 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08007995 if (!rxq) {
7996 err = -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007997 goto err_free_percpu;
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08007998 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007999 /* Map this Rx queue to a physical queue */
8000 rxq->id = port->first_rxq + queue;
8001 rxq->port = port->id;
8002 rxq->logic_rxq = queue;
8003
8004 port->rxqs[queue] = rxq;
8005 }
8006
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008007 mvpp2_rx_irqs_setup(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008008
8009 /* Create Rx descriptor rings */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008010 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008011 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
8012
8013 rxq->size = port->rx_ring_size;
8014 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
8015 rxq->time_coal = MVPP2_RX_COAL_USEC;
8016 }
8017
8018 mvpp2_ingress_disable(port);
8019
8020 /* Port default configuration */
8021 mvpp2_defaults_set(port);
8022
8023 /* Port's classifier configuration */
8024 mvpp2_cls_oversize_rxq_set(port);
8025 mvpp2_cls_port_config(port);
8026
8027 /* Provide an initial Rx packet size */
8028 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
8029
8030	/* Initialize BM pools for software forwarding (swf) */
8031 err = mvpp2_swf_bm_pool_init(port);
8032 if (err)
8033 goto err_free_percpu;
8034
8035 return 0;
8036
8037err_free_percpu:
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008038 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008039 if (!port->txqs[queue])
8040 continue;
8041 free_percpu(port->txqs[queue]->pcpu);
8042 }
8043 return err;
8044}
8045
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008046/* Checks whether the port DT description has the Tx interrupts
8047 * described. On PPv2.1 there are no such interrupts. On PPv2.2 they
8048 * are available, but we need to keep support for old DTs.
8049 */
8050static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
8051 struct device_node *port_node)
8052{
8053 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
8054 "tx-cpu2", "tx-cpu3" };
8055 int ret, i;
8056
8057 if (priv->hw_version == MVPP21)
8058 return false;
8059
8060 for (i = 0; i < 5; i++) {
8061 ret = of_property_match_string(port_node, "interrupt-names",
8062 irqs[i]);
8063 if (ret < 0)
8064 return false;
8065 }
8066
8067 return true;
8068}
8069
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008070static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
Marcin Wojtas24812222018-01-18 13:31:43 +01008071 struct fwnode_handle *fwnode,
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008072 char **mac_from)
8073{
8074 struct mvpp2_port *port = netdev_priv(dev);
8075 char hw_mac_addr[ETH_ALEN] = {0};
Marcin Wojtas24812222018-01-18 13:31:43 +01008076 char fw_mac_addr[ETH_ALEN];
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008077
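	/* Try the MAC address sources in order: the firmware node first,
	 * then (on PPv2.1) the address already programmed in hardware, and
	 * finally fall back to a random one.
	 */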
Marcin Wojtas24812222018-01-18 13:31:43 +01008078 if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
8079 *mac_from = "firmware node";
8080 ether_addr_copy(dev->dev_addr, fw_mac_addr);
Antoine Tenart688cbaf2017-09-02 11:06:49 +02008081 return;
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008082 }
Antoine Tenart688cbaf2017-09-02 11:06:49 +02008083
8084 if (priv->hw_version == MVPP21) {
8085 mvpp21_get_mac_address(port, hw_mac_addr);
8086 if (is_valid_ether_addr(hw_mac_addr)) {
8087 *mac_from = "hardware";
8088 ether_addr_copy(dev->dev_addr, hw_mac_addr);
8089 return;
8090 }
8091 }
8092
8093 *mac_from = "random";
8094 eth_hw_addr_random(dev);
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008095}
8096
Marcin Wojtas3f518502014-07-10 16:52:13 -03008097/* Ports initialization */
8098static int mvpp2_port_probe(struct platform_device *pdev,
Marcin Wojtas24812222018-01-18 13:31:43 +01008099 struct fwnode_handle *port_fwnode,
Marcin Wojtasbf147152018-01-18 13:31:42 +01008100 struct mvpp2 *priv)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008101{
8102 struct device_node *phy_node;
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008103 struct phy *comphy = NULL;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008104 struct mvpp2_port *port;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008105 struct mvpp2_port_pcpu *port_pcpu;
Marcin Wojtas24812222018-01-18 13:31:43 +01008106 struct device_node *port_node = to_of_node(port_fwnode);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008107 struct net_device *dev;
8108 struct resource *res;
Antoine Tenart3ba8c812017-09-02 11:06:47 +02008109 char *mac_from = "";
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008110 unsigned int ntxqs, nrxqs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008111 bool has_tx_irqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008112 u32 id;
8113 int features;
8114 int phy_mode;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008115 int err, i, cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008116
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008117 if (port_node) {
8118 has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
8119 } else {
8120 has_tx_irqs = true;
8121 queue_mode = MVPP2_QDIST_MULTI_MODE;
8122 }
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008123
8124 if (!has_tx_irqs)
8125 queue_mode = MVPP2_QDIST_SINGLE_MODE;
8126
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008127 ntxqs = MVPP2_MAX_TXQ;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008128 if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
8129 nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
8130 else
8131 nrxqs = MVPP2_DEFAULT_RXQ;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008132
8133 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008134 if (!dev)
8135 return -ENOMEM;
8136
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008137 if (port_node)
8138 phy_node = of_parse_phandle(port_node, "phy", 0);
8139 else
8140 phy_node = NULL;
8141
Marcin Wojtas24812222018-01-18 13:31:43 +01008142 phy_mode = fwnode_get_phy_mode(port_fwnode);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008143 if (phy_mode < 0) {
8144 dev_err(&pdev->dev, "incorrect phy mode\n");
8145 err = phy_mode;
8146 goto err_free_netdev;
8147 }
8148
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008149 if (port_node) {
8150 comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
8151 if (IS_ERR(comphy)) {
8152 if (PTR_ERR(comphy) == -EPROBE_DEFER) {
8153 err = -EPROBE_DEFER;
8154 goto err_free_netdev;
8155 }
8156 comphy = NULL;
Antoine Tenart542897d2017-08-30 10:29:15 +02008157 }
Antoine Tenart542897d2017-08-30 10:29:15 +02008158 }
8159
Marcin Wojtas24812222018-01-18 13:31:43 +01008160 if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008161 err = -EINVAL;
8162 dev_err(&pdev->dev, "missing port-id value\n");
8163 goto err_free_netdev;
8164 }
8165
Yan Markman7cf87e42017-12-11 09:13:26 +01008166 dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008167 dev->watchdog_timeo = 5 * HZ;
8168 dev->netdev_ops = &mvpp2_netdev_ops;
8169 dev->ethtool_ops = &mvpp2_eth_tool_ops;
8170
8171 port = netdev_priv(dev);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008172 port->dev = dev;
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008173 port->fwnode = port_fwnode;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008174 port->ntxqs = ntxqs;
8175 port->nrxqs = nrxqs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008176 port->priv = priv;
8177 port->has_tx_irqs = has_tx_irqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008178
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008179 err = mvpp2_queue_vectors_init(port, port_node);
8180 if (err)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008181 goto err_free_netdev;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008182
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008183 if (port_node)
8184 port->link_irq = of_irq_get_byname(port_node, "link");
8185 else
8186 port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008187 if (port->link_irq == -EPROBE_DEFER) {
8188 err = -EPROBE_DEFER;
8189 goto err_deinit_qvecs;
8190 }
8191 if (port->link_irq <= 0)
8192 /* the link irq is optional */
8193 port->link_irq = 0;
8194
Marcin Wojtas24812222018-01-18 13:31:43 +01008195 if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
Marcin Wojtas3f518502014-07-10 16:52:13 -03008196 port->flags |= MVPP2_F_LOOPBACK;
8197
Marcin Wojtas3f518502014-07-10 16:52:13 -03008198 port->id = id;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01008199 if (priv->hw_version == MVPP21)
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008200 port->first_rxq = port->id * port->nrxqs;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01008201 else
8202 port->first_rxq = port->id * priv->max_port_rxqs;
8203
Marcin Wojtas3f518502014-07-10 16:52:13 -03008204 port->phy_node = phy_node;
8205 port->phy_interface = phy_mode;
Antoine Tenart542897d2017-08-30 10:29:15 +02008206 port->comphy = comphy;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008207
Thomas Petazzonia7868412017-03-07 16:53:13 +01008208 if (priv->hw_version == MVPP21) {
8209 res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
8210 port->base = devm_ioremap_resource(&pdev->dev, res);
8211 if (IS_ERR(port->base)) {
8212 err = PTR_ERR(port->base);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008213 goto err_free_irq;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008214 }
Miquel Raynal118d6292017-11-06 22:56:53 +01008215
8216 port->stats_base = port->priv->lms_base +
8217 MVPP21_MIB_COUNTERS_OFFSET +
8218 port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008219 } else {
Marcin Wojtas24812222018-01-18 13:31:43 +01008220 if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
8221 &port->gop_id)) {
Thomas Petazzonia7868412017-03-07 16:53:13 +01008222 err = -EINVAL;
8223 dev_err(&pdev->dev, "missing gop-port-id value\n");
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008224 goto err_deinit_qvecs;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008225 }
8226
8227 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
Miquel Raynal118d6292017-11-06 22:56:53 +01008228 port->stats_base = port->priv->iface_base +
8229 MVPP22_MIB_COUNTERS_OFFSET +
8230 port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008231 }
8232
Miquel Raynal118d6292017-11-06 22:56:53 +01008233 /* Alloc per-cpu and ethtool stats */
Marcin Wojtas3f518502014-07-10 16:52:13 -03008234 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
8235 if (!port->stats) {
8236 err = -ENOMEM;
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008237 goto err_free_irq;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008238 }
8239
Miquel Raynal118d6292017-11-06 22:56:53 +01008240 port->ethtool_stats = devm_kcalloc(&pdev->dev,
8241 ARRAY_SIZE(mvpp2_ethtool_regs),
8242 sizeof(u64), GFP_KERNEL);
8243 if (!port->ethtool_stats) {
8244 err = -ENOMEM;
8245 goto err_free_stats;
8246 }
8247
Miquel Raynale5c500e2017-11-08 08:59:40 +01008248 mutex_init(&port->gather_stats_lock);
8249 INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
8250
Marcin Wojtas24812222018-01-18 13:31:43 +01008251 mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008252
Yan Markman7cf87e42017-12-11 09:13:26 +01008253 port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
8254 port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008255 SET_NETDEV_DEV(dev, &pdev->dev);
8256
8257 err = mvpp2_port_init(port);
8258 if (err < 0) {
8259 dev_err(&pdev->dev, "failed to init port %d\n", id);
8260 goto err_free_stats;
8261 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01008262
Thomas Petazzoni26975822017-03-07 16:53:14 +01008263 mvpp2_port_periodic_xon_disable(port);
8264
8265 if (priv->hw_version == MVPP21)
8266 mvpp2_port_fc_adv_enable(port);
8267
8268 mvpp2_port_reset(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008269
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008270 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
8271 if (!port->pcpu) {
8272 err = -ENOMEM;
8273 goto err_free_txq_pcpu;
8274 }
8275
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008276 if (!port->has_tx_irqs) {
8277 for_each_present_cpu(cpu) {
8278 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008279
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008280 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
8281 HRTIMER_MODE_REL_PINNED);
8282 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
8283 port_pcpu->timer_scheduled = false;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008284
Thomas Petazzoni213f4282017-08-03 10:42:00 +02008285 tasklet_init(&port_pcpu->tx_done_tasklet,
8286 mvpp2_tx_proc_cb,
8287 (unsigned long)dev);
8288 }
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008289 }
8290
Antoine Ténart186cd4d2017-08-23 09:46:56 +02008291 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008292 dev->features = features | NETIF_F_RXCSUM;
Maxime Chevallier56beda32018-02-28 10:14:13 +01008293 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
8294 NETIF_F_HW_VLAN_CTAG_FILTER;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008295 dev->vlan_features |= features;
Antoine Tenart1d17db02017-10-30 11:23:31 +01008296 dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008297
Jarod Wilson57779872016-10-17 15:54:06 -04008298 /* MTU range: 68 - 9676 */
8299 dev->min_mtu = ETH_MIN_MTU;
8300 /* 9676 == 9700 - 20 and rounding to 8 */
8301 dev->max_mtu = 9676;
8302
Marcin Wojtas3f518502014-07-10 16:52:13 -03008303 err = register_netdev(dev);
8304 if (err < 0) {
8305 dev_err(&pdev->dev, "failed to register netdev\n");
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008306 goto err_free_port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008307 }
8308 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
8309
Marcin Wojtasbf147152018-01-18 13:31:42 +01008310 priv->port_list[priv->port_count++] = port;
8311
Marcin Wojtas3f518502014-07-10 16:52:13 -03008312 return 0;
8313
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008314err_free_port_pcpu:
8315 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008316err_free_txq_pcpu:
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008317 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008318 free_percpu(port->txqs[i]->pcpu);
8319err_free_stats:
8320 free_percpu(port->stats);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008321err_free_irq:
8322 if (port->link_irq)
8323 irq_dispose_mapping(port->link_irq);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008324err_deinit_qvecs:
8325 mvpp2_queue_vectors_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008326err_free_netdev:
Peter Chenccb80392016-08-01 15:02:37 +08008327 of_node_put(phy_node);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008328 free_netdev(dev);
8329 return err;
8330}
8331
8332/* Ports removal routine */
8333static void mvpp2_port_remove(struct mvpp2_port *port)
8334{
8335 int i;
8336
8337 unregister_netdev(port->dev);
Peter Chenccb80392016-08-01 15:02:37 +08008338 of_node_put(port->phy_node);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02008339 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008340 free_percpu(port->stats);
Thomas Petazzoni09f83972017-08-03 10:41:57 +02008341 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03008342 free_percpu(port->txqs[i]->pcpu);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02008343 mvpp2_queue_vectors_deinit(port);
Antoine Tenartfd3651b2017-09-01 11:04:54 +02008344 if (port->link_irq)
8345 irq_dispose_mapping(port->link_irq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008346 free_netdev(port->dev);
8347}
8348
8349/* Initialize decoding windows */
8350static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
8351 struct mvpp2 *priv)
8352{
8353 u32 win_enable;
8354 int i;
8355
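	/* Start from a clean slate: clear all six decoding windows (only
	 * the first four have a remap register) before programming one
	 * window per DRAM chip select.
	 */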
8356 for (i = 0; i < 6; i++) {
8357 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
8358 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
8359
8360 if (i < 4)
8361 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
8362 }
8363
8364 win_enable = 0;
8365
8366 for (i = 0; i < dram->num_cs; i++) {
8367 const struct mbus_dram_window *cs = dram->cs + i;
8368
8369 mvpp2_write(priv, MVPP2_WIN_BASE(i),
8370 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
8371 dram->mbus_dram_target_id);
8372
8373 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
8374 (cs->size - 1) & 0xffff0000);
8375
8376 win_enable |= (1 << i);
8377 }
8378
8379 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
8380}
8381
8382/* Initialize Rx FIFOs */
8383static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
8384{
8385 int port;
8386
8387 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
8388 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
Antoine Tenart2d1d7df2017-10-30 11:23:28 +01008389 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008390 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
Antoine Tenart2d1d7df2017-10-30 11:23:28 +01008391 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
8392 }
8393
8394 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
8395 MVPP2_RX_FIFO_PORT_MIN_PKT);
8396 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
8397}
8398
8399static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
8400{
8401 int port;
8402
8403 /* The FIFO size parameters are set depending on the maximum speed a
8404 * given port can handle:
8405 * - Port 0: 10Gbps
8406 * - Port 1: 2.5Gbps
8407 * - Ports 2 and 3: 1Gbps
8408 */
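	/* Hence the asymmetric split below: a 32KB data FIFO for port 0,
	 * 8KB for port 1 and 4KB for each remaining port.
	 */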
8409
8410 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
8411 MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
8412 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
8413 MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
8414
8415 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
8416 MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
8417 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
8418 MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
8419
8420 for (port = 2; port < MVPP2_MAX_PORTS; port++) {
8421 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
8422 MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
8423 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
8424 MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008425 }
8426
8427 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
8428 MVPP2_RX_FIFO_PORT_MIN_PKT);
8429 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
8430}
8431
Antoine Tenart7c10f972017-10-30 11:23:29 +01008432/* Initialize Tx FIFOs */
8433static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
8434{
8435 int port;
8436
8437 for (port = 0; port < MVPP2_MAX_PORTS; port++)
8438 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port),
8439 MVPP22_TX_FIFO_DATA_SIZE_3KB);
8440}
8441
Thomas Petazzoni6763ce32017-03-07 16:53:15 +01008442static void mvpp2_axi_init(struct mvpp2 *priv)
8443{
8444 u32 val, rdval, wrval;
8445
8446 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
8447
8448 /* AXI Bridge Configuration */
8449
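	/* Build the read and write attributes once (cacheable accesses in
	 * the outer domain, per the macro names) and apply them to every
	 * DMA user below: BM, descriptors and packet data.
	 */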
8450 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
8451 << MVPP22_AXI_ATTR_CACHE_OFFS;
8452 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8453 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
8454
8455 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
8456 << MVPP22_AXI_ATTR_CACHE_OFFS;
8457 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8458 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
8459
8460 /* BM */
8461 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
8462 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
8463
8464 /* Descriptors */
8465 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
8466 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
8467 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
8468 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
8469
8470 /* Buffer Data */
8471 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
8472 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
8473
8474 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
8475 << MVPP22_AXI_CODE_CACHE_OFFS;
8476 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
8477 << MVPP22_AXI_CODE_DOMAIN_OFFS;
8478 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
8479 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
8480
8481 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
8482 << MVPP22_AXI_CODE_CACHE_OFFS;
8483 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8484 << MVPP22_AXI_CODE_DOMAIN_OFFS;
8485
8486 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
8487
8488 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
8489 << MVPP22_AXI_CODE_CACHE_OFFS;
8490 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
8491 << MVPP22_AXI_CODE_DOMAIN_OFFS;
8492
8493 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
8494}
8495
Marcin Wojtas3f518502014-07-10 16:52:13 -03008496/* Initialize network controller common part HW */
8497static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
8498{
8499 const struct mbus_dram_target_info *dram_target_info;
8500 int err, i;
Marcin Wojtas08a23752014-07-21 13:48:12 -03008501 u32 val;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008502
Marcin Wojtas3f518502014-07-10 16:52:13 -03008503 /* MBUS windows configuration */
8504 dram_target_info = mv_mbus_dram_info();
8505 if (dram_target_info)
8506 mvpp2_conf_mbus_windows(dram_target_info, priv);
8507
Thomas Petazzoni6763ce32017-03-07 16:53:15 +01008508 if (priv->hw_version == MVPP22)
8509 mvpp2_axi_init(priv);
8510
Marcin Wojtas08a23752014-07-21 13:48:12 -03008511 /* Disable HW PHY polling */
Thomas Petazzoni26975822017-03-07 16:53:14 +01008512 if (priv->hw_version == MVPP21) {
8513 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
8514 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
8515 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
8516 } else {
8517 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
8518 val &= ~MVPP22_SMI_POLLING_EN;
8519 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
8520 }
Marcin Wojtas08a23752014-07-21 13:48:12 -03008521
Marcin Wojtas3f518502014-07-10 16:52:13 -03008522 /* Allocate and initialize aggregated TXQs */
8523 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
Markus Elfringd7ce3ce2017-04-17 08:48:23 +02008524 sizeof(*priv->aggr_txqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03008525 GFP_KERNEL);
8526 if (!priv->aggr_txqs)
8527 return -ENOMEM;
8528
8529 for_each_present_cpu(i) {
8530 priv->aggr_txqs[i].id = i;
8531 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
Antoine Ténart85affd72017-08-23 09:46:55 +02008532 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008533 if (err < 0)
8534 return err;
8535 }
8536
Antoine Tenart7c10f972017-10-30 11:23:29 +01008537	/* FIFO init */
8538 if (priv->hw_version == MVPP21) {
Antoine Tenart2d1d7df2017-10-30 11:23:28 +01008539 mvpp2_rx_fifo_init(priv);
Antoine Tenart7c10f972017-10-30 11:23:29 +01008540 } else {
Antoine Tenart2d1d7df2017-10-30 11:23:28 +01008541 mvpp22_rx_fifo_init(priv);
Antoine Tenart7c10f972017-10-30 11:23:29 +01008542 mvpp22_tx_fifo_init(priv);
8543 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008544
Thomas Petazzoni26975822017-03-07 16:53:14 +01008545 if (priv->hw_version == MVPP21)
8546 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
8547 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008548
8549	/* Allow cache snoop when transmitting packets */
8550 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
8551
8552 /* Buffer Manager initialization */
8553 err = mvpp2_bm_init(pdev, priv);
8554 if (err < 0)
8555 return err;
8556
8557 /* Parser default initialization */
8558 err = mvpp2_prs_default_init(pdev, priv);
8559 if (err < 0)
8560 return err;
8561
8562 /* Classifier default initialization */
8563 mvpp2_cls_init(priv);
8564
8565 return 0;
8566}
8567
8568static int mvpp2_probe(struct platform_device *pdev)
8569{
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008570 const struct acpi_device_id *acpi_id;
Marcin Wojtas24812222018-01-18 13:31:43 +01008571 struct fwnode_handle *fwnode = pdev->dev.fwnode;
8572 struct fwnode_handle *port_fwnode;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008573 struct mvpp2 *priv;
8574 struct resource *res;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008575 void __iomem *base;
Miquel Raynal118d6292017-11-06 22:56:53 +01008576 int i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008577 int err;
8578
Markus Elfring0b92e592017-04-17 08:38:32 +02008579 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008580 if (!priv)
8581 return -ENOMEM;
8582
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008583 if (has_acpi_companion(&pdev->dev)) {
8584 acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
8585 &pdev->dev);
8586 priv->hw_version = (unsigned long)acpi_id->driver_data;
8587 } else {
8588 priv->hw_version =
8589 (unsigned long)of_device_get_match_data(&pdev->dev);
8590 }
Thomas Petazzonifaca9242017-03-07 16:53:06 +01008591
Marcin Wojtas3f518502014-07-10 16:52:13 -03008592 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01008593 base = devm_ioremap_resource(&pdev->dev, res);
8594 if (IS_ERR(base))
8595 return PTR_ERR(base);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008596
Thomas Petazzonia7868412017-03-07 16:53:13 +01008597 if (priv->hw_version == MVPP21) {
8598 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
8599 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
8600 if (IS_ERR(priv->lms_base))
8601 return PTR_ERR(priv->lms_base);
8602 } else {
8603 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008604 if (has_acpi_companion(&pdev->dev)) {
8605			/* If the MDIO memory region is declared in
8606			 * ACPI, it may already appear as 'in-use'
8607			 * in the OS. Because it is overlapped by the
8608			 * second region of the network controller, make
8609			 * sure it is released before requesting it again.
8610			 * The mvpp2 driver takes care to avoid
8611			 * concurrent access to this memory region.
8612 */
8613 release_resource(res);
8614 }
Thomas Petazzonia7868412017-03-07 16:53:13 +01008615 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
8616 if (IS_ERR(priv->iface_base))
8617 return PTR_ERR(priv->iface_base);
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008618 }
Antoine Ténartf84bf382017-08-22 19:08:27 +02008619
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008620 if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
Antoine Ténartf84bf382017-08-22 19:08:27 +02008621 priv->sysctrl_base =
8622 syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
8623 "marvell,system-controller");
8624 if (IS_ERR(priv->sysctrl_base))
8625 /* The system controller regmap is optional for dt
8626 * compatibility reasons. When not provided, the
8627 * configuration of the GoP relies on the
8628 * firmware/bootloader.
8629 */
8630 priv->sysctrl_base = NULL;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008631 }
8632
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02008633 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
Thomas Petazzonia7868412017-03-07 16:53:13 +01008634 u32 addr_space_sz;
8635
8636 addr_space_sz = (priv->hw_version == MVPP21 ?
8637 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02008638 priv->swth_base[i] = base + i * addr_space_sz;
Thomas Petazzonia7868412017-03-07 16:53:13 +01008639 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008640
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01008641 if (priv->hw_version == MVPP21)
8642 priv->max_port_rxqs = 8;
8643 else
8644 priv->max_port_rxqs = 32;
8645
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008646 if (dev_of_node(&pdev->dev)) {
8647 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
8648 if (IS_ERR(priv->pp_clk))
8649 return PTR_ERR(priv->pp_clk);
8650 err = clk_prepare_enable(priv->pp_clk);
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008651 if (err < 0)
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008652 return err;
8653
8654 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
8655 if (IS_ERR(priv->gop_clk)) {
8656 err = PTR_ERR(priv->gop_clk);
8657 goto err_pp_clk;
8658 }
8659 err = clk_prepare_enable(priv->gop_clk);
8660 if (err < 0)
8661 goto err_pp_clk;
8662
8663 if (priv->hw_version == MVPP22) {
8664 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
8665 if (IS_ERR(priv->mg_clk)) {
8666 err = PTR_ERR(priv->mg_clk);
8667 goto err_gop_clk;
8668 }
8669
8670 err = clk_prepare_enable(priv->mg_clk);
8671 if (err < 0)
8672 goto err_gop_clk;
8673 }
Gregory CLEMENT4792ea02017-09-29 14:27:39 +02008674
8675 priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
8676 if (IS_ERR(priv->axi_clk)) {
8677 err = PTR_ERR(priv->axi_clk);
8678 if (err == -EPROBE_DEFER)
8679 goto err_gop_clk;
8680 priv->axi_clk = NULL;
8681 } else {
8682 err = clk_prepare_enable(priv->axi_clk);
8683 if (err < 0)
8684 goto err_gop_clk;
8685 }
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008686
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008687 /* Get system's tclk rate */
8688 priv->tclk = clk_get_rate(priv->pp_clk);
8689 } else if (device_property_read_u32(&pdev->dev, "clock-frequency",
8690 &priv->tclk)) {
8691 dev_err(&pdev->dev, "missing clock-frequency value\n");
8692 return -EINVAL;
8693 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008694
Thomas Petazzoni2067e0a2017-03-07 16:53:19 +01008695 if (priv->hw_version == MVPP22) {
8696 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
8697 if (err)
8698 goto err_mg_clk;
8699 /* Sadly, the BM pools all share the same register to
8700 * store the high 32 bits of their address. So they
8701 * must all have the same high 32 bits, which forces
8702 * us to restrict coherent memory to DMA_BIT_MASK(32).
8703 */
8704 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
8705 if (err)
8706 goto err_mg_clk;
8707 }
8708
Marcin Wojtas3f518502014-07-10 16:52:13 -03008709 /* Initialize network controller */
8710 err = mvpp2_init(pdev, priv);
8711 if (err < 0) {
8712 dev_err(&pdev->dev, "failed to initialize controller\n");
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008713 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008714 }
8715
Marcin Wojtasbf147152018-01-18 13:31:42 +01008716 /* Initialize ports */
Marcin Wojtas24812222018-01-18 13:31:43 +01008717 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
8718 err = mvpp2_port_probe(pdev, port_fwnode, priv);
Marcin Wojtasbf147152018-01-18 13:31:42 +01008719 if (err < 0)
8720 goto err_port_probe;
8721 }
8722
Miquel Raynal118d6292017-11-06 22:56:53 +01008723 if (priv->port_count == 0) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03008724 dev_err(&pdev->dev, "no ports enabled\n");
Wei Yongjun575a1932014-07-20 22:02:43 +08008725 err = -ENODEV;
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008726 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008727 }
8728
Miquel Raynal118d6292017-11-06 22:56:53 +01008729	/* Statistics must be gathered regularly because some of them (like
8730	 * packet counters) are 32-bit registers and can overflow quite
8731	 * quickly. For instance, a 10Gb link used at full bandwidth with the
8732	 * smallest packets (64B) wraps a 32-bit packet counter within minutes.
8733	 * A workqueue is therefore used to accumulate 64-bit counters.
8734 */
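	/* Rough estimate: 10Gbps of 64B frames is ~14.88 Mpps (84B per
	 * frame on the wire), so a 32-bit packet counter wraps after about
	 * 2^32 / 14.88e6 ~= 290 seconds of line-rate traffic.
	 */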
Miquel Raynal118d6292017-11-06 22:56:53 +01008735 snprintf(priv->queue_name, sizeof(priv->queue_name),
8736 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
8737 priv->port_count > 1 ? "+" : "");
8738 priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
8739 if (!priv->stats_queue) {
8740 err = -ENOMEM;
Antoine Tenart26146b02017-11-28 14:19:49 +01008741 goto err_port_probe;
Miquel Raynal118d6292017-11-06 22:56:53 +01008742 }
8743
Marcin Wojtas3f518502014-07-10 16:52:13 -03008744 platform_set_drvdata(pdev, priv);
8745 return 0;
8746
Antoine Tenart26146b02017-11-28 14:19:49 +01008747err_port_probe:
8748 i = 0;
Marcin Wojtas24812222018-01-18 13:31:43 +01008749 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
Antoine Tenart26146b02017-11-28 14:19:49 +01008750 if (priv->port_list[i])
8751 mvpp2_port_remove(priv->port_list[i]);
8752 i++;
8753 }
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008754err_mg_clk:
Gregory CLEMENT4792ea02017-09-29 14:27:39 +02008755 clk_disable_unprepare(priv->axi_clk);
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008756 if (priv->hw_version == MVPP22)
8757 clk_disable_unprepare(priv->mg_clk);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008758err_gop_clk:
8759 clk_disable_unprepare(priv->gop_clk);
8760err_pp_clk:
8761 clk_disable_unprepare(priv->pp_clk);
8762 return err;
8763}
8764
8765static int mvpp2_remove(struct platform_device *pdev)
8766{
8767 struct mvpp2 *priv = platform_get_drvdata(pdev);
Marcin Wojtas24812222018-01-18 13:31:43 +01008768 struct fwnode_handle *fwnode = pdev->dev.fwnode;
8769 struct fwnode_handle *port_fwnode;
Marcin Wojtas3f518502014-07-10 16:52:13 -03008770 int i = 0;
8771
Miquel Raynale5c500e2017-11-08 08:59:40 +01008772 flush_workqueue(priv->stats_queue);
Miquel Raynal118d6292017-11-06 22:56:53 +01008773 destroy_workqueue(priv->stats_queue);
Miquel Raynal118d6292017-11-06 22:56:53 +01008774
Marcin Wojtas24812222018-01-18 13:31:43 +01008775 fwnode_for_each_available_child_node(fwnode, port_fwnode) {
Miquel Raynale5c500e2017-11-08 08:59:40 +01008776 if (priv->port_list[i]) {
8777 mutex_destroy(&priv->port_list[i]->gather_stats_lock);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008778 mvpp2_port_remove(priv->port_list[i]);
Miquel Raynale5c500e2017-11-08 08:59:40 +01008779 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03008780 i++;
8781 }
8782
8783 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
8784 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
8785
8786 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
8787 }
8788
8789 for_each_present_cpu(i) {
8790 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
8791
8792 dma_free_coherent(&pdev->dev,
8793 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
8794 aggr_txq->descs,
Thomas Petazzoni20396132017-03-07 16:53:00 +01008795 aggr_txq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008796 }
8797
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008798 if (is_acpi_node(port_fwnode))
8799 return 0;
8800
Gregory CLEMENT4792ea02017-09-29 14:27:39 +02008801 clk_disable_unprepare(priv->axi_clk);
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01008802 clk_disable_unprepare(priv->mg_clk);
Marcin Wojtas3f518502014-07-10 16:52:13 -03008803 clk_disable_unprepare(priv->pp_clk);
8804 clk_disable_unprepare(priv->gop_clk);
8805
8806 return 0;
8807}
8808
8809static const struct of_device_id mvpp2_match[] = {
Thomas Petazzonifaca9242017-03-07 16:53:06 +01008810 {
8811 .compatible = "marvell,armada-375-pp2",
8812 .data = (void *)MVPP21,
8813 },
Thomas Petazzonifc5e1552017-03-07 16:53:20 +01008814 {
8815 .compatible = "marvell,armada-7k-pp22",
8816 .data = (void *)MVPP22,
8817 },
Marcin Wojtas3f518502014-07-10 16:52:13 -03008818 { }
8819};
8820MODULE_DEVICE_TABLE(of, mvpp2_match);
8821
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008822static const struct acpi_device_id mvpp2_acpi_match[] = {
8823 { "MRVL0110", MVPP22 },
8824 { },
8825};
8826MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
8827
Marcin Wojtas3f518502014-07-10 16:52:13 -03008828static struct platform_driver mvpp2_driver = {
8829 .probe = mvpp2_probe,
8830 .remove = mvpp2_remove,
8831 .driver = {
8832 .name = MVPP2_DRIVER_NAME,
8833 .of_match_table = mvpp2_match,
Marcin Wojtasa75edc72018-01-18 13:31:44 +01008834 .acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
Marcin Wojtas3f518502014-07-10 16:52:13 -03008835 },
8836};
8837
8838module_platform_driver(mvpp2_driver);
8839
8840MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
8841MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
Ezequiel Garciac6340992014-07-14 10:34:47 -03008842MODULE_LICENSE("GPL v2");