Marcin Wojtas3f518502014-07-10 16:52:13 -03001/*
2 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Marcin Wojtas <mw@semihalf.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/platform_device.h>
17#include <linux/skbuff.h>
18#include <linux/inetdevice.h>
19#include <linux/mbus.h>
20#include <linux/module.h>
21#include <linux/interrupt.h>
22#include <linux/cpumask.h>
23#include <linux/of.h>
24#include <linux/of_irq.h>
25#include <linux/of_mdio.h>
26#include <linux/of_net.h>
27#include <linux/of_address.h>
Thomas Petazzonifaca9242017-03-07 16:53:06 +010028#include <linux/of_device.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030029#include <linux/phy.h>
30#include <linux/clk.h>
Marcin Wojtasedc660f2015-08-06 19:00:30 +020031#include <linux/hrtimer.h>
32#include <linux/ktime.h>
Marcin Wojtas3f518502014-07-10 16:52:13 -030033#include <uapi/linux/ppp_defs.h>
34#include <net/ip.h>
35#include <net/ipv6.h>
36
37/* RX Fifo Registers */
38#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
39#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
40#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
41#define MVPP2_RX_FIFO_INIT_REG 0x64
42
43/* RX DMA Top Registers */
44#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port))
45#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16)
46#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31)
47#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool))
48#define MVPP2_POOL_BUF_SIZE_OFFSET 5
49#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq))
50#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff
51#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9)
52#define MVPP2_RXQ_POOL_SHORT_OFFS 20
Thomas Petazzoni5eac8922017-03-07 16:53:10 +010053#define MVPP21_RXQ_POOL_SHORT_MASK 0x700000
54#define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000
Marcin Wojtas3f518502014-07-10 16:52:13 -030055#define MVPP2_RXQ_POOL_LONG_OFFS 24
Thomas Petazzoni5eac8922017-03-07 16:53:10 +010056#define MVPP21_RXQ_POOL_LONG_MASK 0x7000000
57#define MVPP22_RXQ_POOL_LONG_MASK 0xf000000
Marcin Wojtas3f518502014-07-10 16:52:13 -030058#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28
59#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
60#define MVPP2_RXQ_DISABLE_MASK BIT(31)
61
62/* Parser Registers */
63#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
64#define MVPP2_PRS_PORT_LU_MAX 0xf
65#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4))
66#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4))
67#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4))
68#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8))
69#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8))
70#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4))
71#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8))
72#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8))
73#define MVPP2_PRS_TCAM_IDX_REG 0x1100
74#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4)
75#define MVPP2_PRS_TCAM_INV_MASK BIT(31)
76#define MVPP2_PRS_SRAM_IDX_REG 0x1200
77#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4)
78#define MVPP2_PRS_TCAM_CTRL_REG 0x1230
79#define MVPP2_PRS_TCAM_EN_MASK BIT(0)
80
81/* Classifier Registers */
82#define MVPP2_CLS_MODE_REG 0x1800
83#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0)
84#define MVPP2_CLS_PORT_WAY_REG 0x1810
85#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port))
86#define MVPP2_CLS_LKP_INDEX_REG 0x1814
87#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6
88#define MVPP2_CLS_LKP_TBL_REG 0x1818
89#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff
90#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25)
91#define MVPP2_CLS_FLOW_INDEX_REG 0x1820
92#define MVPP2_CLS_FLOW_TBL0_REG 0x1824
93#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
94#define MVPP2_CLS_FLOW_TBL2_REG 0x182c
95#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4))
96#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3
97#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7
98#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4))
99#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0
100#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port))
101
102/* Descriptor Manager Top Registers */
103#define MVPP2_RXQ_NUM_REG 0x2040
104#define MVPP2_RXQ_DESC_ADDR_REG 0x2044
Thomas Petazzonib02f31f2017-03-07 16:53:12 +0100105#define MVPP22_DESC_ADDR_OFFS 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300106#define MVPP2_RXQ_DESC_SIZE_REG 0x2048
107#define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0
108#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq))
109#define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0
110#define MVPP2_RXQ_NUM_NEW_OFFSET 16
111#define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq))
112#define MVPP2_RXQ_OCCUPIED_MASK 0x3fff
113#define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16
114#define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000
115#define MVPP2_RXQ_THRESH_REG 0x204c
116#define MVPP2_OCCUPIED_THRESH_OFFSET 0
117#define MVPP2_OCCUPIED_THRESH_MASK 0x3fff
118#define MVPP2_RXQ_INDEX_REG 0x2050
119#define MVPP2_TXQ_NUM_REG 0x2080
120#define MVPP2_TXQ_DESC_ADDR_REG 0x2084
121#define MVPP2_TXQ_DESC_SIZE_REG 0x2088
122#define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200123#define MVPP2_TXQ_THRESH_REG 0x2094
124#define MVPP2_TXQ_THRESH_OFFSET 16
125#define MVPP2_TXQ_THRESH_MASK 0x3fff
Marcin Wojtas3f518502014-07-10 16:52:13 -0300126#define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090
Marcin Wojtas3f518502014-07-10 16:52:13 -0300127#define MVPP2_TXQ_INDEX_REG 0x2098
128#define MVPP2_TXQ_PREF_BUF_REG 0x209c
129#define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff)
130#define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13))
131#define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14))
132#define MVPP2_PREF_BUF_THRESH(val) ((val) << 17)
133#define MVPP2_TXQ_DRAIN_EN_MASK BIT(31)
134#define MVPP2_TXQ_PENDING_REG 0x20a0
135#define MVPP2_TXQ_PENDING_MASK 0x3fff
136#define MVPP2_TXQ_INT_STATUS_REG 0x20a4
137#define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq))
138#define MVPP2_TRANSMITTED_COUNT_OFFSET 16
139#define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000
140#define MVPP2_TXQ_RSVD_REQ_REG 0x20b0
141#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16
142#define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4
143#define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff
144#define MVPP2_TXQ_RSVD_CLR_REG 0x20b8
145#define MVPP2_TXQ_RSVD_CLR_OFFSET 16
146#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu))
Thomas Petazzonib02f31f2017-03-07 16:53:12 +0100147#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300148#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu))
149#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0
150#define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu))
151#define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff
152#define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu))
153
154/* MBUS bridge registers */
155#define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2))
156#define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2))
157#define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2))
158#define MVPP2_BASE_ADDR_ENABLE 0x4060
159
Thomas Petazzoni6763ce32017-03-07 16:53:15 +0100160/* AXI Bridge Registers */
161#define MVPP22_AXI_BM_WR_ATTR_REG 0x4100
162#define MVPP22_AXI_BM_RD_ATTR_REG 0x4104
163#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110
164#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114
165#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118
166#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c
167#define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120
168#define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130
169#define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150
170#define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154
171#define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160
172#define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164
173
174/* Values for AXI Bridge registers */
175#define MVPP22_AXI_ATTR_CACHE_OFFS 0
176#define MVPP22_AXI_ATTR_DOMAIN_OFFS 12
177
178#define MVPP22_AXI_CODE_CACHE_OFFS 0
179#define MVPP22_AXI_CODE_DOMAIN_OFFS 4
180
181#define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3
182#define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7
183#define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb
184
185#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2
186#define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3
187
Marcin Wojtas3f518502014-07-10 16:52:13 -0300188/* Interrupt Cause and Mask registers */
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200189#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port))
190#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0
191
Marcin Wojtas3f518502014-07-10 16:52:13 -0300192#define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq))
Thomas Petazzoniab426762017-02-21 11:28:04 +0100193#define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0
Thomas Petazzonieb1e93a2017-08-03 10:41:55 +0200194#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port))
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100195
Antoine Ténart81b66302017-08-22 19:08:21 +0200196#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100197#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
Antoine Ténart81b66302017-08-22 19:08:21 +0200198#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
199#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100200
201#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
Antoine Ténart81b66302017-08-22 19:08:21 +0200202#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100203
Antoine Ténart81b66302017-08-22 19:08:21 +0200204#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404
205#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f
206#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00
207#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8
Thomas Petazzonia73fef12017-03-07 16:53:16 +0100208
Marcin Wojtas3f518502014-07-10 16:52:13 -0300209#define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port))
210#define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff)
211#define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000)
212#define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port))
213#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
214#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200215#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16
Marcin Wojtas3f518502014-07-10 16:52:13 -0300216#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24)
217#define MVPP2_CAUSE_FCS_ERR_MASK BIT(25)
218#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26)
219#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29)
220#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30)
221#define MVPP2_CAUSE_MISC_SUM_MASK BIT(31)
222#define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port))
223#define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc
224#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff
225#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
226#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
227#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
228
229/* Buffer Manager registers */
230#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
231#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80
232#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4))
233#define MVPP2_BM_POOL_SIZE_MASK 0xfff0
234#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4))
235#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0
236#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4))
237#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0
238#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
239#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
240#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
241#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
242#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
243#define MVPP2_BM_START_MASK BIT(0)
244#define MVPP2_BM_STOP_MASK BIT(1)
245#define MVPP2_BM_STATE_MASK BIT(4)
246#define MVPP2_BM_LOW_THRESH_OFFS 8
247#define MVPP2_BM_LOW_THRESH_MASK 0x7f00
248#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \
249 MVPP2_BM_LOW_THRESH_OFFS)
250#define MVPP2_BM_HIGH_THRESH_OFFS 16
251#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
252#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
253 MVPP2_BM_HIGH_THRESH_OFFS)
254#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
255#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
256#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
257#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2)
258#define MVPP2_BM_BPPE_FULL_MASK BIT(3)
259#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4)
260#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4))
261#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4))
262#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0)
263#define MVPP2_BM_VIRT_ALLOC_REG 0x6440
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100264#define MVPP22_BM_ADDR_HIGH_ALLOC 0x6444
265#define MVPP22_BM_ADDR_HIGH_PHYS_MASK 0xff
266#define MVPP22_BM_ADDR_HIGH_VIRT_MASK 0xff00
267#define MVPP22_BM_ADDR_HIGH_VIRT_SHIFT 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300268#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4))
269#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0)
270#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1)
271#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2)
272#define MVPP2_BM_VIRT_RLS_REG 0x64c0
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100273#define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4
274#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff
Antoine Ténart81b66302017-08-22 19:08:21 +0200275#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00
Thomas Petazzonid01524d2017-03-07 16:53:09 +0100276#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8
Marcin Wojtas3f518502014-07-10 16:52:13 -0300277
278/* TX Scheduler registers */
279#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000
280#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004
281#define MVPP2_TXP_SCHED_ENQ_MASK 0xff
282#define MVPP2_TXP_SCHED_DISQ_OFFSET 8
283#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010
284#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018
285#define MVPP2_TXP_SCHED_MTU_REG 0x801c
286#define MVPP2_TXP_MTU_MAX 0x7FFFF
287#define MVPP2_TXP_SCHED_REFILL_REG 0x8020
288#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff
289#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000
290#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20)
291#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024
292#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff
293#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
294#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff
295#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000
296#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20)
297#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
298#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff
299#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
300#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff
301
302/* TX general registers */
303#define MVPP2_TX_SNOOP_REG 0x8800
304#define MVPP2_TX_PORT_FLUSH_REG 0x8810
305#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port))
306
307/* LMS registers */
308#define MVPP2_SRC_ADDR_MIDDLE 0x24
309#define MVPP2_SRC_ADDR_HIGH 0x28
Marcin Wojtas08a23752014-07-21 13:48:12 -0300310#define MVPP2_PHY_AN_CFG0_REG 0x34
311#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300312#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c
Thomas Petazzoni31d76772017-02-21 11:28:10 +0100313#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27
Marcin Wojtas3f518502014-07-10 16:52:13 -0300314
315/* Per-port registers */
316#define MVPP2_GMAC_CTRL_0_REG 0x0
Antoine Ténart81b66302017-08-22 19:08:21 +0200317#define MVPP2_GMAC_PORT_EN_MASK BIT(0)
Antoine Ténart39193572017-08-22 19:08:24 +0200318#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1)
Antoine Ténart81b66302017-08-22 19:08:21 +0200319#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2
320#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc
321#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300322#define MVPP2_GMAC_CTRL_1_REG 0x4
Antoine Ténart81b66302017-08-22 19:08:21 +0200323#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1)
324#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5)
325#define MVPP2_GMAC_PCS_LB_EN_BIT 6
326#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6)
327#define MVPP2_GMAC_SA_LOW_OFFS 7
Marcin Wojtas3f518502014-07-10 16:52:13 -0300328#define MVPP2_GMAC_CTRL_2_REG 0x8
Antoine Ténart81b66302017-08-22 19:08:21 +0200329#define MVPP2_GMAC_INBAND_AN_MASK BIT(0)
Antoine Ténart39193572017-08-22 19:08:24 +0200330#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1)
Antoine Ténart81b66302017-08-22 19:08:21 +0200331#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3)
332#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4)
Antoine Ténart39193572017-08-22 19:08:24 +0200333#define MVPP2_GMAC_DISABLE_PADDING BIT(5)
Antoine Ténart81b66302017-08-22 19:08:21 +0200334#define MVPP2_GMAC_PORT_RESET_MASK BIT(6)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300335#define MVPP2_GMAC_AUTONEG_CONFIG 0xc
Antoine Ténart81b66302017-08-22 19:08:21 +0200336#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0)
337#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1)
Antoine Ténart39193572017-08-22 19:08:24 +0200338#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2)
339#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3)
Antoine Ténart81b66302017-08-22 19:08:21 +0200340#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5)
341#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6)
342#define MVPP2_GMAC_AN_SPEED_EN BIT(7)
343#define MVPP2_GMAC_FC_ADV_EN BIT(9)
Antoine Ténart39193572017-08-22 19:08:24 +0200344#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11)
Antoine Ténart81b66302017-08-22 19:08:21 +0200345#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12)
346#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300347#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c
Antoine Ténart81b66302017-08-22 19:08:21 +0200348#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6
349#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0
350#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
Marcin Wojtas3f518502014-07-10 16:52:13 -0300351 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
Thomas Petazzoni26975822017-03-07 16:53:14 +0100352#define MVPP22_GMAC_CTRL_4_REG 0x90
Antoine Ténart81b66302017-08-22 19:08:21 +0200353#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0)
354#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
Antoine Ténart1068ec72017-08-22 19:08:22 +0200355#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
Antoine Ténart81b66302017-08-22 19:08:21 +0200356#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
Thomas Petazzoni26975822017-03-07 16:53:14 +0100357
358/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
359 * relative to port->base.
360 */
Antoine Ténart725757a2017-06-12 16:01:39 +0200361#define MVPP22_XLG_CTRL0_REG 0x100
Antoine Ténart81b66302017-08-22 19:08:21 +0200362#define MVPP22_XLG_CTRL0_PORT_EN BIT(0)
363#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1)
364#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14)
Antoine Ténart725757a2017-06-12 16:01:39 +0200365
Thomas Petazzoni26975822017-03-07 16:53:14 +0100366#define MVPP22_XLG_CTRL3_REG 0x11c
Antoine Ténart81b66302017-08-22 19:08:21 +0200367#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
368#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
369#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
Thomas Petazzoni26975822017-03-07 16:53:14 +0100370
371/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
372#define MVPP22_SMI_MISC_CFG_REG 0x1204
Antoine Ténart81b66302017-08-22 19:08:21 +0200373#define MVPP22_SMI_POLLING_EN BIT(10)
Marcin Wojtas3f518502014-07-10 16:52:13 -0300374
Thomas Petazzonia7868412017-03-07 16:53:13 +0100375#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
376
Marcin Wojtas3f518502014-07-10 16:52:13 -0300377#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
378
379/* Descriptor ring Macros */
380#define MVPP2_QUEUE_NEXT_DESC(q, index) \
381 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
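/* Illustrative example (not part of the original source): with a ring of
 * 128 descriptors, last_desc is 127, so MVPP2_QUEUE_NEXT_DESC(q, 126)
 * returns 127 and MVPP2_QUEUE_NEXT_DESC(q, 127) wraps back to 0.
 */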
382
383/* Various constants */
384
385/* Coalescing */
386#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200387#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200388#define MVPP2_TXDONE_COAL_USEC 1000
Marcin Wojtas3f518502014-07-10 16:52:13 -0300389#define MVPP2_RX_COAL_PKTS 32
390#define MVPP2_RX_COAL_USEC 100
391
/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is automatically filled with zeroes on
 * the RX side. Because those two bytes sit in front of the Ethernet
 * header, they leave the IP header aligned on a 4-byte boundary: the
 * hardware skips those two bytes on its own.
 */
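/* For example (illustrative): the 2-byte Marvell header plus the 14-byte
 * Ethernet header ends at offset 16, a multiple of 4, so the IP header
 * that follows starts on a 4-byte boundary without any software
 * realignment.
 */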
400#define MVPP2_MH_SIZE 2
401#define MVPP2_ETH_TYPE_LEN 2
402#define MVPP2_PPPOE_HDR_SIZE 8
403#define MVPP2_VLAN_TAG_LEN 4
404
405/* Lbtd 802.3 type */
406#define MVPP2_IP_LBDT_TYPE 0xfffa
407
Marcin Wojtas3f518502014-07-10 16:52:13 -0300408#define MVPP2_TX_CSUM_MAX_SIZE 9800
409
410/* Timeout constants */
411#define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000
412#define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000
413
414#define MVPP2_TX_MTU_MAX 0x7ffff
415
416/* Maximum number of T-CONTs of PON port */
417#define MVPP2_MAX_TCONT 16
418
419/* Maximum number of supported ports */
420#define MVPP2_MAX_PORTS 4
421
422/* Maximum number of TXQs used by single port */
423#define MVPP2_MAX_TXQ 8
424
/* Default number of RXQs in use */
426#define MVPP2_DEFAULT_RXQ 4
427
Marcin Wojtas3f518502014-07-10 16:52:13 -0300428/* Max number of Rx descriptors */
429#define MVPP2_MAX_RXD 128
430
431/* Max number of Tx descriptors */
432#define MVPP2_MAX_TXD 1024
433
434/* Amount of Tx descriptors that can be reserved at once by CPU */
435#define MVPP2_CPU_DESC_CHUNK 64
436
437/* Max number of Tx descriptors in each aggregated queue */
438#define MVPP2_AGGR_TXQ_SIZE 256
439
440/* Descriptor aligned size */
441#define MVPP2_DESC_ALIGNED_SIZE 32
442
443/* Descriptor alignment mask */
444#define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1)
445
446/* RX FIFO constants */
447#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000
448#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80
449#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
450
451/* RX buffer constants */
452#define MVPP2_SKB_SHINFO_SIZE \
453 SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
454
455#define MVPP2_RX_PKT_SIZE(mtu) \
456 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
Jisheng Zhang4a0a12d2016-04-01 17:11:05 +0800457 ETH_HLEN + ETH_FCS_LEN, cache_line_size())
Marcin Wojtas3f518502014-07-10 16:52:13 -0300458
459#define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
460#define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
461#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
462 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
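/* Worked example (illustrative, assuming a 64-byte cache line and the
 * common NET_SKB_PAD value of 64): for an MTU of 1500,
 * MVPP2_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536,
 * MVPP2_RX_BUF_SIZE(1536) = 1600, and MVPP2_RX_TOTAL_SIZE() then adds
 * the skb_shared_info footprint on top of that.
 */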
463
464#define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8)
465
466/* IPv6 max L3 address size */
467#define MVPP2_MAX_L3_ADDR_SIZE 16
468
469/* Port flags */
470#define MVPP2_F_LOOPBACK BIT(0)
471
472/* Marvell tag types */
473enum mvpp2_tag_type {
474 MVPP2_TAG_TYPE_NONE = 0,
475 MVPP2_TAG_TYPE_MH = 1,
476 MVPP2_TAG_TYPE_DSA = 2,
477 MVPP2_TAG_TYPE_EDSA = 3,
478 MVPP2_TAG_TYPE_VLAN = 4,
479 MVPP2_TAG_TYPE_LAST = 5
480};
481
482/* Parser constants */
483#define MVPP2_PRS_TCAM_SRAM_SIZE 256
484#define MVPP2_PRS_TCAM_WORDS 6
485#define MVPP2_PRS_SRAM_WORDS 4
486#define MVPP2_PRS_FLOW_ID_SIZE 64
487#define MVPP2_PRS_FLOW_ID_MASK 0x3f
488#define MVPP2_PRS_TCAM_ENTRY_INVALID 1
489#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5)
490#define MVPP2_PRS_IPV4_HEAD 0x40
491#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0
492#define MVPP2_PRS_IPV4_MC 0xe0
493#define MVPP2_PRS_IPV4_MC_MASK 0xf0
494#define MVPP2_PRS_IPV4_BC_MASK 0xff
495#define MVPP2_PRS_IPV4_IHL 0x5
496#define MVPP2_PRS_IPV4_IHL_MASK 0xf
497#define MVPP2_PRS_IPV6_MC 0xff
498#define MVPP2_PRS_IPV6_MC_MASK 0xff
499#define MVPP2_PRS_IPV6_HOP_MASK 0xff
500#define MVPP2_PRS_TCAM_PROTO_MASK 0xff
501#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f
502#define MVPP2_PRS_DBL_VLANS_MAX 100
503
504/* Tcam structure:
505 * - lookup ID - 4 bits
506 * - port ID - 1 byte
507 * - additional information - 1 byte
508 * - header data - 8 bytes
509 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
510 */
511#define MVPP2_PRS_AI_BITS 8
512#define MVPP2_PRS_PORT_MASK 0xff
513#define MVPP2_PRS_LU_MASK 0xf
514#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
515 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
516#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
517 (((offs) * 2) - ((offs) % 2) + 2)
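/* Illustrative mapping derived from the two macros above: each 32-bit TCAM
 * word holds two data bytes followed by their two enable bytes, so
 * MVPP2_PRS_TCAM_DATA_BYTE(0..3) maps to bytes 0, 1, 4, 5 and
 * MVPP2_PRS_TCAM_DATA_BYTE_EN(0..3) maps to bytes 2, 3, 6, 7.
 */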
518#define MVPP2_PRS_TCAM_AI_BYTE 16
519#define MVPP2_PRS_TCAM_PORT_BYTE 17
520#define MVPP2_PRS_TCAM_LU_BYTE 20
521#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
522#define MVPP2_PRS_TCAM_INV_WORD 5
523/* Tcam entries ID */
524#define MVPP2_PE_DROP_ALL 0
525#define MVPP2_PE_FIRST_FREE_TID 1
526#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
527#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
528#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
529#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
530#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
531#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
532#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
533#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
534#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
535#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
536#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
537#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
538#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
539#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
540#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
541#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
542#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
543#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
544#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
545#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
546#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
547#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
548#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
549#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
550#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
551
/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
555#define MVPP2_PRS_SRAM_RI_OFFS 0
556#define MVPP2_PRS_SRAM_RI_WORD 0
557#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32
558#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1
559#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32
560#define MVPP2_PRS_SRAM_SHIFT_OFFS 64
561#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72
562#define MVPP2_PRS_SRAM_UDF_OFFS 73
563#define MVPP2_PRS_SRAM_UDF_BITS 8
564#define MVPP2_PRS_SRAM_UDF_MASK 0xff
565#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81
566#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82
567#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7
568#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1
569#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4
570#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85
571#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3
572#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1
573#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2
574#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3
575#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87
576#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2
577#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3
578#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0
579#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2
580#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3
581#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89
582#define MVPP2_PRS_SRAM_AI_OFFS 90
583#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98
584#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8
585#define MVPP2_PRS_SRAM_AI_MASK 0xff
586#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106
587#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf
588#define MVPP2_PRS_SRAM_LU_DONE_BIT 110
589#define MVPP2_PRS_SRAM_LU_GEN_BIT 111
590
591/* Sram result info bits assignment */
592#define MVPP2_PRS_RI_MAC_ME_MASK 0x1
593#define MVPP2_PRS_RI_DSA_MASK 0x2
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100594#define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3))
595#define MVPP2_PRS_RI_VLAN_NONE 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300596#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2)
597#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3)
598#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3))
599#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70
600#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4)
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100601#define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10))
602#define MVPP2_PRS_RI_L2_UCAST 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300603#define MVPP2_PRS_RI_L2_MCAST BIT(9)
604#define MVPP2_PRS_RI_L2_BCAST BIT(10)
605#define MVPP2_PRS_RI_PPPOE_MASK 0x800
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100606#define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14))
607#define MVPP2_PRS_RI_L3_UN 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300608#define MVPP2_PRS_RI_L3_IP4 BIT(12)
609#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13)
610#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13))
611#define MVPP2_PRS_RI_L3_IP6 BIT(14)
612#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14))
613#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14))
Thomas Petazzoni8138aff2017-02-21 11:28:11 +0100614#define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16))
615#define MVPP2_PRS_RI_L3_UCAST 0x0
Marcin Wojtas3f518502014-07-10 16:52:13 -0300616#define MVPP2_PRS_RI_L3_MCAST BIT(15)
617#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16))
618#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000
619#define MVPP2_PRS_RI_UDF3_MASK 0x300000
620#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21)
621#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000
622#define MVPP2_PRS_RI_L4_TCP BIT(22)
623#define MVPP2_PRS_RI_L4_UDP BIT(23)
624#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23))
625#define MVPP2_PRS_RI_UDF7_MASK 0x60000000
626#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29)
627#define MVPP2_PRS_RI_DROP_MASK 0x80000000
628
629/* Sram additional info bits assignment */
630#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0)
631#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0)
632#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1)
633#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2)
634#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3)
635#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
636#define MVPP2_PRS_SINGLE_VLAN_AI 0
637#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
638
639/* DSA/EDSA type */
640#define MVPP2_PRS_TAGGED true
641#define MVPP2_PRS_UNTAGGED false
642#define MVPP2_PRS_EDSA true
643#define MVPP2_PRS_DSA false
644
645/* MAC entries, shadow udf */
646enum mvpp2_prs_udf {
647 MVPP2_PRS_UDF_MAC_DEF,
648 MVPP2_PRS_UDF_MAC_RANGE,
649 MVPP2_PRS_UDF_L2_DEF,
650 MVPP2_PRS_UDF_L2_DEF_COPY,
651 MVPP2_PRS_UDF_L2_USER,
652};
653
654/* Lookup ID */
655enum mvpp2_prs_lookup {
656 MVPP2_PRS_LU_MH,
657 MVPP2_PRS_LU_MAC,
658 MVPP2_PRS_LU_DSA,
659 MVPP2_PRS_LU_VLAN,
660 MVPP2_PRS_LU_L2,
661 MVPP2_PRS_LU_PPPOE,
662 MVPP2_PRS_LU_IP4,
663 MVPP2_PRS_LU_IP6,
664 MVPP2_PRS_LU_FLOWS,
665 MVPP2_PRS_LU_LAST,
666};
667
668/* L3 cast enum */
669enum mvpp2_prs_l3_cast {
670 MVPP2_PRS_L3_UNI_CAST,
671 MVPP2_PRS_L3_MULTI_CAST,
672 MVPP2_PRS_L3_BROAD_CAST
673};
674
675/* Classifier constants */
676#define MVPP2_CLS_FLOWS_TBL_SIZE 512
677#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
678#define MVPP2_CLS_LKP_TBL_SIZE 64
679
680/* BM constants */
681#define MVPP2_BM_POOLS_NUM 8
682#define MVPP2_BM_LONG_BUF_NUM 1024
683#define MVPP2_BM_SHORT_BUF_NUM 2048
684#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
685#define MVPP2_BM_POOL_PTR_ALIGN 128
686#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
687#define MVPP2_BM_SWF_SHORT_POOL 3
688
689/* BM cookie (32 bits) definition */
690#define MVPP2_BM_COOKIE_POOL_OFFS 8
691#define MVPP2_BM_COOKIE_CPU_OFFS 24
692
/* BM short pool packet size
 * This value ensures that, for SWF, the total number of bytes
 * allocated for each buffer will be 512.
 */
697#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
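/* Illustrative expansion (assuming NET_SKB_PAD is 64):
 * MVPP2_BM_SHORT_PKT_SIZE = 512 - 64 - MVPP2_SKB_SHINFO_SIZE, i.e. the
 * largest payload that still keeps the whole short-pool buffer within
 * 512 bytes.
 */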
698
Thomas Petazzonia7868412017-03-07 16:53:13 +0100699#define MVPP21_ADDR_SPACE_SZ 0
700#define MVPP22_ADDR_SPACE_SZ SZ_64K
701
Thomas Petazzonidf089aa2017-08-03 10:41:58 +0200702#define MVPP2_MAX_THREADS 8
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +0200703#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS
Thomas Petazzonia7868412017-03-07 16:53:13 +0100704
Marcin Wojtas3f518502014-07-10 16:52:13 -0300705enum mvpp2_bm_type {
706 MVPP2_BM_FREE,
707 MVPP2_BM_SWF_LONG,
708 MVPP2_BM_SWF_SHORT
709};
710
711/* Definitions */
712
713/* Shared Packet Processor resources */
714struct mvpp2 {
715 /* Shared registers' base addresses */
Marcin Wojtas3f518502014-07-10 16:52:13 -0300716 void __iomem *lms_base;
Thomas Petazzonia7868412017-03-07 16:53:13 +0100717 void __iomem *iface_base;
718
Thomas Petazzonidf089aa2017-08-03 10:41:58 +0200719 /* On PPv2.2, each "software thread" can access the base
720 * register through a separate address space, each 64 KB apart
721 * from each other. Typically, such address spaces will be
722 * used per CPU.
Thomas Petazzonia7868412017-03-07 16:53:13 +0100723 */
Thomas Petazzonidf089aa2017-08-03 10:41:58 +0200724 void __iomem *swth_base[MVPP2_MAX_THREADS];
Marcin Wojtas3f518502014-07-10 16:52:13 -0300725
726 /* Common clocks */
727 struct clk *pp_clk;
728 struct clk *gop_clk;
Thomas Petazzonifceb55d2017-03-07 16:53:18 +0100729 struct clk *mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300730
731 /* List of pointers to port structures */
732 struct mvpp2_port **port_list;
733
734 /* Aggregated TXQs */
735 struct mvpp2_tx_queue *aggr_txqs;
736
737 /* BM pools */
738 struct mvpp2_bm_pool *bm_pools;
739
740 /* PRS shadow table */
741 struct mvpp2_prs_shadow *prs_shadow;
742 /* PRS auxiliary table for double vlan entries control */
743 bool *prs_double_vlans;
744
745 /* Tclk value */
746 u32 tclk;
Thomas Petazzonifaca9242017-03-07 16:53:06 +0100747
748 /* HW version */
749 enum { MVPP21, MVPP22 } hw_version;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +0100750
751 /* Maximum number of RXQs per port */
752 unsigned int max_port_rxqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300753};
754
755struct mvpp2_pcpu_stats {
756 struct u64_stats_sync syncp;
757 u64 rx_packets;
758 u64 rx_bytes;
759 u64 tx_packets;
760 u64 tx_bytes;
761};
762
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200763/* Per-CPU port control */
764struct mvpp2_port_pcpu {
765 struct hrtimer tx_done_timer;
766 bool timer_scheduled;
767 /* Tasklet for egress finalization */
768 struct tasklet_struct tx_done_tasklet;
769};
770
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +0200771struct mvpp2_queue_vector {
772 int irq;
773 struct napi_struct napi;
774 enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type;
775 int sw_thread_id;
776 u16 sw_thread_mask;
777 int first_rxq;
778 int nrxqs;
779 u32 pending_cause_rx;
780 struct mvpp2_port *port;
781};
782
Marcin Wojtas3f518502014-07-10 16:52:13 -0300783struct mvpp2_port {
784 u8 id;
785
Thomas Petazzonia7868412017-03-07 16:53:13 +0100786 /* Index of the port from the "group of ports" complex point
787 * of view
788 */
789 int gop_id;
790
Marcin Wojtas3f518502014-07-10 16:52:13 -0300791 struct mvpp2 *priv;
792
793 /* Per-port registers' base address */
794 void __iomem *base;
795
796 struct mvpp2_rx_queue **rxqs;
Thomas Petazzoni09f83972017-08-03 10:41:57 +0200797 unsigned int nrxqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300798 struct mvpp2_tx_queue **txqs;
Thomas Petazzoni09f83972017-08-03 10:41:57 +0200799 unsigned int ntxqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300800 struct net_device *dev;
801
802 int pkt_size;
803
Marcin Wojtasedc660f2015-08-06 19:00:30 +0200804 /* Per-CPU port control */
805 struct mvpp2_port_pcpu __percpu *pcpu;
806
Marcin Wojtas3f518502014-07-10 16:52:13 -0300807 /* Flags */
808 unsigned long flags;
809
810 u16 tx_ring_size;
811 u16 rx_ring_size;
812 struct mvpp2_pcpu_stats __percpu *stats;
813
Marcin Wojtas3f518502014-07-10 16:52:13 -0300814 phy_interface_t phy_interface;
815 struct device_node *phy_node;
816 unsigned int link;
817 unsigned int duplex;
818 unsigned int speed;
819
820 struct mvpp2_bm_pool *pool_long;
821 struct mvpp2_bm_pool *pool_short;
822
823 /* Index of first port's physical RXQ */
824 u8 first_rxq;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +0200825
826 struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS];
827 unsigned int nqvecs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +0200828 bool has_tx_irqs;
829
830 u32 tx_time_coal;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300831};
832
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the layout
 * of the transmit and reception DMA descriptors; this layout is
 * therefore dictated by the hardware design.
 */
837
838#define MVPP2_TXD_L3_OFF_SHIFT 0
839#define MVPP2_TXD_IP_HLEN_SHIFT 8
840#define MVPP2_TXD_L4_CSUM_FRAG BIT(13)
841#define MVPP2_TXD_L4_CSUM_NOT BIT(14)
842#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15)
843#define MVPP2_TXD_PADDING_DISABLE BIT(23)
844#define MVPP2_TXD_L4_UDP BIT(24)
845#define MVPP2_TXD_L3_IP6 BIT(26)
846#define MVPP2_TXD_L_DESC BIT(28)
847#define MVPP2_TXD_F_DESC BIT(29)
848
849#define MVPP2_RXD_ERR_SUMMARY BIT(15)
850#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14))
851#define MVPP2_RXD_ERR_CRC 0x0
852#define MVPP2_RXD_ERR_OVERRUN BIT(13)
853#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14))
854#define MVPP2_RXD_BM_POOL_ID_OFFS 16
855#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18))
856#define MVPP2_RXD_HWF_SYNC BIT(21)
857#define MVPP2_RXD_L4_CSUM_OK BIT(22)
858#define MVPP2_RXD_IP4_HEADER_ERR BIT(24)
859#define MVPP2_RXD_L4_TCP BIT(25)
860#define MVPP2_RXD_L4_UDP BIT(26)
861#define MVPP2_RXD_L3_IP4 BIT(28)
862#define MVPP2_RXD_L3_IP6 BIT(30)
863#define MVPP2_RXD_BUF_HDR BIT(31)
864
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100865/* HW TX descriptor for PPv2.1 */
866struct mvpp21_tx_desc {
Marcin Wojtas3f518502014-07-10 16:52:13 -0300867 u32 command; /* Options used by HW for packet transmitting.*/
868 u8 packet_offset; /* the offset from the buffer beginning */
869 u8 phys_txq; /* destination queue ID */
870 u16 data_size; /* data size of transmitted packet in bytes */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100871 u32 buf_dma_addr; /* physical addr of transmitted buffer */
Marcin Wojtas3f518502014-07-10 16:52:13 -0300872 u32 buf_cookie; /* cookie for access to TX buffer in tx path */
873 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */
874 u32 reserved2; /* reserved (for future use) */
875};
876
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100877/* HW RX descriptor for PPv2.1 */
878struct mvpp21_rx_desc {
Marcin Wojtas3f518502014-07-10 16:52:13 -0300879 u32 status; /* info about received packet */
880 u16 reserved1; /* parser_info (for future use, PnC) */
881 u16 data_size; /* size of received packet in bytes */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100882 u32 buf_dma_addr; /* physical address of the buffer */
Marcin Wojtas3f518502014-07-10 16:52:13 -0300883 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
884 u16 reserved2; /* gem_port_id (for future use, PON) */
885 u16 reserved3; /* csum_l4 (for future use, PnC) */
886 u8 reserved4; /* bm_qset (for future use, BM) */
887 u8 reserved5;
888 u16 reserved6; /* classify_info (for future use, PnC) */
889 u32 reserved7; /* flow_id (for future use, PnC) */
890 u32 reserved8;
891};
892
Thomas Petazzonie7c53592017-03-07 16:53:08 +0100893/* HW TX descriptor for PPv2.2 */
894struct mvpp22_tx_desc {
895 u32 command;
896 u8 packet_offset;
897 u8 phys_txq;
898 u16 data_size;
899 u64 reserved1;
900 u64 buf_dma_addr_ptp;
901 u64 buf_cookie_misc;
902};
903
904/* HW RX descriptor for PPv2.2 */
905struct mvpp22_rx_desc {
906 u32 status;
907 u16 reserved1;
908 u16 data_size;
909 u32 reserved2;
910 u32 reserved3;
911 u64 buf_dma_addr_key_hash;
912 u64 buf_cookie_misc;
913};
914
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100915/* Opaque type used by the driver to manipulate the HW TX and RX
916 * descriptors
917 */
918struct mvpp2_tx_desc {
919 union {
920 struct mvpp21_tx_desc pp21;
Thomas Petazzonie7c53592017-03-07 16:53:08 +0100921 struct mvpp22_tx_desc pp22;
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100922 };
923};
924
925struct mvpp2_rx_desc {
926 union {
927 struct mvpp21_rx_desc pp21;
Thomas Petazzonie7c53592017-03-07 16:53:08 +0100928 struct mvpp22_rx_desc pp22;
Thomas Petazzoni054f6372017-03-07 16:53:07 +0100929 };
930};
931
Thomas Petazzoni83544912016-12-21 11:28:49 +0100932struct mvpp2_txq_pcpu_buf {
933 /* Transmitted SKB */
934 struct sk_buff *skb;
935
936 /* Physical address of transmitted buffer */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100937 dma_addr_t dma;
Thomas Petazzoni83544912016-12-21 11:28:49 +0100938
939 /* Size transmitted */
940 size_t size;
941};
942
Marcin Wojtas3f518502014-07-10 16:52:13 -0300943/* Per-CPU Tx queue control */
944struct mvpp2_txq_pcpu {
945 int cpu;
946
947 /* Number of Tx DMA descriptors in the descriptor ring */
948 int size;
949
	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
953 int count;
954
955 /* Number of Tx DMA descriptors reserved for each CPU */
956 int reserved_num;
957
	/* Info about transmitted buffers */
959 struct mvpp2_txq_pcpu_buf *buffs;
Marcin Wojtas71ce3912015-08-06 19:00:29 +0200960
Marcin Wojtas3f518502014-07-10 16:52:13 -0300961 /* Index of last TX DMA descriptor that was inserted */
962 int txq_put_index;
963
964 /* Index of the TX DMA descriptor to be cleaned up */
965 int txq_get_index;
966};
967
968struct mvpp2_tx_queue {
969 /* Physical number of this Tx queue */
970 u8 id;
971
972 /* Logical number of this Tx queue */
973 u8 log_id;
974
975 /* Number of Tx DMA descriptors in the descriptor ring */
976 int size;
977
	/* Number of currently used Tx DMA descriptors in the descriptor ring */
979 int count;
980
981 /* Per-CPU control of physical Tx queues */
982 struct mvpp2_txq_pcpu __percpu *pcpu;
983
Marcin Wojtas3f518502014-07-10 16:52:13 -0300984 u32 done_pkts_coal;
985
	/* Virtual address of the Tx DMA descriptors array */
987 struct mvpp2_tx_desc *descs;
988
989 /* DMA address of the Tx DMA descriptors array */
Thomas Petazzoni20396132017-03-07 16:53:00 +0100990 dma_addr_t descs_dma;
Marcin Wojtas3f518502014-07-10 16:52:13 -0300991
992 /* Index of the last Tx DMA descriptor */
993 int last_desc;
994
995 /* Index of the next Tx DMA descriptor to process */
996 int next_desc_to_proc;
997};
998
999struct mvpp2_rx_queue {
1000 /* RX queue number, in the range 0-31 for physical RXQs */
1001 u8 id;
1002
1003 /* Num of rx descriptors in the rx descriptor ring */
1004 int size;
1005
1006 u32 pkts_coal;
1007 u32 time_coal;
1008
1009 /* Virtual address of the RX DMA descriptors array */
1010 struct mvpp2_rx_desc *descs;
1011
1012 /* DMA address of the RX DMA descriptors array */
Thomas Petazzoni20396132017-03-07 16:53:00 +01001013 dma_addr_t descs_dma;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001014
1015 /* Index of the last RX DMA descriptor */
1016 int last_desc;
1017
1018 /* Index of the next RX DMA descriptor to process */
1019 int next_desc_to_proc;
1020
1021 /* ID of port to which physical RXQ is mapped */
1022 int port;
1023
1024 /* Port's logic RXQ number to which physical RXQ is mapped */
1025 int logic_rxq;
1026};
1027
1028union mvpp2_prs_tcam_entry {
1029 u32 word[MVPP2_PRS_TCAM_WORDS];
1030 u8 byte[MVPP2_PRS_TCAM_WORDS * 4];
1031};
1032
1033union mvpp2_prs_sram_entry {
1034 u32 word[MVPP2_PRS_SRAM_WORDS];
1035 u8 byte[MVPP2_PRS_SRAM_WORDS * 4];
1036};
1037
1038struct mvpp2_prs_entry {
1039 u32 index;
1040 union mvpp2_prs_tcam_entry tcam;
1041 union mvpp2_prs_sram_entry sram;
1042};
1043
1044struct mvpp2_prs_shadow {
1045 bool valid;
1046 bool finish;
1047
1048 /* Lookup ID */
1049 int lu;
1050
1051 /* User defined offset */
1052 int udf;
1053
1054 /* Result info */
1055 u32 ri;
1056 u32 ri_mask;
1057};
1058
1059struct mvpp2_cls_flow_entry {
1060 u32 index;
1061 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
1062};
1063
1064struct mvpp2_cls_lookup_entry {
1065 u32 lkpid;
1066 u32 way;
1067 u32 data;
1068};
1069
1070struct mvpp2_bm_pool {
1071 /* Pool number in the range 0-7 */
1072 int id;
1073 enum mvpp2_bm_type type;
1074
1075 /* Buffer Pointers Pool External (BPPE) size */
1076 int size;
Thomas Petazzonid01524d2017-03-07 16:53:09 +01001077 /* BPPE size in bytes */
1078 int size_bytes;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001079 /* Number of buffers for this pool */
1080 int buf_num;
1081 /* Pool buffer size */
1082 int buf_size;
1083 /* Packet size */
1084 int pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01001085 int frag_size;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001086
1087 /* BPPE virtual base address */
1088 u32 *virt_addr;
Thomas Petazzoni20396132017-03-07 16:53:00 +01001089 /* BPPE DMA base address */
1090 dma_addr_t dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001091
1092 /* Ports using BM pool */
1093 u32 port_map;
Marcin Wojtas3f518502014-07-10 16:52:13 -03001094};
1095
Thomas Petazzoni213f4282017-08-03 10:42:00 +02001096/* Queue modes */
1097#define MVPP2_QDIST_SINGLE_MODE 0
1098#define MVPP2_QDIST_MULTI_MODE 1
1099
1100static int queue_mode = MVPP2_QDIST_SINGLE_MODE;
1101
1102module_param(queue_mode, int, 0444);
1103MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
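/* Illustrative note: in single mode all of a port's RX queues are served by
 * one shared interrupt/NAPI context, while multi mode spreads them across
 * per-CPU queue vectors (see struct mvpp2_queue_vector above).
 */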
1104
Marcin Wojtas3f518502014-07-10 16:52:13 -03001105#define MVPP2_DRIVER_NAME "mvpp2"
1106#define MVPP2_DRIVER_VERSION "1.0"
1107
1108/* Utility/helper methods */
1109
1110static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
1111{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001112 writel(data, priv->swth_base[0] + offset);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001113}
1114
1115static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
1116{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001117 return readl(priv->swth_base[0] + offset);
Thomas Petazzonia7868412017-03-07 16:53:13 +01001118}
1119
1120/* These accessors should be used to access:
1121 *
1122 * - per-CPU registers, where each CPU has its own copy of the
1123 * register.
1124 *
1125 * MVPP2_BM_VIRT_ALLOC_REG
1126 * MVPP2_BM_ADDR_HIGH_ALLOC
1127 * MVPP22_BM_ADDR_HIGH_RLS_REG
1128 * MVPP2_BM_VIRT_RLS_REG
1129 * MVPP2_ISR_RX_TX_CAUSE_REG
1130 * MVPP2_ISR_RX_TX_MASK_REG
1131 * MVPP2_TXQ_NUM_REG
1132 * MVPP2_AGGR_TXQ_UPDATE_REG
1133 * MVPP2_TXQ_RSVD_REQ_REG
1134 * MVPP2_TXQ_RSVD_RSLT_REG
1135 * MVPP2_TXQ_SENT_REG
1136 * MVPP2_RXQ_NUM_REG
1137 *
1138 * - global registers that must be accessed through a specific CPU
1139 * window, because they are related to an access to a per-CPU
1140 * register
1141 *
1142 * MVPP2_BM_PHY_ALLOC_REG (related to MVPP2_BM_VIRT_ALLOC_REG)
1143 * MVPP2_BM_PHY_RLS_REG (related to MVPP2_BM_VIRT_RLS_REG)
1144 * MVPP2_RXQ_THRESH_REG (related to MVPP2_RXQ_NUM_REG)
1145 * MVPP2_RXQ_DESC_ADDR_REG (related to MVPP2_RXQ_NUM_REG)
1146 * MVPP2_RXQ_DESC_SIZE_REG (related to MVPP2_RXQ_NUM_REG)
1147 * MVPP2_RXQ_INDEX_REG (related to MVPP2_RXQ_NUM_REG)
1148 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1149 * MVPP2_TXQ_DESC_ADDR_REG (related to MVPP2_TXQ_NUM_REG)
1150 * MVPP2_TXQ_DESC_SIZE_REG (related to MVPP2_TXQ_NUM_REG)
1151 * MVPP2_TXQ_INDEX_REG (related to MVPP2_TXQ_NUM_REG)
1152 * MVPP2_TXQ_PENDING_REG (related to MVPP2_TXQ_NUM_REG)
1153 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1154 * MVPP2_TXQ_PREF_BUF_REG (related to MVPP2_TXQ_NUM_REG)
1155 */
1156static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu,
1157 u32 offset, u32 data)
1158{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001159 writel(data, priv->swth_base[cpu] + offset);
Thomas Petazzonia7868412017-03-07 16:53:13 +01001160}
1161
1162static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu,
1163 u32 offset)
1164{
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02001165 return readl(priv->swth_base[cpu] + offset);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001166}
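/* Illustrative use of the two accessors above (not from the original
 * source): selecting a queue and reading one of its indirect registers
 * must go through the same per-CPU window, e.g.:
 *
 *	mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
 *	pending = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG) &
 *		  MVPP2_TXQ_PENDING_MASK;
 */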
1167
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001168static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
1169 struct mvpp2_tx_desc *tx_desc)
1170{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001171 if (port->priv->hw_version == MVPP21)
1172 return tx_desc->pp21.buf_dma_addr;
1173 else
1174 return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001175}
1176
1177static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
1178 struct mvpp2_tx_desc *tx_desc,
1179 dma_addr_t dma_addr)
1180{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001181 if (port->priv->hw_version == MVPP21) {
1182 tx_desc->pp21.buf_dma_addr = dma_addr;
1183 } else {
1184 u64 val = (u64)dma_addr;
1185
1186 tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
1187 tx_desc->pp22.buf_dma_addr_ptp |= val;
1188 }
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001189}
1190
1191static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
1192 struct mvpp2_tx_desc *tx_desc)
1193{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001194 if (port->priv->hw_version == MVPP21)
1195 return tx_desc->pp21.data_size;
1196 else
1197 return tx_desc->pp22.data_size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001198}
1199
1200static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
1201 struct mvpp2_tx_desc *tx_desc,
1202 size_t size)
1203{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001204 if (port->priv->hw_version == MVPP21)
1205 tx_desc->pp21.data_size = size;
1206 else
1207 tx_desc->pp22.data_size = size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001208}
1209
1210static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
1211 struct mvpp2_tx_desc *tx_desc,
1212 unsigned int txq)
1213{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001214 if (port->priv->hw_version == MVPP21)
1215 tx_desc->pp21.phys_txq = txq;
1216 else
1217 tx_desc->pp22.phys_txq = txq;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001218}
1219
1220static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
1221 struct mvpp2_tx_desc *tx_desc,
1222 unsigned int command)
1223{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001224 if (port->priv->hw_version == MVPP21)
1225 tx_desc->pp21.command = command;
1226 else
1227 tx_desc->pp22.command = command;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001228}
1229
1230static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
1231 struct mvpp2_tx_desc *tx_desc,
1232 unsigned int offset)
1233{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001234 if (port->priv->hw_version == MVPP21)
1235 tx_desc->pp21.packet_offset = offset;
1236 else
1237 tx_desc->pp22.packet_offset = offset;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001238}
1239
1240static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
1241 struct mvpp2_tx_desc *tx_desc)
1242{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001243 if (port->priv->hw_version == MVPP21)
1244 return tx_desc->pp21.packet_offset;
1245 else
1246 return tx_desc->pp22.packet_offset;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001247}
1248
1249static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
1250 struct mvpp2_rx_desc *rx_desc)
1251{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001252 if (port->priv->hw_version == MVPP21)
1253 return rx_desc->pp21.buf_dma_addr;
1254 else
1255 return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001256}
1257
1258static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
1259 struct mvpp2_rx_desc *rx_desc)
1260{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001261 if (port->priv->hw_version == MVPP21)
1262 return rx_desc->pp21.buf_cookie;
1263 else
1264 return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001265}
1266
1267static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
1268 struct mvpp2_rx_desc *rx_desc)
1269{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001270 if (port->priv->hw_version == MVPP21)
1271 return rx_desc->pp21.data_size;
1272 else
1273 return rx_desc->pp22.data_size;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001274}
1275
1276static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
1277 struct mvpp2_rx_desc *rx_desc)
1278{
Thomas Petazzonie7c53592017-03-07 16:53:08 +01001279 if (port->priv->hw_version == MVPP21)
1280 return rx_desc->pp21.status;
1281 else
1282 return rx_desc->pp22.status;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001283}
1284
Marcin Wojtas3f518502014-07-10 16:52:13 -03001285static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
1286{
1287 txq_pcpu->txq_get_index++;
1288 if (txq_pcpu->txq_get_index == txq_pcpu->size)
1289 txq_pcpu->txq_get_index = 0;
1290}
1291
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001292static void mvpp2_txq_inc_put(struct mvpp2_port *port,
1293 struct mvpp2_txq_pcpu *txq_pcpu,
Marcin Wojtas71ce3912015-08-06 19:00:29 +02001294 struct sk_buff *skb,
1295 struct mvpp2_tx_desc *tx_desc)
Marcin Wojtas3f518502014-07-10 16:52:13 -03001296{
Thomas Petazzoni83544912016-12-21 11:28:49 +01001297 struct mvpp2_txq_pcpu_buf *tx_buf =
1298 txq_pcpu->buffs + txq_pcpu->txq_put_index;
1299 tx_buf->skb = skb;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01001300 tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
1301 tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
1302 mvpp2_txdesc_offset_get(port, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03001303 txq_pcpu->txq_put_index++;
1304 if (txq_pcpu->txq_put_index == txq_pcpu->size)
1305 txq_pcpu->txq_put_index = 0;
1306}
1307
1308/* Get number of physical egress port */
1309static inline int mvpp2_egress_port(struct mvpp2_port *port)
1310{
1311 return MVPP2_MAX_TCONT + port->id;
1312}
1313
1314/* Get number of physical TXQ */
1315static inline int mvpp2_txq_phys(int port, int txq)
1316{
1317 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
1318}
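/* Example (illustrative): with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8,
 * port 1 is egress port 17 and its logical TXQ 3 is physical TXQ
 * (16 + 1) * 8 + 3 = 139.
 */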
1319
1320/* Parser configuration routines */
1321
1322/* Update parser tcam and sram hw entries */
1323static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1324{
1325 int i;
1326
1327 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1328 return -EINVAL;
1329
1330 /* Clear entry invalidation bit */
1331 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
1332
1333 /* Write tcam index - indirect access */
1334 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1335 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1336 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
1337
1338 /* Write sram index - indirect access */
1339 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1340 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1341 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
1342
1343 return 0;
1344}
1345
1346/* Read tcam entry from hw */
1347static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1348{
1349 int i;
1350
1351 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1352 return -EINVAL;
1353
1354 /* Write tcam index - indirect access */
1355 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1356
1357 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1358 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1359 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1360 return MVPP2_PRS_TCAM_ENTRY_INVALID;
1361
1362 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1363 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1364
1365 /* Write sram index - indirect access */
1366 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1367 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1368 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1369
1370 return 0;
1371}
1372
1373/* Invalidate tcam hw entry */
1374static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1375{
1376 /* Write index - indirect access */
1377 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1378 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1379 MVPP2_PRS_TCAM_INV_MASK);
1380}
1381
1382/* Enable shadow table entry and set its lookup ID */
1383static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1384{
1385 priv->prs_shadow[index].valid = true;
1386 priv->prs_shadow[index].lu = lu;
1387}
1388
1389/* Update ri fields in shadow table entry */
1390static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1391 unsigned int ri, unsigned int ri_mask)
1392{
1393 priv->prs_shadow[index].ri_mask = ri_mask;
1394 priv->prs_shadow[index].ri = ri;
1395}
1396
1397/* Update lookup field in tcam sw entry */
1398static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1399{
1400 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1401
1402 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1403 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1404}
1405
1406/* Update mask for single port in tcam sw entry */
1407static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1408 unsigned int port, bool add)
1409{
1410 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1411
1412 if (add)
1413 pe->tcam.byte[enable_off] &= ~(1 << port);
1414 else
1415 pe->tcam.byte[enable_off] |= 1 << port;
1416}
1417
1418/* Update port map in tcam sw entry */
1419static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1420 unsigned int ports)
1421{
1422 unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1423 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1424
1425 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1426 pe->tcam.byte[enable_off] &= ~port_mask;
1427 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1428}
1429
1430/* Obtain port map from tcam sw entry */
1431static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1432{
1433 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1434
1435 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1436}
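
/*
 * Illustrative sketch (not part of the original driver): the port map is
 * stored inverted in the TCAM port enable byte, which is why _set() writes
 * ~ports and _get() complements the byte again. The hypothetical helper
 * below just shows that round trip on a zeroed stack entry.
 */
static inline bool mvpp2_prs_tcam_port_map_example(unsigned int ports)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Store the map; only bits within MVPP2_PRS_PORT_MASK are kept */
	mvpp2_prs_tcam_port_map_set(&pe, ports);

	/* Reading back undoes the inversion done by the setter */
	return mvpp2_prs_tcam_port_map_get(&pe) == (ports & MVPP2_PRS_PORT_MASK);
}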
1437
1438/* Set byte of data and its enable bits in tcam sw entry */
1439static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1440 unsigned int offs, unsigned char byte,
1441 unsigned char enable)
1442{
1443 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1444 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1445}
1446
1447/* Get byte of data and its enable bits from tcam sw entry */
1448static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1449 unsigned int offs, unsigned char *byte,
1450 unsigned char *enable)
1451{
1452 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1453 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1454}
1455
1456/* Compare tcam data bytes with a pattern */
1457static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
1458 u16 data)
1459{
1460 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
1461 u16 tcam_data;
1462
1463	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
1464 if (tcam_data != data)
1465 return false;
1466 return true;
1467}
1468
1469/* Update ai bits in tcam sw entry */
1470static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
1471 unsigned int bits, unsigned int enable)
1472{
1473 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
1474
1475 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
1476
1477 if (!(enable & BIT(i)))
1478 continue;
1479
1480 if (bits & BIT(i))
1481 pe->tcam.byte[ai_idx] |= 1 << i;
1482 else
1483 pe->tcam.byte[ai_idx] &= ~(1 << i);
1484 }
1485
1486 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
1487}
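
/*
 * Note (descriptive, added for clarity): the enable byte above is OR-ed,
 * not overwritten, so successive calls accumulate which AI bits take part
 * in the TCAM match.
 */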
1488
1489/* Get ai bits from tcam sw entry */
1490static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
1491{
1492 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
1493}
1494
1495/* Set ethertype in tcam sw entry */
1496static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1497 unsigned short ethertype)
1498{
1499 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1500 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1501}
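
/*
 * Worked example (illustrative): for ETH_P_IP (0x0800) the helper above
 * stores 0x08 at data byte 'offset' and 0x00 at 'offset + 1', both with a
 * fully enabled 0xff mask, i.e. the ethertype is matched in network byte
 * order.
 */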
1502
1503/* Set bits in sram sw entry */
1504static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1505 int val)
1506{
1507 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1508}
1509
1510/* Clear bits in sram sw entry */
1511static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1512 int val)
1513{
1514 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1515}
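
/*
 * Worked example (illustrative): assuming MVPP2_BIT_TO_BYTE() is a plain
 * divide-by-eight, setting a one-bit value at SRAM bit 35 touches
 * sram.byte[4] and shifts the value left by 35 % 8 = 3. Both helpers only
 * ever touch that single byte, which is why callers such as
 * mvpp2_prs_sram_offset_set() below patch the bits that spill into the
 * following byte by hand.
 */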
1516
1517/* Update ri bits in sram sw entry */
1518static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1519 unsigned int bits, unsigned int mask)
1520{
1521 unsigned int i;
1522
1523 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1524 int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1525
1526 if (!(mask & BIT(i)))
1527 continue;
1528
1529 if (bits & BIT(i))
1530 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1531 else
1532 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1533
1534 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1535 }
1536}
1537
1538/* Obtain ri bits from sram sw entry */
1539static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
1540{
1541 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
1542}
1543
1544/* Update ai bits in sram sw entry */
1545static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1546 unsigned int bits, unsigned int mask)
1547{
1548 unsigned int i;
1549 int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1550
1551 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1552
1553 if (!(mask & BIT(i)))
1554 continue;
1555
1556 if (bits & BIT(i))
1557 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1558 else
1559 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1560
1561 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1562 }
1563}
1564
1565/* Read ai bits from sram sw entry */
1566static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1567{
1568 u8 bits;
1569 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1570 int ai_en_off = ai_off + 1;
1571 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1572
1573 bits = (pe->sram.byte[ai_off] >> ai_shift) |
1574 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1575
1576 return bits;
1577}
1578
1579/* In sram sw entry set lookup ID field of the tcam key to be used in the next
1580 * lookup iteration
1581 */
1582static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1583 unsigned int lu)
1584{
1585 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1586
1587 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1588 MVPP2_PRS_SRAM_NEXT_LU_MASK);
1589 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1590}
1591
1592/* In the sram sw entry set sign and value of the next lookup offset
1593 * and the offset value generated to the classifier
1594 */
1595static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1596 unsigned int op)
1597{
1598 /* Set sign */
1599 if (shift < 0) {
1600 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1601 shift = 0 - shift;
1602 } else {
1603 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1604 }
1605
1606 /* Set value */
1607 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1608 (unsigned char)shift;
1609
1610 /* Reset and set operation */
1611 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1612 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1613 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1614
1615 /* Set base offset as current */
1616 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1617}
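
/*
 * Usage sketch (illustrative): negative shifts are stored as a sign bit plus
 * magnitude, so the parsing window can be moved backwards as well as
 * forwards. mvpp2_prs_ip6_cast() below, for example, rewinds to the IPv6
 * next-header field with:
 *
 *	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
 */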
1618
1619/* In the sram sw entry set sign and value of the user defined offset
1620 * generated to the classifier
1621 */
1622static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1623 unsigned int type, int offset,
1624 unsigned int op)
1625{
1626 /* Set sign */
1627 if (offset < 0) {
1628 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1629 offset = 0 - offset;
1630 } else {
1631 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1632 }
1633
1634 /* Set value */
1635 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1636 MVPP2_PRS_SRAM_UDF_MASK);
1637 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
1638 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1639 MVPP2_PRS_SRAM_UDF_BITS)] &=
1640 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1641 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1642 MVPP2_PRS_SRAM_UDF_BITS)] |=
1643 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1644
1645 /* Set offset type */
1646 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1647 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1648 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1649
1650 /* Set offset operation */
1651 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1652 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1653 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1654
1655 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1656 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1657 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1658 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1659
1660 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1661 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1662 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1663
1664 /* Set base offset as current */
1665 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1666}
1667
1668/* Find parser flow entry */
1669static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1670{
1671 struct mvpp2_prs_entry *pe;
1672 int tid;
1673
1674 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1675 if (!pe)
1676 return NULL;
1677 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1678
1679	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
1680 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1681 u8 bits;
1682
1683 if (!priv->prs_shadow[tid].valid ||
1684 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1685 continue;
1686
1687 pe->index = tid;
1688 mvpp2_prs_hw_read(priv, pe);
1689 bits = mvpp2_prs_sram_ai_get(pe);
1690
1691		/* SRAM stores the classification lookup ID in AI bits [5:0] */
1692 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1693 return pe;
1694 }
1695 kfree(pe);
1696
1697 return NULL;
1698}
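
/*
 * Note for callers (descriptive, added for clarity): on a match the entry
 * returned above was kzalloc()ed and already filled in by
 * mvpp2_prs_hw_read(), so the caller owns it and must kfree() it when done;
 * on failure the allocation is freed here and NULL is returned.
 */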
1699
1700/* Return first free tcam index, seeking from start to end */
1701static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1702 unsigned char end)
1703{
1704 int tid;
1705
1706 if (start > end)
1707 swap(start, end);
1708
1709 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1710 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1711
1712 for (tid = start; tid <= end; tid++) {
1713 if (!priv->prs_shadow[tid].valid)
1714 return tid;
1715 }
1716
1717 return -EINVAL;
1718}
1719
1720/* Enable/disable dropping all mac da's */
1721static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1722{
1723 struct mvpp2_prs_entry pe;
1724
1725 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1726		/* Entry exists - update port only */
1727 pe.index = MVPP2_PE_DROP_ALL;
1728 mvpp2_prs_hw_read(priv, &pe);
1729 } else {
1730 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001731 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001732 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1733 pe.index = MVPP2_PE_DROP_ALL;
1734
1735 /* Non-promiscuous mode for all ports - DROP unknown packets */
1736 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1737 MVPP2_PRS_RI_DROP_MASK);
1738
1739 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1740 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1741
1742 /* Update shadow table */
1743 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1744
1745 /* Mask all ports */
1746 mvpp2_prs_tcam_port_map_set(&pe, 0);
1747 }
1748
1749 /* Update port mask */
1750 mvpp2_prs_tcam_port_set(&pe, port, add);
1751
1752 mvpp2_prs_hw_write(priv, &pe);
1753}
1754
1755/* Set port to promiscuous mode */
1756static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1757{
1758 struct mvpp2_prs_entry pe;
1759
Joe Perchesdbedd442015-03-06 20:49:12 -08001760 /* Promiscuous mode - Accept unknown packets */
Marcin Wojtas3f518502014-07-10 16:52:13 -03001761
1762 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1763		/* Entry exists - update port only */
1764 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1765 mvpp2_prs_hw_read(priv, &pe);
1766 } else {
1767 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001768 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001769 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1770 pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1771
1772 /* Continue - set next lookup */
1773 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1774
1775 /* Set result info bits */
1776 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1777 MVPP2_PRS_RI_L2_CAST_MASK);
1778
1779 /* Shift to ethertype */
1780 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1781 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1782
1783 /* Mask all ports */
1784 mvpp2_prs_tcam_port_map_set(&pe, 0);
1785
1786 /* Update shadow table */
1787 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1788 }
1789
1790 /* Update port mask */
1791 mvpp2_prs_tcam_port_set(&pe, port, add);
1792
1793 mvpp2_prs_hw_write(priv, &pe);
1794}
1795
1796/* Accept multicast */
1797static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1798 bool add)
1799{
1800 struct mvpp2_prs_entry pe;
1801 unsigned char da_mc;
1802
1803 /* Ethernet multicast address first byte is
1804 * 0x01 for IPv4 and 0x33 for IPv6
1805 */
1806 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1807
1808 if (priv->prs_shadow[index].valid) {
1809		/* Entry exists - update port only */
1810 pe.index = index;
1811 mvpp2_prs_hw_read(priv, &pe);
1812 } else {
1813 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001814 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001815 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1816 pe.index = index;
1817
1818 /* Continue - set next lookup */
1819 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1820
1821 /* Set result info bits */
1822 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1823 MVPP2_PRS_RI_L2_CAST_MASK);
1824
1825 /* Update tcam entry data first byte */
1826 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1827
1828 /* Shift to ethertype */
1829 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1830 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1831
1832 /* Mask all ports */
1833 mvpp2_prs_tcam_port_map_set(&pe, 0);
1834
1835 /* Update shadow table */
1836 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1837 }
1838
1839 /* Update port mask */
1840 mvpp2_prs_tcam_port_set(&pe, port, add);
1841
1842 mvpp2_prs_hw_write(priv, &pe);
1843}
1844
1845/* Set entry for dsa packets */
1846static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
1847 bool tagged, bool extend)
1848{
1849 struct mvpp2_prs_entry pe;
1850 int tid, shift;
1851
1852 if (extend) {
1853 tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
1854 shift = 8;
1855 } else {
1856 tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
1857 shift = 4;
1858 }
1859
1860 if (priv->prs_shadow[tid].valid) {
1861		/* Entry exists - update port only */
1862 pe.index = tid;
1863 mvpp2_prs_hw_read(priv, &pe);
1864 } else {
1865 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001866 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001867 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1868 pe.index = tid;
1869
1870		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
1871 mvpp2_prs_sram_shift_set(&pe, shift,
1872 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1873
1874 /* Update shadow table */
1875 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1876
1877 if (tagged) {
1878 /* Set tagged bit in DSA tag */
1879 mvpp2_prs_tcam_data_byte_set(&pe, 0,
1880 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1881 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1882 /* Clear all ai bits for next iteration */
1883 mvpp2_prs_sram_ai_update(&pe, 0,
1884 MVPP2_PRS_SRAM_AI_MASK);
1885 /* If packet is tagged continue check vlans */
1886 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1887 } else {
1888 /* Set result info bits to 'no vlans' */
1889 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1890 MVPP2_PRS_RI_VLAN_MASK);
1891 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1892 }
1893
1894 /* Mask all ports */
1895 mvpp2_prs_tcam_port_map_set(&pe, 0);
1896 }
1897
1898 /* Update port mask */
1899 mvpp2_prs_tcam_port_set(&pe, port, add);
1900
1901 mvpp2_prs_hw_write(priv, &pe);
1902}
1903
1904/* Set entry for dsa ethertype */
1905static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
1906 bool add, bool tagged, bool extend)
1907{
1908 struct mvpp2_prs_entry pe;
1909 int tid, shift, port_mask;
1910
1911 if (extend) {
1912 tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
1913 MVPP2_PE_ETYPE_EDSA_UNTAGGED;
1914 port_mask = 0;
1915 shift = 8;
1916 } else {
1917 tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
1918 MVPP2_PE_ETYPE_DSA_UNTAGGED;
1919 port_mask = MVPP2_PRS_PORT_MASK;
1920 shift = 4;
1921 }
1922
1923 if (priv->prs_shadow[tid].valid) {
1924		/* Entry exists - update port only */
1925 pe.index = tid;
1926 mvpp2_prs_hw_read(priv, &pe);
1927 } else {
1928 /* Entry doesn't exist - create new */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02001929 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03001930 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
1931 pe.index = tid;
1932
1933 /* Set ethertype */
1934 mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
1935 mvpp2_prs_match_etype(&pe, 2, 0);
1936
1937 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
1938 MVPP2_PRS_RI_DSA_MASK);
1939		/* Shift ethertype + 2 reserved bytes + tag */
1940 mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
1941 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1942
1943 /* Update shadow table */
1944 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
1945
1946 if (tagged) {
1947 /* Set tagged bit in DSA tag */
1948 mvpp2_prs_tcam_data_byte_set(&pe,
1949 MVPP2_ETH_TYPE_LEN + 2 + 3,
1950 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
1951 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
1952 /* Clear all ai bits for next iteration */
1953 mvpp2_prs_sram_ai_update(&pe, 0,
1954 MVPP2_PRS_SRAM_AI_MASK);
1955 /* If packet is tagged continue check vlans */
1956 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1957 } else {
1958 /* Set result info bits to 'no vlans' */
1959 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1960 MVPP2_PRS_RI_VLAN_MASK);
1961 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1962 }
1963 /* Mask/unmask all ports, depending on dsa type */
1964 mvpp2_prs_tcam_port_map_set(&pe, port_mask);
1965 }
1966
1967 /* Update port mask */
1968 mvpp2_prs_tcam_port_set(&pe, port, add);
1969
1970 mvpp2_prs_hw_write(priv, &pe);
1971}
1972
1973/* Search for existing single/triple vlan entry */
1974static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
1975 unsigned short tpid, int ai)
1976{
1977 struct mvpp2_prs_entry *pe;
1978 int tid;
1979
1980 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1981 if (!pe)
1982 return NULL;
1983 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
1984
1985	/* Go through all entries with MVPP2_PRS_LU_VLAN */
1986 for (tid = MVPP2_PE_FIRST_FREE_TID;
1987 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
1988 unsigned int ri_bits, ai_bits;
1989 bool match;
1990
1991 if (!priv->prs_shadow[tid].valid ||
1992 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
1993 continue;
1994
1995 pe->index = tid;
1996
1997 mvpp2_prs_hw_read(priv, pe);
1998 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
1999 if (!match)
2000 continue;
2001
2002 /* Get vlan type */
2003 ri_bits = mvpp2_prs_sram_ri_get(pe);
2004 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2005
2006 /* Get current ai value from tcam */
2007 ai_bits = mvpp2_prs_tcam_ai_get(pe);
2008 /* Clear double vlan bit */
2009 ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
2010
2011 if (ai != ai_bits)
2012 continue;
2013
2014 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2015 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2016 return pe;
2017 }
2018 kfree(pe);
2019
2020 return NULL;
2021}
2022
2023/* Add/update single/triple vlan entry */
2024static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
2025 unsigned int port_map)
2026{
2027 struct mvpp2_prs_entry *pe;
2028 int tid_aux, tid;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302029 int ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002030
2031 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
2032
2033 if (!pe) {
2034 /* Create new tcam entry */
2035 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
2036 MVPP2_PE_FIRST_FREE_TID);
2037 if (tid < 0)
2038 return tid;
2039
2040 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2041 if (!pe)
2042 return -ENOMEM;
2043
2044 /* Get last double vlan tid */
2045 for (tid_aux = MVPP2_PE_LAST_FREE_TID;
2046 tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
2047 unsigned int ri_bits;
2048
2049 if (!priv->prs_shadow[tid_aux].valid ||
2050 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2051 continue;
2052
2053 pe->index = tid_aux;
2054 mvpp2_prs_hw_read(priv, pe);
2055 ri_bits = mvpp2_prs_sram_ri_get(pe);
2056 if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
2057 MVPP2_PRS_RI_VLAN_DOUBLE)
2058 break;
2059 }
2060
Sudip Mukherjee43737472014-11-01 16:59:34 +05302061 if (tid <= tid_aux) {
2062 ret = -EINVAL;
Markus Elfringf9fd0e32017-04-17 13:50:35 +02002063 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302064 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002065
Markus Elfringbd6aaf52017-04-17 10:40:32 +02002066 memset(pe, 0, sizeof(*pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002067 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2068 pe->index = tid;
2069
2070 mvpp2_prs_match_etype(pe, 0, tpid);
2071
2072 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
2073 /* Shift 4 bytes - skip 1 vlan tag */
2074 mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
2075 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2076 /* Clear all ai bits for next iteration */
2077 mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2078
2079 if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
2080 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
2081 MVPP2_PRS_RI_VLAN_MASK);
2082 } else {
2083 ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
2084 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
2085 MVPP2_PRS_RI_VLAN_MASK);
2086 }
2087 mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
2088
2089 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2090 }
2091 /* Update ports' mask */
2092 mvpp2_prs_tcam_port_map_set(pe, port_map);
2093
2094 mvpp2_prs_hw_write(priv, pe);
Markus Elfringf9fd0e32017-04-17 13:50:35 +02002095free_pe:
Marcin Wojtas3f518502014-07-10 16:52:13 -03002096 kfree(pe);
2097
Sudip Mukherjee43737472014-11-01 16:59:34 +05302098 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002099}
2100
2101/* Get first free double vlan ai number */
2102static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
2103{
2104 int i;
2105
2106 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
2107 if (!priv->prs_double_vlans[i])
2108 return i;
2109 }
2110
2111 return -EINVAL;
2112}
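
/*
 * Note (descriptive, added for clarity): the scan above starts at 1, so AI
 * value 0 is never handed out for a double-VLAN pair; single-tag entries are
 * added with MVPP2_PRS_SINGLE_VLAN_AI in mvpp2_prs_vlan_add() instead.
 */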
2113
2114/* Search for existing double vlan entry */
2115static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
2116 unsigned short tpid1,
2117 unsigned short tpid2)
2118{
2119 struct mvpp2_prs_entry *pe;
2120 int tid;
2121
2122 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2123 if (!pe)
2124 return NULL;
2125 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2126
2127	/* Go through all entries with MVPP2_PRS_LU_VLAN */
2128 for (tid = MVPP2_PE_FIRST_FREE_TID;
2129 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2130 unsigned int ri_mask;
2131 bool match;
2132
2133 if (!priv->prs_shadow[tid].valid ||
2134 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
2135 continue;
2136
2137 pe->index = tid;
2138 mvpp2_prs_hw_read(priv, pe);
2139
2140 match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
2141 && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
2142
2143 if (!match)
2144 continue;
2145
2146 ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
2147 if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
2148 return pe;
2149 }
2150 kfree(pe);
2151
2152 return NULL;
2153}
2154
2155/* Add or update double vlan entry */
2156static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
2157 unsigned short tpid2,
2158 unsigned int port_map)
2159{
2160 struct mvpp2_prs_entry *pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302161 int tid_aux, tid, ai, ret = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002162
2163 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
2164
2165 if (!pe) {
2166 /* Create new tcam entry */
2167 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2168 MVPP2_PE_LAST_FREE_TID);
2169 if (tid < 0)
2170 return tid;
2171
2172 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2173 if (!pe)
2174 return -ENOMEM;
2175
2176 /* Set ai value for new double vlan entry */
2177 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
Sudip Mukherjee43737472014-11-01 16:59:34 +05302178 if (ai < 0) {
2179 ret = ai;
Markus Elfringc9a7e122017-04-17 13:03:49 +02002180 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302181 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002182
2183 /* Get first single/triple vlan tid */
2184 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
2185 tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
2186 unsigned int ri_bits;
2187
2188 if (!priv->prs_shadow[tid_aux].valid ||
2189 priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
2190 continue;
2191
2192 pe->index = tid_aux;
2193 mvpp2_prs_hw_read(priv, pe);
2194 ri_bits = mvpp2_prs_sram_ri_get(pe);
2195 ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
2196 if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
2197 ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
2198 break;
2199 }
2200
Sudip Mukherjee43737472014-11-01 16:59:34 +05302201 if (tid >= tid_aux) {
2202 ret = -ERANGE;
Markus Elfringc9a7e122017-04-17 13:03:49 +02002203 goto free_pe;
Sudip Mukherjee43737472014-11-01 16:59:34 +05302204 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03002205
Markus Elfringbd6aaf52017-04-17 10:40:32 +02002206 memset(pe, 0, sizeof(*pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002207 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
2208 pe->index = tid;
2209
2210 priv->prs_double_vlans[ai] = true;
2211
2212 mvpp2_prs_match_etype(pe, 0, tpid1);
2213 mvpp2_prs_match_etype(pe, 4, tpid2);
2214
2215 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
2216 /* Shift 8 bytes - skip 2 vlan tags */
2217 mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
2218 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2219 mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2220 MVPP2_PRS_RI_VLAN_MASK);
2221 mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
2222 MVPP2_PRS_SRAM_AI_MASK);
2223
2224 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
2225 }
2226
2227 /* Update ports' mask */
2228 mvpp2_prs_tcam_port_map_set(pe, port_map);
2229 mvpp2_prs_hw_write(priv, pe);
Markus Elfringc9a7e122017-04-17 13:03:49 +02002230free_pe:
Marcin Wojtas3f518502014-07-10 16:52:13 -03002231 kfree(pe);
Sudip Mukherjee43737472014-11-01 16:59:34 +05302232 return ret;
Marcin Wojtas3f518502014-07-10 16:52:13 -03002233}
2234
2235/* IPv4 header parsing for fragmentation and L4 offset */
2236static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
2237 unsigned int ri, unsigned int ri_mask)
2238{
2239 struct mvpp2_prs_entry pe;
2240 int tid;
2241
2242 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2243 (proto != IPPROTO_IGMP))
2244 return -EINVAL;
2245
2246 /* Fragmented packet */
2247 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2248 MVPP2_PE_LAST_FREE_TID);
2249 if (tid < 0)
2250 return tid;
2251
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002252 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002253 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2254 pe.index = tid;
2255
2256 /* Set next lu to IPv4 */
2257 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2258 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2259 /* Set L4 offset */
2260 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2261 sizeof(struct iphdr) - 4,
2262 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2263 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2264 MVPP2_PRS_IPV4_DIP_AI_BIT);
2265 mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
2266 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
2267
2268 mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2269 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
2270 /* Unmask all ports */
2271 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2272
2273 /* Update shadow table and hw entry */
2274 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2275 mvpp2_prs_hw_write(priv, &pe);
2276
2277 /* Not fragmented packet */
2278 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2279 MVPP2_PE_LAST_FREE_TID);
2280 if (tid < 0)
2281 return tid;
2282
2283 pe.index = tid;
2284 /* Clear ri before updating */
2285 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2286 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2287 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2288
2289 mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
2290 mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
2291
2292 /* Update shadow table and hw entry */
2293 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2294 mvpp2_prs_hw_write(priv, &pe);
2295
2296 return 0;
2297}
2298
2299/* IPv4 L3 multicast or broadcast */
2300static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
2301{
2302 struct mvpp2_prs_entry pe;
2303 int mask, tid;
2304
2305 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2306 MVPP2_PE_LAST_FREE_TID);
2307 if (tid < 0)
2308 return tid;
2309
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002310 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002311 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
2312 pe.index = tid;
2313
2314 switch (l3_cast) {
2315 case MVPP2_PRS_L3_MULTI_CAST:
2316 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
2317 MVPP2_PRS_IPV4_MC_MASK);
2318 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2319 MVPP2_PRS_RI_L3_ADDR_MASK);
2320 break;
2321 case MVPP2_PRS_L3_BROAD_CAST:
2322 mask = MVPP2_PRS_IPV4_BC_MASK;
2323 mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
2324 mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
2325 mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
2326 mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
2327 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
2328 MVPP2_PRS_RI_L3_ADDR_MASK);
2329 break;
2330 default:
2331 return -EINVAL;
2332 }
2333
2334 /* Finished: go to flowid generation */
2335 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2336 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2337
2338 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
2339 MVPP2_PRS_IPV4_DIP_AI_BIT);
2340 /* Unmask all ports */
2341 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2342
2343 /* Update shadow table and hw entry */
2344 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
2345 mvpp2_prs_hw_write(priv, &pe);
2346
2347 return 0;
2348}
2349
2350/* Set entries for protocols over IPv6 */
2351static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
2352 unsigned int ri, unsigned int ri_mask)
2353{
2354 struct mvpp2_prs_entry pe;
2355 int tid;
2356
2357 if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
2358 (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
2359 return -EINVAL;
2360
2361 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2362 MVPP2_PE_LAST_FREE_TID);
2363 if (tid < 0)
2364 return tid;
2365
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002366 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002367 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2368 pe.index = tid;
2369
2370 /* Finished: go to flowid generation */
2371 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2372 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2373 mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
2374 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
2375 sizeof(struct ipv6hdr) - 6,
2376 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2377
2378 mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
2379 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2380 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2381 /* Unmask all ports */
2382 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2383
2384 /* Write HW */
2385 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2386 mvpp2_prs_hw_write(priv, &pe);
2387
2388 return 0;
2389}
2390
2391/* IPv6 L3 multicast entry */
2392static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
2393{
2394 struct mvpp2_prs_entry pe;
2395 int tid;
2396
2397 if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
2398 return -EINVAL;
2399
2400 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2401 MVPP2_PE_LAST_FREE_TID);
2402 if (tid < 0)
2403 return tid;
2404
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002405 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002406 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
2407 pe.index = tid;
2408
2409 /* Finished: go to flowid generation */
2410 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2411 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
2412 MVPP2_PRS_RI_L3_ADDR_MASK);
2413 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
2414 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2415 /* Shift back to IPv6 NH */
2416 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2417
2418 mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
2419 MVPP2_PRS_IPV6_MC_MASK);
2420 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
2421 /* Unmask all ports */
2422 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2423
2424 /* Update shadow table and hw entry */
2425 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
2426 mvpp2_prs_hw_write(priv, &pe);
2427
2428 return 0;
2429}
2430
2431/* Parser per-port initialization */
2432static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
2433 int lu_max, int offset)
2434{
2435 u32 val;
2436
2437 /* Set lookup ID */
2438 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
2439 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
2440 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
2441 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
2442
2443 /* Set maximum number of loops for packet received from port */
2444 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
2445 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
2446 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
2447 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
2448
2449 /* Set initial offset for packet header extraction for the first
2450 * searching loop
2451 */
2452 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
2453 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
2454 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
2455 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
2456}
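
/*
 * Hypothetical usage sketch (values are assumptions for illustration only):
 * a port that should start parsing at the Marvell Header lookup, with some
 * maximum loop count and no initial header offset, could be set up as:
 *
 *	mvpp2_prs_hw_port_init(priv, port_id, MVPP2_PRS_LU_MH, max_loops, 0);
 */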
2457
2458/* Default flow entries initialization for all ports */
2459static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
2460{
2461 struct mvpp2_prs_entry pe;
2462 int port;
2463
2464 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002465 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002466 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2467 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
2468
2469 /* Mask all ports */
2470 mvpp2_prs_tcam_port_map_set(&pe, 0);
2471
2472		/* Set flow ID */
2473 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
2474 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2475
2476 /* Update shadow table and hw entry */
2477 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2478 mvpp2_prs_hw_write(priv, &pe);
2479 }
2480}
2481
2482/* Set default entry for Marvell Header field */
2483static void mvpp2_prs_mh_init(struct mvpp2 *priv)
2484{
2485 struct mvpp2_prs_entry pe;
2486
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002487 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002488
2489 pe.index = MVPP2_PE_MH_DEFAULT;
2490 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
2491 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
2492 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2493 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
2494
2495 /* Unmask all ports */
2496 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2497
2498 /* Update shadow table and hw entry */
2499 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
2500 mvpp2_prs_hw_write(priv, &pe);
2501}
2502
2503/* Set default entries (place holder) for promiscuous, non-promiscuous and
2504 * multicast MAC addresses
2505 */
2506static void mvpp2_prs_mac_init(struct mvpp2 *priv)
2507{
2508 struct mvpp2_prs_entry pe;
2509
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002510 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002511
2512 /* Non-promiscuous mode for all ports - DROP unknown packets */
2513 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
2514 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2515
2516 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2517 MVPP2_PRS_RI_DROP_MASK);
2518 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2519 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2520
2521 /* Unmask all ports */
2522 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2523
2524 /* Update shadow table and hw entry */
2525 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2526 mvpp2_prs_hw_write(priv, &pe);
2527
2528 /* place holders only - no ports */
2529 mvpp2_prs_mac_drop_all_set(priv, 0, false);
2530 mvpp2_prs_mac_promisc_set(priv, 0, false);
2531 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
2532 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
2533}
2534
2535/* Set default entries for various types of dsa packets */
2536static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
2537{
2538 struct mvpp2_prs_entry pe;
2539
2540	/* Untagged EDSA entry - place holder */
2541 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2542 MVPP2_PRS_EDSA);
2543
2544 /* Tagged EDSA entry - place holder */
2545 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2546
2547	/* Untagged DSA entry - place holder */
2548 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
2549 MVPP2_PRS_DSA);
2550
2551 /* Tagged DSA entry - place holder */
2552 mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2553
2554	/* Untagged EDSA ethertype entry - place holder */
2555 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2556 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2557
2558	/* Tagged EDSA ethertype entry - place holder */
2559 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
2560 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2561
2562	/* Untagged DSA ethertype entry */
2563 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2564 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2565
2566 /* Tagged DSA ethertype entry */
2567 mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
2568 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2569
2570	/* Set default entry, in case no DSA or EDSA tag is found */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002571 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002572 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
2573 pe.index = MVPP2_PE_DSA_DEFAULT;
2574 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2575
2576 /* Shift 0 bytes */
2577 mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2578 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2579
2580 /* Clear all sram ai bits for next iteration */
2581 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2582
2583 /* Unmask all ports */
2584 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2585
2586 mvpp2_prs_hw_write(priv, &pe);
2587}
2588
2589/* Match basic ethertypes */
2590static int mvpp2_prs_etype_init(struct mvpp2 *priv)
2591{
2592 struct mvpp2_prs_entry pe;
2593 int tid;
2594
2595 /* Ethertype: PPPoE */
2596 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2597 MVPP2_PE_LAST_FREE_TID);
2598 if (tid < 0)
2599 return tid;
2600
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002601 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002602 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2603 pe.index = tid;
2604
2605 mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
2606
2607 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2608 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2609 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2610 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2611 MVPP2_PRS_RI_PPPOE_MASK);
2612
2613 /* Update shadow table and hw entry */
2614 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2615 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2616 priv->prs_shadow[pe.index].finish = false;
2617 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2618 MVPP2_PRS_RI_PPPOE_MASK);
2619 mvpp2_prs_hw_write(priv, &pe);
2620
2621 /* Ethertype: ARP */
2622 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2623 MVPP2_PE_LAST_FREE_TID);
2624 if (tid < 0)
2625 return tid;
2626
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002627 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002628 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2629 pe.index = tid;
2630
2631 mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
2632
2633	/* Generate flow in the next iteration */
2634 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2635 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2636 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2637 MVPP2_PRS_RI_L3_PROTO_MASK);
2638 /* Set L3 offset */
2639 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2640 MVPP2_ETH_TYPE_LEN,
2641 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2642
2643 /* Update shadow table and hw entry */
2644 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2645 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2646 priv->prs_shadow[pe.index].finish = true;
2647 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2648 MVPP2_PRS_RI_L3_PROTO_MASK);
2649 mvpp2_prs_hw_write(priv, &pe);
2650
2651 /* Ethertype: LBTD */
2652 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2653 MVPP2_PE_LAST_FREE_TID);
2654 if (tid < 0)
2655 return tid;
2656
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002657 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002658 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2659 pe.index = tid;
2660
2661 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2662
2663	/* Generate flow in the next iteration */
2664 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2665 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2666 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2667 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2668 MVPP2_PRS_RI_CPU_CODE_MASK |
2669 MVPP2_PRS_RI_UDF3_MASK);
2670 /* Set L3 offset */
2671 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2672 MVPP2_ETH_TYPE_LEN,
2673 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2674
2675 /* Update shadow table and hw entry */
2676 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2677 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2678 priv->prs_shadow[pe.index].finish = true;
2679 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2680 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2681 MVPP2_PRS_RI_CPU_CODE_MASK |
2682 MVPP2_PRS_RI_UDF3_MASK);
2683 mvpp2_prs_hw_write(priv, &pe);
2684
2685 /* Ethertype: IPv4 without options */
2686 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2687 MVPP2_PE_LAST_FREE_TID);
2688 if (tid < 0)
2689 return tid;
2690
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002691 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002692 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2693 pe.index = tid;
2694
2695 mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
2696 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2697 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2698 MVPP2_PRS_IPV4_HEAD_MASK |
2699 MVPP2_PRS_IPV4_IHL_MASK);
2700
2701 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2702 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2703 MVPP2_PRS_RI_L3_PROTO_MASK);
2704 /* Skip eth_type + 4 bytes of IP header */
2705 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2706 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2707 /* Set L3 offset */
2708 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2709 MVPP2_ETH_TYPE_LEN,
2710 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2711
2712 /* Update shadow table and hw entry */
2713 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2714 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2715 priv->prs_shadow[pe.index].finish = false;
2716 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2717 MVPP2_PRS_RI_L3_PROTO_MASK);
2718 mvpp2_prs_hw_write(priv, &pe);
2719
2720 /* Ethertype: IPv4 with options */
2721 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2722 MVPP2_PE_LAST_FREE_TID);
2723 if (tid < 0)
2724 return tid;
2725
2726 pe.index = tid;
2727
2728 /* Clear tcam data before updating */
2729 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2730 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2731
2732 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2733 MVPP2_PRS_IPV4_HEAD,
2734 MVPP2_PRS_IPV4_HEAD_MASK);
2735
2736 /* Clear ri before updating */
2737 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2738 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2739 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2740 MVPP2_PRS_RI_L3_PROTO_MASK);
2741
2742 /* Update shadow table and hw entry */
2743 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2744 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2745 priv->prs_shadow[pe.index].finish = false;
2746 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2747 MVPP2_PRS_RI_L3_PROTO_MASK);
2748 mvpp2_prs_hw_write(priv, &pe);
2749
2750 /* Ethertype: IPv6 without options */
2751 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2752 MVPP2_PE_LAST_FREE_TID);
2753 if (tid < 0)
2754 return tid;
2755
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002756 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002757 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2758 pe.index = tid;
2759
2760 mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
2761
2762 /* Skip DIP of IPV6 header */
2763 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2764 MVPP2_MAX_L3_ADDR_SIZE,
2765 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2766 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2767 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2768 MVPP2_PRS_RI_L3_PROTO_MASK);
2769 /* Set L3 offset */
2770 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2771 MVPP2_ETH_TYPE_LEN,
2772 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2773
2774 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2775 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2776 priv->prs_shadow[pe.index].finish = false;
2777 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2778 MVPP2_PRS_RI_L3_PROTO_MASK);
2779 mvpp2_prs_hw_write(priv, &pe);
2780
2781 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2782 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2783 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2784 pe.index = MVPP2_PE_ETH_TYPE_UN;
2785
2786 /* Unmask all ports */
2787 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2788
2789	/* Generate flow in the next iteration */
2790 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2791 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2792 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2793 MVPP2_PRS_RI_L3_PROTO_MASK);
2794	/* Set L3 offset even if it's an unknown L3 */
2795 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2796 MVPP2_ETH_TYPE_LEN,
2797 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2798
2799 /* Update shadow table and hw entry */
2800 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2801 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2802 priv->prs_shadow[pe.index].finish = true;
2803 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2804 MVPP2_PRS_RI_L3_PROTO_MASK);
2805 mvpp2_prs_hw_write(priv, &pe);
2806
2807 return 0;
2808}
2809
2810/* Configure vlan entries and detect up to 2 successive VLAN tags.
2811 * Possible options:
2812 * 0x8100, 0x88A8
2813 * 0x8100, 0x8100
2814 * 0x8100
2815 * 0x88A8
2816 */
2817static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
2818{
2819 struct mvpp2_prs_entry pe;
2820 int err;
2821
2822 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
2823 MVPP2_PRS_DBL_VLANS_MAX,
2824 GFP_KERNEL);
2825 if (!priv->prs_double_vlans)
2826 return -ENOMEM;
2827
2828 /* Double VLAN: 0x8100, 0x88A8 */
2829 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
2830 MVPP2_PRS_PORT_MASK);
2831 if (err)
2832 return err;
2833
2834 /* Double VLAN: 0x8100, 0x8100 */
2835 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
2836 MVPP2_PRS_PORT_MASK);
2837 if (err)
2838 return err;
2839
2840 /* Single VLAN: 0x88a8 */
2841 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
2842 MVPP2_PRS_PORT_MASK);
2843 if (err)
2844 return err;
2845
2846 /* Single VLAN: 0x8100 */
2847 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
2848 MVPP2_PRS_PORT_MASK);
2849 if (err)
2850 return err;
2851
2852 /* Set default double vlan entry */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002853 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002854 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2855 pe.index = MVPP2_PE_VLAN_DBL;
2856
2857 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2858 /* Clear ai for next iterations */
2859 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2860 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
2861 MVPP2_PRS_RI_VLAN_MASK);
2862
2863 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
2864 MVPP2_PRS_DBL_VLAN_AI_BIT);
2865 /* Unmask all ports */
2866 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2867
2868 /* Update shadow table and hw entry */
2869 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2870 mvpp2_prs_hw_write(priv, &pe);
2871
2872 /* Set default vlan none entry */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002873 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002874 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
2875 pe.index = MVPP2_PE_VLAN_NONE;
2876
2877 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2878 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
2879 MVPP2_PRS_RI_VLAN_MASK);
2880
2881 /* Unmask all ports */
2882 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2883
2884 /* Update shadow table and hw entry */
2885 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
2886 mvpp2_prs_hw_write(priv, &pe);
2887
2888 return 0;
2889}
2890
2891/* Set entries for PPPoE ethertype */
2892static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
2893{
2894 struct mvpp2_prs_entry pe;
2895 int tid;
2896
2897 /* IPv4 over PPPoE with options */
2898 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2899 MVPP2_PE_LAST_FREE_TID);
2900 if (tid < 0)
2901 return tid;
2902
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002903 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002904 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2905 pe.index = tid;
2906
2907 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
2908
2909 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2910 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2911 MVPP2_PRS_RI_L3_PROTO_MASK);
2912 /* Skip eth_type + 4 bytes of IP header */
2913 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2914 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2915 /* Set L3 offset */
2916 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2917 MVPP2_ETH_TYPE_LEN,
2918 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2919
2920 /* Update shadow table and hw entry */
2921 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2922 mvpp2_prs_hw_write(priv, &pe);
2923
2924 /* IPv4 over PPPoE without options */
2925 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2926 MVPP2_PE_LAST_FREE_TID);
2927 if (tid < 0)
2928 return tid;
2929
2930 pe.index = tid;
2931
2932 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2933 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2934 MVPP2_PRS_IPV4_HEAD_MASK |
2935 MVPP2_PRS_IPV4_IHL_MASK);
2936
2937 /* Clear ri before updating */
2938 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2939 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2940 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2941 MVPP2_PRS_RI_L3_PROTO_MASK);
2942
2943 /* Update shadow table and hw entry */
2944 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2945 mvpp2_prs_hw_write(priv, &pe);
2946
2947 /* IPv6 over PPPoE */
2948 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2949 MVPP2_PE_LAST_FREE_TID);
2950 if (tid < 0)
2951 return tid;
2952
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002953 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002954 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2955 pe.index = tid;
2956
2957 mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
2958
2959 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2960 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2961 MVPP2_PRS_RI_L3_PROTO_MASK);
2962 /* Skip eth_type + 4 bytes of IPv6 header */
2963 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2964 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2965 /* Set L3 offset */
2966 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2967 MVPP2_ETH_TYPE_LEN,
2968 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2969
2970 /* Update shadow table and hw entry */
2971 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2972 mvpp2_prs_hw_write(priv, &pe);
2973
2974 /* Non-IP over PPPoE */
2975 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2976 MVPP2_PE_LAST_FREE_TID);
2977 if (tid < 0)
2978 return tid;
2979
Markus Elfringc5b2ce22017-04-17 10:30:29 +02002980 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03002981 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2982 pe.index = tid;
2983
2984 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2985 MVPP2_PRS_RI_L3_PROTO_MASK);
2986
2987 /* Finished: go to flowid generation */
2988 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2989 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2990	/* Set L3 offset even if it's an unknown L3 */
2991 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2992 MVPP2_ETH_TYPE_LEN,
2993 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2994
2995 /* Update shadow table and hw entry */
2996 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
2997 mvpp2_prs_hw_write(priv, &pe);
2998
2999 return 0;
3000}
3001
3002/* Initialize entries for IPv4 */
3003static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
3004{
3005 struct mvpp2_prs_entry pe;
3006 int err;
3007
3008 /* Set entries for TCP, UDP and IGMP over IPv4 */
3009 err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
3010 MVPP2_PRS_RI_L4_PROTO_MASK);
3011 if (err)
3012 return err;
3013
3014 err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
3015 MVPP2_PRS_RI_L4_PROTO_MASK);
3016 if (err)
3017 return err;
3018
3019 err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
3020 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3021 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3022 MVPP2_PRS_RI_CPU_CODE_MASK |
3023 MVPP2_PRS_RI_UDF3_MASK);
3024 if (err)
3025 return err;
3026
3027 /* IPv4 Broadcast */
3028 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
3029 if (err)
3030 return err;
3031
3032 /* IPv4 Multicast */
3033 err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3034 if (err)
3035 return err;
3036
3037 /* Default IPv4 entry for unknown protocols */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003038 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003039 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3040 pe.index = MVPP2_PE_IP4_PROTO_UN;
3041
3042 /* Set next lu to IPv4 */
3043 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
3044 mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3045 /* Set L4 offset */
3046 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3047 sizeof(struct iphdr) - 4,
3048 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3049 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3050 MVPP2_PRS_IPV4_DIP_AI_BIT);
3051 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3052 MVPP2_PRS_RI_L4_PROTO_MASK);
3053
3054 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
3055 /* Unmask all ports */
3056 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3057
3058 /* Update shadow table and hw entry */
3059 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3060 mvpp2_prs_hw_write(priv, &pe);
3061
3062 /* Default IPv4 entry for unicast address */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003063 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003064 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
3065 pe.index = MVPP2_PE_IP4_ADDR_UN;
3066
3067 /* Finished: go to flowid generation */
3068 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3069 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3070 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3071 MVPP2_PRS_RI_L3_ADDR_MASK);
3072
3073 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
3074 MVPP2_PRS_IPV4_DIP_AI_BIT);
3075 /* Unmask all ports */
3076 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3077
3078 /* Update shadow table and hw entry */
3079 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3080 mvpp2_prs_hw_write(priv, &pe);
3081
3082 return 0;
3083}
3084
3085/* Initialize entries for IPv6 */
3086static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
3087{
3088 struct mvpp2_prs_entry pe;
3089 int tid, err;
3090
3091 /* Set entries for TCP, UDP and ICMP over IPv6 */
3092 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
3093 MVPP2_PRS_RI_L4_TCP,
3094 MVPP2_PRS_RI_L4_PROTO_MASK);
3095 if (err)
3096 return err;
3097
3098 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
3099 MVPP2_PRS_RI_L4_UDP,
3100 MVPP2_PRS_RI_L4_PROTO_MASK);
3101 if (err)
3102 return err;
3103
3104 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
3105 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
3106 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
3107 MVPP2_PRS_RI_CPU_CODE_MASK |
3108 MVPP2_PRS_RI_UDF3_MASK);
3109 if (err)
3110 return err;
3111
3112	/* IPv4 is the last header. This is a case similar to 6-TCP or 17-UDP */
3113 /* Result Info: UDF7=1, DS lite */
3114 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
3115 MVPP2_PRS_RI_UDF7_IP6_LITE,
3116 MVPP2_PRS_RI_UDF7_MASK);
3117 if (err)
3118 return err;
3119
3120 /* IPv6 multicast */
3121 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
3122 if (err)
3123 return err;
3124
3125 /* Entry for checking hop limit */
3126 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3127 MVPP2_PE_LAST_FREE_TID);
3128 if (tid < 0)
3129 return tid;
3130
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003131 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003132 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3133 pe.index = tid;
3134
3135 /* Finished: go to flowid generation */
3136 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3137 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3138 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
3139 MVPP2_PRS_RI_DROP_MASK,
3140 MVPP2_PRS_RI_L3_PROTO_MASK |
3141 MVPP2_PRS_RI_DROP_MASK);
3142
3143 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
3144 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3145 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3146
3147 /* Update shadow table and hw entry */
3148 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3149 mvpp2_prs_hw_write(priv, &pe);
3150
3151 /* Default IPv6 entry for unknown protocols */
Markus Elfringc5b2ce22017-04-17 10:30:29 +02003152 memset(&pe, 0, sizeof(pe));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003153 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3154 pe.index = MVPP2_PE_IP6_PROTO_UN;
3155
3156 /* Finished: go to flowid generation */
3157 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3158 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3159 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3160 MVPP2_PRS_RI_L4_PROTO_MASK);
3161	/* Set L4 offset relative to our current place */
3162 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
3163 sizeof(struct ipv6hdr) - 4,
3164 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
3165
3166 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3167 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3168 /* Unmask all ports */
3169 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3170
3171 /* Update shadow table and hw entry */
3172 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3173 mvpp2_prs_hw_write(priv, &pe);
3174
3175 /* Default IPv6 entry for unknown ext protocols */
3176 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3177 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3178 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
3179
3180 /* Finished: go to flowid generation */
3181 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
3182 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
3183 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
3184 MVPP2_PRS_RI_L4_PROTO_MASK);
3185
3186 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
3187 MVPP2_PRS_IPV6_EXT_AI_BIT);
3188 /* Unmask all ports */
3189 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3190
3191 /* Update shadow table and hw entry */
3192 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
3193 mvpp2_prs_hw_write(priv, &pe);
3194
3195 /* Default IPv6 entry for unicast address */
3196 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
3197 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
3198 pe.index = MVPP2_PE_IP6_ADDR_UN;
3199
3200 /* Finished: go to IPv6 again */
3201 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
3202 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
3203 MVPP2_PRS_RI_L3_ADDR_MASK);
3204 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
3205 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3206 /* Shift back to IPV6 NH */
3207 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3208
3209 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
3210 /* Unmask all ports */
3211 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
3212
3213 /* Update shadow table and hw entry */
3214 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
3215 mvpp2_prs_hw_write(priv, &pe);
3216
3217 return 0;
3218}
3219
3220/* Parser default initialization */
3221static int mvpp2_prs_default_init(struct platform_device *pdev,
3222 struct mvpp2 *priv)
3223{
3224 int err, index, i;
3225
3226 /* Enable tcam table */
3227 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
3228
3229 /* Clear all tcam and sram entries */
3230 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
3231 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
3232 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3233 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
3234
3235 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
3236 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3237 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
3238 }
3239
3240 /* Invalidate all tcam entries */
3241 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
3242 mvpp2_prs_hw_inv(priv, index);
3243
3244 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
Markus Elfring37df25e2017-04-17 09:12:34 +02003245 sizeof(*priv->prs_shadow),
Marcin Wojtas3f518502014-07-10 16:52:13 -03003246 GFP_KERNEL);
3247 if (!priv->prs_shadow)
3248 return -ENOMEM;
3249
3250 /* Always start from lookup = 0 */
3251 for (index = 0; index < MVPP2_MAX_PORTS; index++)
3252 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
3253 MVPP2_PRS_PORT_LU_MAX, 0);
3254
3255 mvpp2_prs_def_flow_init(priv);
3256
3257 mvpp2_prs_mh_init(priv);
3258
3259 mvpp2_prs_mac_init(priv);
3260
3261 mvpp2_prs_dsa_init(priv);
3262
3263 err = mvpp2_prs_etype_init(priv);
3264 if (err)
3265 return err;
3266
3267 err = mvpp2_prs_vlan_init(pdev, priv);
3268 if (err)
3269 return err;
3270
3271 err = mvpp2_prs_pppoe_init(priv);
3272 if (err)
3273 return err;
3274
3275 err = mvpp2_prs_ip6_init(priv);
3276 if (err)
3277 return err;
3278
3279 err = mvpp2_prs_ip4_init(priv);
3280 if (err)
3281 return err;
3282
3283 return 0;
3284}
3285
3286/* Compare MAC DA with tcam entry data */
3287static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
3288 const u8 *da, unsigned char *mask)
3289{
3290 unsigned char tcam_byte, tcam_mask;
3291 int index;
3292
3293 for (index = 0; index < ETH_ALEN; index++) {
3294 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
3295 if (tcam_mask != mask[index])
3296 return false;
3297
3298 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
3299 return false;
3300 }
3301
3302 return true;
3303}
3304
3305/* Find tcam entry with matched pair <MAC DA, port> */
3306static struct mvpp2_prs_entry *
3307mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
3308 unsigned char *mask, int udf_type)
3309{
3310 struct mvpp2_prs_entry *pe;
3311 int tid;
3312
3313 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3314 if (!pe)
3315 return NULL;
3316 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3317
3318	/* Go through all entries with MVPP2_PRS_LU_MAC */
3319 for (tid = MVPP2_PE_FIRST_FREE_TID;
3320 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3321 unsigned int entry_pmap;
3322
3323 if (!priv->prs_shadow[tid].valid ||
3324 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3325 (priv->prs_shadow[tid].udf != udf_type))
3326 continue;
3327
3328 pe->index = tid;
3329 mvpp2_prs_hw_read(priv, pe);
3330 entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
3331
3332 if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
3333 entry_pmap == pmap)
3334 return pe;
3335 }
3336 kfree(pe);
3337
3338 return NULL;
3339}
3340
3341/* Update parser's mac da entry */
3342static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3343 const u8 *da, bool add)
3344{
3345 struct mvpp2_prs_entry *pe;
3346 unsigned int pmap, len, ri;
3347 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3348 int tid;
3349
3350	/* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
3351 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
3352 MVPP2_PRS_UDF_MAC_DEF);
3353
3354 /* No such entry */
3355 if (!pe) {
3356 if (!add)
3357 return 0;
3358
3359 /* Create new TCAM entry */
3360		/* Find first range mac entry */
3361 for (tid = MVPP2_PE_FIRST_FREE_TID;
3362 tid <= MVPP2_PE_LAST_FREE_TID; tid++)
3363 if (priv->prs_shadow[tid].valid &&
3364 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
3365 (priv->prs_shadow[tid].udf ==
3366 MVPP2_PRS_UDF_MAC_RANGE))
3367 break;
3368
3369		/* Go through all entries from first to last */
3370 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
3371 tid - 1);
3372 if (tid < 0)
3373 return tid;
3374
3375 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3376 if (!pe)
Amitoj Kaur Chawlac2bb7bc2016-02-04 19:25:26 +05303377 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003378 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3379 pe->index = tid;
3380
3381 /* Mask all ports */
3382 mvpp2_prs_tcam_port_map_set(pe, 0);
3383 }
3384
3385 /* Update port mask */
3386 mvpp2_prs_tcam_port_set(pe, port, add);
3387
3388 /* Invalidate the entry if no ports are left enabled */
3389 pmap = mvpp2_prs_tcam_port_map_get(pe);
3390 if (pmap == 0) {
3391 if (add) {
3392 kfree(pe);
Amitoj Kaur Chawlac2bb7bc2016-02-04 19:25:26 +05303393 return -EINVAL;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003394 }
3395 mvpp2_prs_hw_inv(priv, pe->index);
3396 priv->prs_shadow[pe->index].valid = false;
3397 kfree(pe);
3398 return 0;
3399 }
3400
3401 /* Continue - set next lookup */
3402 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
3403
3404 /* Set match on DA */
3405 len = ETH_ALEN;
3406 while (len--)
3407 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
3408
3409 /* Set result info bits */
3410 if (is_broadcast_ether_addr(da))
3411 ri = MVPP2_PRS_RI_L2_BCAST;
3412 else if (is_multicast_ether_addr(da))
3413 ri = MVPP2_PRS_RI_L2_MCAST;
3414 else
3415 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
3416
3417 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3418 MVPP2_PRS_RI_MAC_ME_MASK);
3419 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
3420 MVPP2_PRS_RI_MAC_ME_MASK);
3421
3422 /* Shift to ethertype */
3423 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
3424 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
3425
3426 /* Update shadow table and hw entry */
3427 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
3428 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
3429 mvpp2_prs_hw_write(priv, pe);
3430
3431 kfree(pe);
3432
3433 return 0;
3434}
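/* Illustrative sketch (hypothetical caller, not part of this driver): adding
 * and later removing a unicast filter entry for port 0 with
 * mvpp2_prs_mac_da_accept(). The MAC address below is an assumed example.
 *
 *	static int example_mac_filter(struct mvpp2 *priv)
 *	{
 *		static const u8 da[ETH_ALEN] = {
 *			0x00, 0x50, 0x43, 0x12, 0x34, 0x56 };
 *		int err;
 *
 *		err = mvpp2_prs_mac_da_accept(priv, 0, da, true);
 *		if (err)
 *			return err;
 *
 *		return mvpp2_prs_mac_da_accept(priv, 0, da, false);
 *	}
 */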
3435
3436static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
3437{
3438 struct mvpp2_port *port = netdev_priv(dev);
3439 int err;
3440
3441 /* Remove old parser entry */
3442 err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
3443 false);
3444 if (err)
3445 return err;
3446
3447 /* Add new parser entry */
3448 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
3449 if (err)
3450 return err;
3451
3452 /* Set addr in the device */
3453 ether_addr_copy(dev->dev_addr, da);
3454
3455 return 0;
3456}
3457
3458/* Delete all port's multicast simple (not range) entries */
3459static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
3460{
3461 struct mvpp2_prs_entry pe;
3462 int index, tid;
3463
3464 for (tid = MVPP2_PE_FIRST_FREE_TID;
3465 tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
3466 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
3467
3468 if (!priv->prs_shadow[tid].valid ||
3469 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
3470 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
3471 continue;
3472
3473 /* Only simple mac entries */
3474 pe.index = tid;
3475 mvpp2_prs_hw_read(priv, &pe);
3476
3477 /* Read mac addr from entry */
3478 for (index = 0; index < ETH_ALEN; index++)
3479 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
3480 &da_mask[index]);
3481
3482 if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
3483 /* Delete this entry */
3484 mvpp2_prs_mac_da_accept(priv, port, da, false);
3485 }
3486}
3487
3488static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
3489{
3490 switch (type) {
3491 case MVPP2_TAG_TYPE_EDSA:
3492 /* Add port to EDSA entries */
3493 mvpp2_prs_dsa_tag_set(priv, port, true,
3494 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3495 mvpp2_prs_dsa_tag_set(priv, port, true,
3496 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3497 /* Remove port from DSA entries */
3498 mvpp2_prs_dsa_tag_set(priv, port, false,
3499 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3500 mvpp2_prs_dsa_tag_set(priv, port, false,
3501 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3502 break;
3503
3504 case MVPP2_TAG_TYPE_DSA:
3505 /* Add port to DSA entries */
3506 mvpp2_prs_dsa_tag_set(priv, port, true,
3507 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3508 mvpp2_prs_dsa_tag_set(priv, port, true,
3509 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3510 /* Remove port from EDSA entries */
3511 mvpp2_prs_dsa_tag_set(priv, port, false,
3512 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3513 mvpp2_prs_dsa_tag_set(priv, port, false,
3514 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3515 break;
3516
3517 case MVPP2_TAG_TYPE_MH:
3518 case MVPP2_TAG_TYPE_NONE:
3519		/* Remove port from EDSA and DSA entries */
3520 mvpp2_prs_dsa_tag_set(priv, port, false,
3521 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
3522 mvpp2_prs_dsa_tag_set(priv, port, false,
3523 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
3524 mvpp2_prs_dsa_tag_set(priv, port, false,
3525 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
3526 mvpp2_prs_dsa_tag_set(priv, port, false,
3527 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
3528 break;
3529
3530 default:
3531 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
3532 return -EINVAL;
3533 }
3534
3535 return 0;
3536}
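/* Illustrative sketch (hypothetical usage, not part of this driver): switching
 * port 1 to plain DSA tagging; an out-of-range type is rejected with -EINVAL
 * as handled above.
 *
 *	if (mvpp2_prs_tag_mode_set(priv, 1, MVPP2_TAG_TYPE_DSA))
 *		pr_warn("mvpp2: failed to set DSA tag mode\n");
 */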
3537
3538/* Set prs flow for the port */
3539static int mvpp2_prs_def_flow(struct mvpp2_port *port)
3540{
3541 struct mvpp2_prs_entry *pe;
3542 int tid;
3543
3544 pe = mvpp2_prs_flow_find(port->priv, port->id);
3545
3546	/* No such entry exists */
3547 if (!pe) {
3548		/* Go through all entries from last to first */
3549 tid = mvpp2_prs_tcam_first_free(port->priv,
3550 MVPP2_PE_LAST_FREE_TID,
3551 MVPP2_PE_FIRST_FREE_TID);
3552 if (tid < 0)
3553 return tid;
3554
3555 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3556 if (!pe)
3557 return -ENOMEM;
3558
3559 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
3560 pe->index = tid;
3561
3562		/* Set flow ID */
3563 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
3564 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
3565
3566 /* Update shadow table */
3567 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
3568 }
3569
3570 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
3571 mvpp2_prs_hw_write(port->priv, pe);
3572 kfree(pe);
3573
3574 return 0;
3575}
3576
3577/* Classifier configuration routines */
3578
3579/* Update classification flow table registers */
3580static void mvpp2_cls_flow_write(struct mvpp2 *priv,
3581 struct mvpp2_cls_flow_entry *fe)
3582{
3583 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
3584 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
3585 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
3586 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
3587}
3588
3589/* Update classification lookup table register */
3590static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
3591 struct mvpp2_cls_lookup_entry *le)
3592{
3593 u32 val;
3594
3595 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
3596 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
3597 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
3598}
3599
3600/* Classifier default initialization */
3601static void mvpp2_cls_init(struct mvpp2 *priv)
3602{
3603 struct mvpp2_cls_lookup_entry le;
3604 struct mvpp2_cls_flow_entry fe;
3605 int index;
3606
3607 /* Enable classifier */
3608 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
3609
3610 /* Clear classifier flow table */
Arnd Bergmanne8f967c2016-11-24 17:28:12 +01003611 memset(&fe.data, 0, sizeof(fe.data));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003612 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
3613 fe.index = index;
3614 mvpp2_cls_flow_write(priv, &fe);
3615 }
3616
3617 /* Clear classifier lookup table */
3618 le.data = 0;
3619 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
3620 le.lkpid = index;
3621 le.way = 0;
3622 mvpp2_cls_lookup_write(priv, &le);
3623
3624 le.way = 1;
3625 mvpp2_cls_lookup_write(priv, &le);
3626 }
3627}
3628
3629static void mvpp2_cls_port_config(struct mvpp2_port *port)
3630{
3631 struct mvpp2_cls_lookup_entry le;
3632 u32 val;
3633
3634 /* Set way for the port */
3635 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
3636 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
3637 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
3638
3639 /* Pick the entry to be accessed in lookup ID decoding table
3640 * according to the way and lkpid.
3641 */
3642 le.lkpid = port->id;
3643 le.way = 0;
3644 le.data = 0;
3645
3646 /* Set initial CPU queue for receiving packets */
3647 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
3648 le.data |= port->first_rxq;
3649
3650 /* Disable classification engines */
3651 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
3652
3653 /* Update lookup ID table entry */
3654 mvpp2_cls_lookup_write(port->priv, &le);
3655}
3656
3657/* Set CPU queue number for oversize packets */
3658static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
3659{
3660 u32 val;
3661
3662 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
3663 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
3664
3665 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
3666 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
3667
3668 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
3669 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
3670 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
3671}
3672
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003673static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
3674{
3675 if (likely(pool->frag_size <= PAGE_SIZE))
3676 return netdev_alloc_frag(pool->frag_size);
3677 else
3678 return kmalloc(pool->frag_size, GFP_ATOMIC);
3679}
3680
3681static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
3682{
3683 if (likely(pool->frag_size <= PAGE_SIZE))
3684 skb_free_frag(data);
3685 else
3686 kfree(data);
3687}
3688
Marcin Wojtas3f518502014-07-10 16:52:13 -03003689/* Buffer Manager configuration routines */
3690
3691/* Create pool */
3692static int mvpp2_bm_pool_create(struct platform_device *pdev,
3693 struct mvpp2 *priv,
3694 struct mvpp2_bm_pool *bm_pool, int size)
3695{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003696 u32 val;
3697
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003698 /* Number of buffer pointers must be a multiple of 16, as per
3699 * hardware constraints
3700 */
3701 if (!IS_ALIGNED(size, 16))
3702 return -EINVAL;
3703
3704 /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
3705 * bytes per buffer pointer
3706 */
3707 if (priv->hw_version == MVPP21)
3708 bm_pool->size_bytes = 2 * sizeof(u32) * size;
3709 else
3710 bm_pool->size_bytes = 2 * sizeof(u64) * size;
3711
3712 bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003713 &bm_pool->dma_addr,
Marcin Wojtas3f518502014-07-10 16:52:13 -03003714 GFP_KERNEL);
3715 if (!bm_pool->virt_addr)
3716 return -ENOMEM;
3717
Thomas Petazzonid3158802017-02-21 11:28:13 +01003718 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
3719 MVPP2_BM_POOL_PTR_ALIGN)) {
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003720 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
3721 bm_pool->virt_addr, bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003722 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
3723 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
3724 return -ENOMEM;
3725 }
3726
3727 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003728 lower_32_bits(bm_pool->dma_addr));
Marcin Wojtas3f518502014-07-10 16:52:13 -03003729 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
3730
3731 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3732 val |= MVPP2_BM_START_MASK;
3733 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3734
3735 bm_pool->type = MVPP2_BM_FREE;
3736 bm_pool->size = size;
3737 bm_pool->pkt_size = 0;
3738 bm_pool->buf_num = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003739
3740 return 0;
3741}
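/* Worked example (illustrative, size assumed): for size = 1024 buffer
 * pointers, which satisfies the multiple-of-16 check above, the coherent
 * allocation is 2 * sizeof(u32) * 1024 = 8192 bytes on PPv2.1 and
 * 2 * sizeof(u64) * 1024 = 16384 bytes on PPv2.2.
 */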
3742
3743/* Set pool buffer size */
3744static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
3745 struct mvpp2_bm_pool *bm_pool,
3746 int buf_size)
3747{
3748 u32 val;
3749
3750 bm_pool->buf_size = buf_size;
3751
3752 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
3753 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
3754}
3755
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003756static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
3757 struct mvpp2_bm_pool *bm_pool,
3758 dma_addr_t *dma_addr,
3759 phys_addr_t *phys_addr)
3760{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02003761 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01003762
3763 *dma_addr = mvpp2_percpu_read(priv, cpu,
3764 MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
3765 *phys_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_VIRT_ALLOC_REG);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003766
3767 if (priv->hw_version == MVPP22) {
3768 u32 val;
3769 u32 dma_addr_highbits, phys_addr_highbits;
3770
Thomas Petazzonia7868412017-03-07 16:53:13 +01003771 val = mvpp2_percpu_read(priv, cpu, MVPP22_BM_ADDR_HIGH_ALLOC);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003772 dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
3773 phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
3774 MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
3775
3776 if (sizeof(dma_addr_t) == 8)
3777 *dma_addr |= (u64)dma_addr_highbits << 32;
3778
3779 if (sizeof(phys_addr_t) == 8)
3780 *phys_addr |= (u64)phys_addr_highbits << 32;
3781 }
Thomas Petazzonia704bb52017-06-10 23:18:22 +02003782
3783 put_cpu();
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003784}
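/* Worked example (illustrative, register values assumed): on PPv2.2 with a
 * 64-bit dma_addr_t, a low word of 0x12345678 read from
 * MVPP2_BM_PHY_ALLOC_REG combined with high bits 0x1 from
 * MVPP22_BM_ADDR_HIGH_ALLOC gives
 * dma_addr = (0x1ULL << 32) | 0x12345678 = 0x112345678.
 */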
3785
Ezequiel Garcia7861f122014-07-21 13:48:14 -03003786/* Free all buffers from the pool */
Marcin Wojtas4229d502015-12-03 15:20:50 +01003787static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
3788 struct mvpp2_bm_pool *bm_pool)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003789{
3790 int i;
3791
Ezequiel Garcia7861f122014-07-21 13:48:14 -03003792 for (i = 0; i < bm_pool->buf_num; i++) {
Thomas Petazzoni20396132017-03-07 16:53:00 +01003793 dma_addr_t buf_dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003794 phys_addr_t buf_phys_addr;
3795 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003796
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003797 mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
3798 &buf_dma_addr, &buf_phys_addr);
Marcin Wojtas4229d502015-12-03 15:20:50 +01003799
Thomas Petazzoni20396132017-03-07 16:53:00 +01003800 dma_unmap_single(dev, buf_dma_addr,
Marcin Wojtas4229d502015-12-03 15:20:50 +01003801 bm_pool->buf_size, DMA_FROM_DEVICE);
3802
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003803 data = (void *)phys_to_virt(buf_phys_addr);
3804 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003805 break;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003806
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003807 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003808 }
3809
3810 /* Update BM driver with number of buffers removed from pool */
3811 bm_pool->buf_num -= i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003812}
3813
3814/* Cleanup pool */
3815static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
3816 struct mvpp2 *priv,
3817 struct mvpp2_bm_pool *bm_pool)
3818{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003819 u32 val;
3820
Marcin Wojtas4229d502015-12-03 15:20:50 +01003821 mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03003822 if (bm_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03003823 WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
3824 return 0;
3825 }
3826
3827 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
3828 val |= MVPP2_BM_STOP_MASK;
3829 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
3830
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003831 dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
Marcin Wojtas3f518502014-07-10 16:52:13 -03003832 bm_pool->virt_addr,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003833 bm_pool->dma_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003834 return 0;
3835}
3836
3837static int mvpp2_bm_pools_init(struct platform_device *pdev,
3838 struct mvpp2 *priv)
3839{
3840 int i, err, size;
3841 struct mvpp2_bm_pool *bm_pool;
3842
3843 /* Create all pools with maximum size */
3844 size = MVPP2_BM_POOL_SIZE_MAX;
3845 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3846 bm_pool = &priv->bm_pools[i];
3847 bm_pool->id = i;
3848 err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
3849 if (err)
3850 goto err_unroll_pools;
3851 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
3852 }
3853 return 0;
3854
3855err_unroll_pools:
3856 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
3857 for (i = i - 1; i >= 0; i--)
3858 mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
3859 return err;
3860}
3861
3862static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
3863{
3864 int i, err;
3865
3866 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
3867 /* Mask BM all interrupts */
3868 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
3869 /* Clear BM cause register */
3870 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
3871 }
3872
3873 /* Allocate and initialize BM pools */
3874 priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
Markus Elfring81f915e2017-04-17 09:06:33 +02003875 sizeof(*priv->bm_pools), GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003876 if (!priv->bm_pools)
3877 return -ENOMEM;
3878
3879 err = mvpp2_bm_pools_init(pdev, priv);
3880 if (err < 0)
3881 return err;
3882 return 0;
3883}
3884
3885/* Attach long pool to rxq */
3886static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
3887 int lrxq, int long_pool)
3888{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003889 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003890 int prxq;
3891
3892 /* Get queue physical ID */
3893 prxq = port->rxqs[lrxq]->id;
3894
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003895 if (port->priv->hw_version == MVPP21)
3896 mask = MVPP21_RXQ_POOL_LONG_MASK;
3897 else
3898 mask = MVPP22_RXQ_POOL_LONG_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003899
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003900 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3901 val &= ~mask;
3902 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003903 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3904}
3905
3906/* Attach short pool to rxq */
3907static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
3908 int lrxq, int short_pool)
3909{
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003910 u32 val, mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003911 int prxq;
3912
3913 /* Get queue physical ID */
3914 prxq = port->rxqs[lrxq]->id;
3915
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003916 if (port->priv->hw_version == MVPP21)
3917 mask = MVPP21_RXQ_POOL_SHORT_MASK;
3918 else
3919 mask = MVPP22_RXQ_POOL_SHORT_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003920
Thomas Petazzoni5eac8922017-03-07 16:53:10 +01003921 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
3922 val &= ~mask;
3923 val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003924 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
3925}
3926
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003927static void *mvpp2_buf_alloc(struct mvpp2_port *port,
3928 struct mvpp2_bm_pool *bm_pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003929 dma_addr_t *buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003930 phys_addr_t *buf_phys_addr,
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003931 gfp_t gfp_mask)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003932{
Thomas Petazzoni20396132017-03-07 16:53:00 +01003933 dma_addr_t dma_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003934 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003935
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003936 data = mvpp2_frag_alloc(bm_pool);
3937 if (!data)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003938 return NULL;
3939
Thomas Petazzoni20396132017-03-07 16:53:00 +01003940 dma_addr = dma_map_single(port->dev->dev.parent, data,
3941 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
3942 DMA_FROM_DEVICE);
3943 if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003944 mvpp2_frag_free(bm_pool, data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003945 return NULL;
3946 }
Thomas Petazzoni20396132017-03-07 16:53:00 +01003947 *buf_dma_addr = dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003948 *buf_phys_addr = virt_to_phys(data);
Marcin Wojtas3f518502014-07-10 16:52:13 -03003949
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003950 return data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003951}
3952
Marcin Wojtas3f518502014-07-10 16:52:13 -03003953/* Release buffer to BM */
3954static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
Thomas Petazzoni20396132017-03-07 16:53:00 +01003955 dma_addr_t buf_dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003956 phys_addr_t buf_phys_addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03003957{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02003958 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01003959
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003960 if (port->priv->hw_version == MVPP22) {
3961 u32 val = 0;
3962
3963 if (sizeof(dma_addr_t) == 8)
3964 val |= upper_32_bits(buf_dma_addr) &
3965 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
3966
3967 if (sizeof(phys_addr_t) == 8)
3968 val |= (upper_32_bits(buf_phys_addr)
3969 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
3970 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
3971
Thomas Petazzonia7868412017-03-07 16:53:13 +01003972 mvpp2_percpu_write(port->priv, cpu,
3973 MVPP22_BM_ADDR_HIGH_RLS_REG, val);
Thomas Petazzonid01524d2017-03-07 16:53:09 +01003974 }
3975
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003976 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
3977 * returned in the "cookie" field of the RX
3978 * descriptor. Instead of storing the virtual address, we
3979 * store the physical address
3980 */
Thomas Petazzonia7868412017-03-07 16:53:13 +01003981 mvpp2_percpu_write(port->priv, cpu,
3982 MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
3983 mvpp2_percpu_write(port->priv, cpu,
3984 MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02003985
3986 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03003987}
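/* Illustrative sketch (hypothetical helper, not part of this driver): a
 * refill-style path pairs mvpp2_buf_alloc() with mvpp2_bm_pool_put(),
 * returning a freshly mapped buffer to the pool it was taken from.
 *
 *	static int example_refill(struct mvpp2_port *port,
 *				  struct mvpp2_bm_pool *bm_pool)
 *	{
 *		dma_addr_t dma_addr;
 *		phys_addr_t phys_addr;
 *		void *buf;
 *
 *		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
 *				      GFP_ATOMIC);
 *		if (!buf)
 *			return -ENOMEM;
 *
 *		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr, phys_addr);
 *		return 0;
 *	}
 */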
3988
Marcin Wojtas3f518502014-07-10 16:52:13 -03003989/* Allocate buffers for the pool */
3990static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
3991 struct mvpp2_bm_pool *bm_pool, int buf_num)
3992{
Marcin Wojtas3f518502014-07-10 16:52:13 -03003993 int i, buf_size, total_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01003994 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01003995 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01003996 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03003997
3998 buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
3999 total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
4000
4001 if (buf_num < 0 ||
4002 (buf_num + bm_pool->buf_num > bm_pool->size)) {
4003 netdev_err(port->dev,
4004 "cannot allocate %d buffers for pool %d\n",
4005 buf_num, bm_pool->id);
4006 return 0;
4007 }
4008
Marcin Wojtas3f518502014-07-10 16:52:13 -03004009 for (i = 0; i < buf_num; i++) {
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004010 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
4011 &phys_addr, GFP_KERNEL);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004012 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004013 break;
4014
Thomas Petazzoni20396132017-03-07 16:53:00 +01004015 mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01004016 phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004017 }
4018
4019 /* Update BM driver with number of buffers added to pool */
4020 bm_pool->buf_num += i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004021
4022 netdev_dbg(port->dev,
4023 "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
4024 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4025 bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
4026
4027 netdev_dbg(port->dev,
4028 "%s pool %d: %d of %d buffers added\n",
4029 bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
4030 bm_pool->id, i, buf_num);
4031 return i;
4032}
4033
4034/* Notify the driver that the BM pool is being used as a specific type and return the
4035 * pool pointer on success
4036 */
4037static struct mvpp2_bm_pool *
4038mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
4039 int pkt_size)
4040{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004041 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
4042 int num;
4043
4044 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
4045 netdev_err(port->dev, "mixing pool types is forbidden\n");
4046 return NULL;
4047 }
4048
Marcin Wojtas3f518502014-07-10 16:52:13 -03004049 if (new_pool->type == MVPP2_BM_FREE)
4050 new_pool->type = type;
4051
4052 /* Allocate buffers in case BM pool is used as long pool, but packet
4053	 * size doesn't match MTU or BM pool hasn't been used yet
4054 */
4055 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
4056 (new_pool->pkt_size == 0)) {
4057 int pkts_num;
4058
4059 /* Set default buffer number or free all the buffers in case
4060 * the pool is not empty
4061 */
4062 pkts_num = new_pool->buf_num;
4063 if (pkts_num == 0)
4064 pkts_num = type == MVPP2_BM_SWF_LONG ?
4065 MVPP2_BM_LONG_BUF_NUM :
4066 MVPP2_BM_SHORT_BUF_NUM;
4067 else
Marcin Wojtas4229d502015-12-03 15:20:50 +01004068 mvpp2_bm_bufs_free(port->dev->dev.parent,
4069 port->priv, new_pool);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004070
4071 new_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004072 new_pool->frag_size =
4073 SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4074 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004075
4076 /* Allocate buffers for this pool */
4077 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
4078 if (num != pkts_num) {
4079 WARN(1, "pool %d: %d of %d allocated\n",
4080 new_pool->id, num, pkts_num);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004081 return NULL;
4082 }
4083 }
4084
4085 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
4086 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
4087
Marcin Wojtas3f518502014-07-10 16:52:13 -03004088 return new_pool;
4089}
4090
4091/* Initialize pools for swf */
4092static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
4093{
Marcin Wojtas3f518502014-07-10 16:52:13 -03004094 int rxq;
4095
4096 if (!port->pool_long) {
4097 port->pool_long =
4098 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
4099 MVPP2_BM_SWF_LONG,
4100 port->pkt_size);
4101 if (!port->pool_long)
4102 return -ENOMEM;
4103
Marcin Wojtas3f518502014-07-10 16:52:13 -03004104 port->pool_long->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004105
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004106 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004107 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
4108 }
4109
4110 if (!port->pool_short) {
4111 port->pool_short =
4112 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
4113 MVPP2_BM_SWF_SHORT,
4114 MVPP2_BM_SHORT_PKT_SIZE);
4115 if (!port->pool_short)
4116 return -ENOMEM;
4117
Marcin Wojtas3f518502014-07-10 16:52:13 -03004118 port->pool_short->port_map |= (1 << port->id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004119
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004120 for (rxq = 0; rxq < port->nrxqs; rxq++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004121 mvpp2_rxq_short_pool_set(port, rxq,
4122 port->pool_short->id);
4123 }
4124
4125 return 0;
4126}
4127
4128static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
4129{
4130 struct mvpp2_port *port = netdev_priv(dev);
4131 struct mvpp2_bm_pool *port_pool = port->pool_long;
4132 int num, pkts_num = port_pool->buf_num;
4133 int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
4134
4135 /* Update BM pool with new buffer size */
Marcin Wojtas4229d502015-12-03 15:20:50 +01004136 mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
Ezequiel Garciad74c96c2014-07-21 13:48:13 -03004137 if (port_pool->buf_num) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004138 WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
4139 return -EIO;
4140 }
4141
4142 port_pool->pkt_size = pkt_size;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01004143 port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
4144 MVPP2_SKB_SHINFO_SIZE;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004145 num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
4146 if (num != pkts_num) {
4147 WARN(1, "pool %d: %d of %d allocated\n",
4148 port_pool->id, num, pkts_num);
4149 return -EIO;
4150 }
4151
4152 mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
4153 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
4154 dev->mtu = mtu;
4155 netdev_update_features(dev);
4156 return 0;
4157}
4158
4159static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
4160{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004161 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004162
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004163 for (i = 0; i < port->nqvecs; i++)
4164 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4165
Marcin Wojtas3f518502014-07-10 16:52:13 -03004166 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004167 MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004168}
4169
4170static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
4171{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004172 int i, sw_thread_mask = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004173
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004174 for (i = 0; i < port->nqvecs; i++)
4175 sw_thread_mask |= port->qvecs[i].sw_thread_mask;
4176
Marcin Wojtas3f518502014-07-10 16:52:13 -03004177 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02004178 MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
4179}
4180
4181static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
4182{
4183 struct mvpp2_port *port = qvec->port;
4184
4185 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4186 MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
4187}
4188
4189static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
4190{
4191 struct mvpp2_port *port = qvec->port;
4192
4193 mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
4194 MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004195}
4196
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004197/* Mask the current CPU's Rx/Tx interrupts
4198 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4199 * using smp_processor_id() is OK.
4200 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004201static void mvpp2_interrupts_mask(void *arg)
4202{
4203 struct mvpp2_port *port = arg;
4204
Thomas Petazzonia7868412017-03-07 16:53:13 +01004205 mvpp2_percpu_write(port->priv, smp_processor_id(),
4206 MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004207}
4208
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004209/* Unmask the current CPU's Rx/Tx interrupts.
4210 * Called by on_each_cpu(), guaranteed to run with migration disabled,
4211 * using smp_processor_id() is OK.
4212 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004213static void mvpp2_interrupts_unmask(void *arg)
4214{
4215 struct mvpp2_port *port = arg;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004216 u32 val;
4217
4218 val = MVPP2_CAUSE_MISC_SUM_MASK |
4219 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4220 if (port->has_tx_irqs)
4221 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004222
Thomas Petazzonia7868412017-03-07 16:53:13 +01004223 mvpp2_percpu_write(port->priv, smp_processor_id(),
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004224 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4225}
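/* Illustrative usage (as implied by the comments above): these handlers act
 * only on the current CPU's registers, so they are meant to be run on every
 * CPU via on_each_cpu(), for example:
 *
 *	on_each_cpu(mvpp2_interrupts_mask, port, 1);
 *	...
 *	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
 */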
4226
4227static void
4228mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
4229{
4230 u32 val;
4231 int i;
4232
4233 if (port->priv->hw_version != MVPP22)
4234 return;
4235
4236 if (mask)
4237 val = 0;
4238 else
4239 val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
4240
4241 for (i = 0; i < port->nqvecs; i++) {
4242 struct mvpp2_queue_vector *v = port->qvecs + i;
4243
4244 if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
4245 continue;
4246
4247 mvpp2_percpu_write(port->priv, v->sw_thread_id,
4248 MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
4249 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004250}
4251
4252/* Port configuration routines */
4253
Antoine Ténart39193572017-08-22 19:08:24 +02004254static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
4255{
4256 u32 val;
4257
4258 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4259 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4260 val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL |
4261 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4262 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
4263 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4264
4265 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4266 val |= MVPP2_GMAC_DISABLE_PADDING;
4267 val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
4268 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4269 } else if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
4270 port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
4271 port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
4272 port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
4273 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
4274 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
4275 MVPP22_CTRL4_SYNC_BYPASS_DIS |
4276 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
4277 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
4278 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
4279
4280 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4281 val &= ~MVPP2_GMAC_DISABLE_PADDING;
4282 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4283 }
4284
4285 /* The port is connected to a copper PHY */
4286 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4287 val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
4288 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4289
4290 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4291 val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
4292 MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
4293 MVPP2_GMAC_AN_DUPLEX_EN;
4294 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4295 val |= MVPP2_GMAC_IN_BAND_AUTONEG;
4296 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4297}
4298
4299static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
4300{
4301 u32 val;
4302
4303 /* Force link down */
4304 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4305 val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
4306 val |= MVPP2_GMAC_FORCE_LINK_DOWN;
4307 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4308
4309 /* Set the GMAC in a reset state */
4310 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4311 val |= MVPP2_GMAC_PORT_RESET_MASK;
4312 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4313
4314 /* Configure the PCS and in-band AN */
4315 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4316 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
4317 val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
4318 } else if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
4319 port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
4320 port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
4321 port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
4322 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
4323 val |= MVPP2_GMAC_PORT_RGMII_MASK;
4324 }
4325 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4326
4327 mvpp2_port_mii_gmac_configure_mode(port);
4328
4329 /* Unset the GMAC reset state */
4330 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
4331 val &= ~MVPP2_GMAC_PORT_RESET_MASK;
4332 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4333
4334 /* Stop forcing link down */
4335 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4336 val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
4337 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4338}
4339
Thomas Petazzoni26975822017-03-07 16:53:14 +01004340static void mvpp22_port_mii_set(struct mvpp2_port *port)
4341{
4342 u32 val;
4343
Thomas Petazzoni26975822017-03-07 16:53:14 +01004344 /* Only GOP port 0 has an XLG MAC */
4345 if (port->gop_id == 0) {
4346 val = readl(port->base + MVPP22_XLG_CTRL3_REG);
4347 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
Antoine Ténart725757a2017-06-12 16:01:39 +02004348
4349 if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4350 port->phy_interface == PHY_INTERFACE_MODE_10GKR)
4351 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4352 else
4353 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4354
Thomas Petazzoni26975822017-03-07 16:53:14 +01004355 writel(val, port->base + MVPP22_XLG_CTRL3_REG);
4356 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01004357}
4358
Marcin Wojtas3f518502014-07-10 16:52:13 -03004359static void mvpp2_port_mii_set(struct mvpp2_port *port)
4360{
Thomas Petazzoni26975822017-03-07 16:53:14 +01004361 if (port->priv->hw_version == MVPP22)
4362 mvpp22_port_mii_set(port);
4363
Antoine Ténart39193572017-08-22 19:08:24 +02004364 if (port->phy_interface == PHY_INTERFACE_MODE_RGMII ||
4365 port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
4366 port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
4367 port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID ||
4368 port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4369 mvpp2_port_mii_gmac_configure(port);
Marcin Wojtas08a23752014-07-21 13:48:12 -03004370}
4371
4372static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
4373{
4374 u32 val;
4375
4376 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
4377 val |= MVPP2_GMAC_FC_ADV_EN;
4378 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004379}
4380
4381static void mvpp2_port_enable(struct mvpp2_port *port)
4382{
4383 u32 val;
4384
Antoine Ténart725757a2017-06-12 16:01:39 +02004385 /* Only GOP port 0 has an XLG MAC */
4386 if (port->gop_id == 0 &&
4387 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4388 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4389 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4390 val |= MVPP22_XLG_CTRL0_PORT_EN |
4391 MVPP22_XLG_CTRL0_MAC_RESET_DIS;
4392 val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
4393 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4394 } else {
4395 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4396 val |= MVPP2_GMAC_PORT_EN_MASK;
4397 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
4398 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4399 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004400}
4401
4402static void mvpp2_port_disable(struct mvpp2_port *port)
4403{
4404 u32 val;
4405
Antoine Ténart725757a2017-06-12 16:01:39 +02004406 /* Only GOP port 0 has an XLG MAC */
4407 if (port->gop_id == 0 &&
4408 (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
4409 port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
4410 val = readl(port->base + MVPP22_XLG_CTRL0_REG);
4411 val &= ~(MVPP22_XLG_CTRL0_PORT_EN |
4412 MVPP22_XLG_CTRL0_MAC_RESET_DIS);
4413 writel(val, port->base + MVPP22_XLG_CTRL0_REG);
4414 } else {
4415 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4416 val &= ~(MVPP2_GMAC_PORT_EN_MASK);
4417 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4418 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004419}
4420
4421/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
4422static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
4423{
4424 u32 val;
4425
4426 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
4427 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
4428 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4429}
4430
4431/* Configure loopback port */
4432static void mvpp2_port_loopback_set(struct mvpp2_port *port)
4433{
4434 u32 val;
4435
4436 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4437
4438 if (port->speed == 1000)
4439 val |= MVPP2_GMAC_GMII_LB_EN_MASK;
4440 else
4441 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
4442
4443 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
4444 val |= MVPP2_GMAC_PCS_LB_EN_MASK;
4445 else
4446 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
4447
4448 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
4449}
4450
4451static void mvpp2_port_reset(struct mvpp2_port *port)
4452{
4453 u32 val;
4454
4455 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4456 ~MVPP2_GMAC_PORT_RESET_MASK;
4457 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
4458
4459 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
4460 MVPP2_GMAC_PORT_RESET_MASK)
4461 continue;
4462}
4463
4464/* Change maximum receive size of the port */
4465static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
4466{
4467 u32 val;
4468
4469 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
4470 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
4471 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
4472 MVPP2_GMAC_MAX_RX_SIZE_OFFS);
4473 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
4474}
4475
4476/* Set defaults to the MVPP2 port */
4477static void mvpp2_defaults_set(struct mvpp2_port *port)
4478{
4479 int tx_port_num, val, queue, ptxq, lrxq;
4480
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01004481 if (port->priv->hw_version == MVPP21) {
4482 /* Configure port to loopback if needed */
4483 if (port->flags & MVPP2_F_LOOPBACK)
4484 mvpp2_port_loopback_set(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004485
Thomas Petazzoni3d9017d2017-03-07 16:53:11 +01004486 /* Update TX FIFO MIN Threshold */
4487 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4488 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
4489 /* Min. TX threshold must be less than minimal packet length */
4490 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
4491 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
4492 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03004493
4494 /* Disable Legacy WRR, Disable EJP, Release from reset */
4495 tx_port_num = mvpp2_egress_port(port);
4496 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
4497 tx_port_num);
4498 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
4499
4500 /* Close bandwidth for all queues */
4501 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
4502 ptxq = mvpp2_txq_phys(port->id, queue);
4503 mvpp2_write(port->priv,
4504 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
4505 }
4506
4507 /* Set refill period to 1 usec, refill tokens
4508 * and bucket size to maximum
4509 */
4510 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
4511 port->priv->tclk / USEC_PER_SEC);
4512 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
4513 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
4514 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
4515 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
4516 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
4517 val = MVPP2_TXP_TOKEN_SIZE_MAX;
4518 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4519
4520 /* Set MaximumLowLatencyPacketSize value to 256 */
4521 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
4522 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
4523 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
4524
4525 /* Enable Rx cache snoop */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004526 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004527 queue = port->rxqs[lrxq]->id;
4528 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4529 val |= MVPP2_SNOOP_PKT_SIZE_MASK |
4530 MVPP2_SNOOP_BUF_HDR_MASK;
4531 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4532 }
4533
4534 /* At default, mask all interrupts to all present cpus */
4535 mvpp2_interrupts_disable(port);
4536}
4537
4538/* Enable/disable receiving packets */
4539static void mvpp2_ingress_enable(struct mvpp2_port *port)
4540{
4541 u32 val;
4542 int lrxq, queue;
4543
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004544 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004545 queue = port->rxqs[lrxq]->id;
4546 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4547 val &= ~MVPP2_RXQ_DISABLE_MASK;
4548 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4549 }
4550}
4551
4552static void mvpp2_ingress_disable(struct mvpp2_port *port)
4553{
4554 u32 val;
4555 int lrxq, queue;
4556
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004557 for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004558 queue = port->rxqs[lrxq]->id;
4559 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
4560 val |= MVPP2_RXQ_DISABLE_MASK;
4561 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
4562 }
4563}
4564
4565/* Enable transmit via physical egress queue
4566 * - HW starts taking descriptors from DRAM
4567 */
4568static void mvpp2_egress_enable(struct mvpp2_port *port)
4569{
4570 u32 qmap;
4571 int queue;
4572 int tx_port_num = mvpp2_egress_port(port);
4573
4574 /* Enable all initialized TXs. */
4575 qmap = 0;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004576 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004577 struct mvpp2_tx_queue *txq = port->txqs[queue];
4578
Markus Elfringdbbb2f02017-04-17 14:07:52 +02004579 if (txq->descs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004580 qmap |= (1 << queue);
4581 }
4582
4583 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4584 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
4585}
4586
4587/* Disable transmit via physical egress queue
4588 * - HW doesn't take descriptors from DRAM
4589 */
4590static void mvpp2_egress_disable(struct mvpp2_port *port)
4591{
4592 u32 reg_data;
4593 int delay;
4594 int tx_port_num = mvpp2_egress_port(port);
4595
4596 /* Issue stop command for active channels only */
4597 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4598 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
4599 MVPP2_TXP_SCHED_ENQ_MASK;
4600 if (reg_data != 0)
4601 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
4602 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
4603
4604 /* Wait for all Tx activity to terminate. */
4605 delay = 0;
4606 do {
4607 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
4608 netdev_warn(port->dev,
4609 "Tx stop timed out, status=0x%08x\n",
4610 reg_data);
4611 break;
4612 }
4613 mdelay(1);
4614 delay++;
4615
4616		/* Check the port Tx Command register to verify that all
4617		 * Tx queues have stopped
4618 */
4619 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
4620 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
4621}
4622
4623/* Rx descriptors helper methods */
4624
4625/* Get number of Rx descriptors occupied by received packets */
4626static inline int
4627mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
4628{
4629 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
4630
4631 return val & MVPP2_RXQ_OCCUPIED_MASK;
4632}
4633
4634/* Update Rx queue status with the number of occupied and available
4635 * Rx descriptor slots.
4636 */
4637static inline void
4638mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
4639 int used_count, int free_count)
4640{
4641	/* Decrement the number of used descriptors and increment
4642	 * the number of free descriptors.
4643 */
4644 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
4645
4646 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
4647}
4648
4649/* Get pointer to next RX descriptor to be processed by SW */
4650static inline struct mvpp2_rx_desc *
4651mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
4652{
4653 int rx_desc = rxq->next_desc_to_proc;
4654
4655 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
4656 prefetch(rxq->descs + rxq->next_desc_to_proc);
4657 return rxq->descs + rx_desc;
4658}
4659
4660/* Set rx queue offset */
4661static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
4662 int prxq, int offset)
4663{
4664 u32 val;
4665
4666 /* Convert offset from bytes to units of 32 bytes */
4667 offset = offset >> 5;
4668
4669 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
4670 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
4671
4672	/* Offset is in units of 32 bytes */
4673 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
4674 MVPP2_RXQ_PACKET_OFFSET_MASK);
4675
4676 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
4677}
4678
Marcin Wojtas3f518502014-07-10 16:52:13 -03004679/* Tx descriptors helper methods */
4680
Marcin Wojtas3f518502014-07-10 16:52:13 -03004681/* Get pointer to next Tx descriptor to be processed (send) by HW */
4682static struct mvpp2_tx_desc *
4683mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
4684{
4685 int tx_desc = txq->next_desc_to_proc;
4686
4687 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
4688 return txq->descs + tx_desc;
4689}
4690
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004691/* Update HW with number of aggregated Tx descriptors to be sent
4692 *
4693 * Called only from mvpp2_tx(), so migration is disabled, using
4694 * smp_processor_id() is OK.
4695 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004696static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
4697{
4698 /* aggregated access - relevant TXQ number is written in TX desc */
Thomas Petazzonia7868412017-03-07 16:53:13 +01004699 mvpp2_percpu_write(port->priv, smp_processor_id(),
4700 MVPP2_AGGR_TXQ_UPDATE_REG, pending);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004701}
4702
4703
4704/* Check if there are enough free descriptors in aggregated txq.
4705 * If not, update the number of occupied descriptors and repeat the check.
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004706 *
4707 * Called only from mvpp2_tx(), so migration is disabled, using
4708 * smp_processor_id() is OK.
Marcin Wojtas3f518502014-07-10 16:52:13 -03004709 */
4710static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
4711 struct mvpp2_tx_queue *aggr_txq, int num)
4712{
4713 if ((aggr_txq->count + num) > aggr_txq->size) {
4714 /* Update number of occupied aggregated Tx descriptors */
4715 int cpu = smp_processor_id();
4716 u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
4717
4718 aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
4719 }
4720
4721 if ((aggr_txq->count + num) > aggr_txq->size)
4722 return -ENOMEM;
4723
4724 return 0;
4725}
4726
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004727/* Reserved Tx descriptors allocation request
4728 *
4729 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
4730 * only by mvpp2_tx(), so migration is disabled, using
4731 * smp_processor_id() is OK.
4732 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004733static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
4734 struct mvpp2_tx_queue *txq, int num)
4735{
4736 u32 val;
Thomas Petazzonia7868412017-03-07 16:53:13 +01004737 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03004738
4739 val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
Thomas Petazzonia7868412017-03-07 16:53:13 +01004740 mvpp2_percpu_write(priv, cpu, MVPP2_TXQ_RSVD_REQ_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004741
Thomas Petazzonia7868412017-03-07 16:53:13 +01004742 val = mvpp2_percpu_read(priv, cpu, MVPP2_TXQ_RSVD_RSLT_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004743
4744 return val & MVPP2_TXQ_RSVD_RSLT_MASK;
4745}
4746
4747/* Check if there are enough reserved descriptors for transmission.
4748 * If not, request chunk of reserved descriptors and check again.
4749 */
4750static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
4751 struct mvpp2_tx_queue *txq,
4752 struct mvpp2_txq_pcpu *txq_pcpu,
4753 int num)
4754{
4755 int req, cpu, desc_count;
4756
4757 if (txq_pcpu->reserved_num >= num)
4758 return 0;
4759
4760 /* Not enough descriptors reserved! Update the reserved descriptor
4761 * count and check again.
4762 */
4763
4764 desc_count = 0;
4765 /* Compute total of used descriptors */
4766 for_each_present_cpu(cpu) {
4767 struct mvpp2_txq_pcpu *txq_pcpu_aux;
4768
4769 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
4770 desc_count += txq_pcpu_aux->count;
4771 desc_count += txq_pcpu_aux->reserved_num;
4772 }
4773
4774 req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
4775 desc_count += req;
4776
4777 if (desc_count >
4778 (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
4779 return -ENOMEM;
4780
4781 txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
4782
4783	/* OK, the descriptor count has been updated: check again. */
4784 if (txq_pcpu->reserved_num < num)
4785 return -ENOMEM;
4786 return 0;
4787}
4788
4789/* Release the last allocated Tx descriptor. Useful to handle DMA
4790 * mapping failures in the Tx path.
4791 */
4792static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
4793{
4794 if (txq->next_desc_to_proc == 0)
4795 txq->next_desc_to_proc = txq->last_desc - 1;
4796 else
4797 txq->next_desc_to_proc--;
4798}
4799
4800/* Set Tx descriptors fields relevant for CSUM calculation */
4801static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
4802 int ip_hdr_len, int l4_proto)
4803{
4804 u32 command;
4805
4806 /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
4807	 * G_L4_chk, L4_type are required only for checksum calculation
4808 */
4809 command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
4810 command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
4811 command |= MVPP2_TXD_IP_CSUM_DISABLE;
4812
4813 if (l3_proto == swab16(ETH_P_IP)) {
4814 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */
4815 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */
4816 } else {
4817 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */
4818 }
4819
4820 if (l4_proto == IPPROTO_TCP) {
4821 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */
4822 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4823 } else if (l4_proto == IPPROTO_UDP) {
4824 command |= MVPP2_TXD_L4_UDP; /* enable UDP */
4825 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */
4826 } else {
4827 command |= MVPP2_TXD_L4_CSUM_NOT;
4828 }
4829
4830 return command;
4831}
4832
4833/* Get number of sent descriptors and decrement counter.
4834 * The number of sent descriptors is returned.
4835 * Per-CPU access
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004836 *
4837 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
4838 * (migration disabled) and from the TX completion tasklet (migration
4839 * disabled) so using smp_processor_id() is OK.
Marcin Wojtas3f518502014-07-10 16:52:13 -03004840 */
4841static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
4842 struct mvpp2_tx_queue *txq)
4843{
4844 u32 val;
4845
4846 /* Reading status reg resets transmitted descriptor counter */
Thomas Petazzonia7868412017-03-07 16:53:13 +01004847 val = mvpp2_percpu_read(port->priv, smp_processor_id(),
4848 MVPP2_TXQ_SENT_REG(txq->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004849
4850 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
4851 MVPP2_TRANSMITTED_COUNT_OFFSET;
4852}
4853
Thomas Petazzonie0af22d2017-06-22 14:23:18 +02004854/* Called through on_each_cpu(), so runs on all CPUs, with migration
4855 * disabled, therefore using smp_processor_id() is OK.
4856 */
Marcin Wojtas3f518502014-07-10 16:52:13 -03004857static void mvpp2_txq_sent_counter_clear(void *arg)
4858{
4859 struct mvpp2_port *port = arg;
4860 int queue;
4861
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004862 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004863 int id = port->txqs[queue]->id;
4864
Thomas Petazzonia7868412017-03-07 16:53:13 +01004865 mvpp2_percpu_read(port->priv, smp_processor_id(),
4866 MVPP2_TXQ_SENT_REG(id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03004867 }
4868}
4869
4870/* Set max sizes for Tx queues */
4871static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
4872{
4873 u32 val, size, mtu;
4874 int txq, tx_port_num;
4875
4876 mtu = port->pkt_size * 8;
4877 if (mtu > MVPP2_TXP_MTU_MAX)
4878 mtu = MVPP2_TXP_MTU_MAX;
4879
4880	/* Workaround for wrong token bucket update: set MTU value = 3 * real MTU value */
4881 mtu = 3 * mtu;
4882
4883 /* Indirect access to registers */
4884 tx_port_num = mvpp2_egress_port(port);
4885 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
4886
4887 /* Set MTU */
4888 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
4889 val &= ~MVPP2_TXP_MTU_MAX;
4890 val |= mtu;
4891 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
4892
4893	/* TXP token size and all TXQs token size must be larger than MTU */
4894 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
4895 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
4896 if (size < mtu) {
4897 size = mtu;
4898 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
4899 val |= size;
4900 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
4901 }
4902
Thomas Petazzoni09f83972017-08-03 10:41:57 +02004903 for (txq = 0; txq < port->ntxqs; txq++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03004904 val = mvpp2_read(port->priv,
4905 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
4906 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
4907
4908 if (size < mtu) {
4909 size = mtu;
4910 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
4911 val |= size;
4912 mvpp2_write(port->priv,
4913 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
4914 val);
4915 }
4916 }
4917}
4918
4919/* Set the number of packets that will be received before an Rx interrupt
4920 * is generated by the HW.
4921 */
4922static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01004923 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004924{
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004925 int cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01004926
Thomas Petazzonif8b0d5f2017-02-21 11:28:03 +01004927 if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
4928 rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03004929
Thomas Petazzonia7868412017-03-07 16:53:13 +01004930 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
4931 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG,
4932 rxq->pkts_coal);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02004933
4934 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03004935}
4936
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004937/* For some reason in the LSP this is done on each CPU. Why ? */
4938static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
4939 struct mvpp2_tx_queue *txq)
4940{
4941 int cpu = get_cpu();
4942 u32 val;
4943
4944 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
4945 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
4946
4947 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
4948 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
4949 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val);
4950
4951 put_cpu();
4952}
4953
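/* Convert microseconds to clock cycles at clk_hz (and vice versa below),
 * saturating the result at U32_MAX.
 */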
Thomas Petazzoniab426762017-02-21 11:28:04 +01004954static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
4955{
4956 u64 tmp = (u64)clk_hz * usec;
4957
4958 do_div(tmp, USEC_PER_SEC);
4959
4960 return tmp > U32_MAX ? U32_MAX : tmp;
4961}
4962
4963static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
4964{
4965 u64 tmp = (u64)cycles * USEC_PER_SEC;
4966
4967 do_div(tmp, clk_hz);
4968
4969 return tmp > U32_MAX ? U32_MAX : tmp;
4970}
4971
Marcin Wojtas3f518502014-07-10 16:52:13 -03004972/* Set the time delay in usec before Rx interrupt */
4973static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01004974 struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03004975{
Thomas Petazzoniab426762017-02-21 11:28:04 +01004976 unsigned long freq = port->priv->tclk;
4977 u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004978
Thomas Petazzoniab426762017-02-21 11:28:04 +01004979 if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
4980 rxq->time_coal =
4981 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
4982
4983 /* re-evaluate to get actual register value */
4984 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
4985 }
4986
Marcin Wojtas3f518502014-07-10 16:52:13 -03004987 mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03004988}
4989
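/* Set the time delay in usec before a Tx-done interrupt */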
Thomas Petazzoni213f4282017-08-03 10:42:00 +02004990static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
4991{
4992 unsigned long freq = port->priv->tclk;
4993 u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
4994
4995 if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
4996 port->tx_time_coal =
4997 mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
4998
4999 /* re-evaluate to get actual register value */
5000 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
5001 }
5002
5003 mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
5004}
5005
Marcin Wojtas3f518502014-07-10 16:52:13 -03005006/* Free Tx queue skbuffs */
5007static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
5008 struct mvpp2_tx_queue *txq,
5009 struct mvpp2_txq_pcpu *txq_pcpu, int num)
5010{
5011 int i;
5012
5013 for (i = 0; i < num; i++) {
Thomas Petazzoni83544912016-12-21 11:28:49 +01005014 struct mvpp2_txq_pcpu_buf *tx_buf =
5015 txq_pcpu->buffs + txq_pcpu->txq_get_index;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005016
Thomas Petazzoni20396132017-03-07 16:53:00 +01005017 dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
Thomas Petazzoni83544912016-12-21 11:28:49 +01005018 tx_buf->size, DMA_TO_DEVICE);
Thomas Petazzoni36fb7432017-02-21 11:28:05 +01005019 if (tx_buf->skb)
5020 dev_kfree_skb_any(tx_buf->skb);
5021
5022 mvpp2_txq_inc_get(txq_pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005023 }
5024}
5025
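/* Return the Rx queue matching the most significant set bit in the cause bitmap */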
5026static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
5027 u32 cause)
5028{
5029 int queue = fls(cause) - 1;
5030
5031 return port->rxqs[queue];
5032}
5033
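/* Return the Tx queue matching the most significant set bit in the cause bitmap */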
5034static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
5035 u32 cause)
5036{
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005037 int queue = fls(cause) - 1;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005038
5039 return port->txqs[queue];
5040}
5041
5042/* Handle end of transmission */
5043static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
5044 struct mvpp2_txq_pcpu *txq_pcpu)
5045{
5046 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
5047 int tx_done;
5048
5049 if (txq_pcpu->cpu != smp_processor_id())
5050 netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
5051
5052 tx_done = mvpp2_txq_sent_desc_proc(port, txq);
5053 if (!tx_done)
5054 return;
5055 mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
5056
5057 txq_pcpu->count -= tx_done;
5058
5059 if (netif_tx_queue_stopped(nq))
5060 if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
5061 netif_tx_wake_queue(nq);
5062}
5063
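/* Process Tx completions on every queue flagged in cause and return the
 * number of descriptors still pending.
 */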
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005064static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
5065 int cpu)
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005066{
5067 struct mvpp2_tx_queue *txq;
5068 struct mvpp2_txq_pcpu *txq_pcpu;
5069 unsigned int tx_todo = 0;
5070
5071 while (cause) {
5072 txq = mvpp2_get_tx_queue(port, cause);
5073 if (!txq)
5074 break;
5075
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005076 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005077
5078 if (txq_pcpu->count) {
5079 mvpp2_txq_done(port, txq, txq_pcpu);
5080 tx_todo += txq_pcpu->count;
5081 }
5082
5083 cause &= ~(1 << txq->log_id);
5084 }
5085 return tx_todo;
5086}
5087
Marcin Wojtas3f518502014-07-10 16:52:13 -03005088/* Rx/Tx queue initialization/cleanup methods */
5089
5090/* Allocate and initialize descriptors for aggr TXQ */
5091static int mvpp2_aggr_txq_init(struct platform_device *pdev,
5092 struct mvpp2_tx_queue *aggr_txq,
5093 int desc_num, int cpu,
5094 struct mvpp2 *priv)
5095{
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005096 u32 txq_dma;
5097
Marcin Wojtas3f518502014-07-10 16:52:13 -03005098 /* Allocate memory for TX descriptors */
5099 aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
5100 desc_num * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005101 &aggr_txq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005102 if (!aggr_txq->descs)
5103 return -ENOMEM;
5104
Marcin Wojtas3f518502014-07-10 16:52:13 -03005105 aggr_txq->last_desc = aggr_txq->size - 1;
5106
5107	/* Aggr TXQ is not reset by HW: sync next_desc_to_proc with the HW index (workaround) */
5108 aggr_txq->next_desc_to_proc = mvpp2_read(priv,
5109 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
5110
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005111 /* Set Tx descriptors queue starting address indirect
5112 * access
5113 */
5114 if (priv->hw_version == MVPP21)
5115 txq_dma = aggr_txq->descs_dma;
5116 else
5117 txq_dma = aggr_txq->descs_dma >>
5118 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
5119
5120 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005121 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
5122
5123 return 0;
5124}
5125
5126/* Create a specified Rx queue */
5127static int mvpp2_rxq_init(struct mvpp2_port *port,
5128 struct mvpp2_rx_queue *rxq)
5129
5130{
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005131 u32 rxq_dma;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005132 int cpu;
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005133
Marcin Wojtas3f518502014-07-10 16:52:13 -03005134 rxq->size = port->rx_ring_size;
5135
5136 /* Allocate memory for RX descriptors */
5137 rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
5138 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005139 &rxq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005140 if (!rxq->descs)
5141 return -ENOMEM;
5142
Marcin Wojtas3f518502014-07-10 16:52:13 -03005143 rxq->last_desc = rxq->size - 1;
5144
5145 /* Zero occupied and non-occupied counters - direct access */
5146 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
5147
5148 /* Set Rx descriptors queue starting address - indirect access */
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005149 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005150 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
Thomas Petazzonib02f31f2017-03-07 16:53:12 +01005151 if (port->priv->hw_version == MVPP21)
5152 rxq_dma = rxq->descs_dma;
5153 else
5154 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005155 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
5156 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
5157 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005158 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005159
5160 /* Set Offset */
5161 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
5162
5163 /* Set coalescing pkts and time */
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01005164 mvpp2_rx_pkts_coal_set(port, rxq);
5165 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005166
5167 /* Add number of descriptors ready for receiving packets */
5168 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
5169
5170 return 0;
5171}
5172
5173/* Push packets received by the RXQ to BM pool */
5174static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
5175 struct mvpp2_rx_queue *rxq)
5176{
5177 int rx_received, i;
5178
5179 rx_received = mvpp2_rxq_received(port, rxq->id);
5180 if (!rx_received)
5181 return;
5182
5183 for (i = 0; i < rx_received; i++) {
5184 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005185 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5186 int pool;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005187
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005188 pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5189 MVPP2_RXD_BM_POOL_ID_OFFS;
5190
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02005191 mvpp2_bm_pool_put(port, pool,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005192 mvpp2_rxdesc_dma_addr_get(port, rx_desc),
5193 mvpp2_rxdesc_cookie_get(port, rx_desc));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005194 }
5195 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
5196}
5197
5198/* Cleanup Rx queue */
5199static void mvpp2_rxq_deinit(struct mvpp2_port *port,
5200 struct mvpp2_rx_queue *rxq)
5201{
Thomas Petazzonia7868412017-03-07 16:53:13 +01005202 int cpu;
5203
Marcin Wojtas3f518502014-07-10 16:52:13 -03005204 mvpp2_rxq_drop_pkts(port, rxq);
5205
5206 if (rxq->descs)
5207 dma_free_coherent(port->dev->dev.parent,
5208 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
5209 rxq->descs,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005210 rxq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005211
5212 rxq->descs = NULL;
5213 rxq->last_desc = 0;
5214 rxq->next_desc_to_proc = 0;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005215 rxq->descs_dma = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005216
5217 /* Clear Rx descriptors queue starting address and size;
5218 * free descriptor number
5219 */
5220 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005221 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005222 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id);
5223 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0);
5224 mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005225 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005226}
5227
5228/* Create and initialize a Tx queue */
5229static int mvpp2_txq_init(struct mvpp2_port *port,
5230 struct mvpp2_tx_queue *txq)
5231{
5232 u32 val;
5233 int cpu, desc, desc_per_txq, tx_port_num;
5234 struct mvpp2_txq_pcpu *txq_pcpu;
5235
5236 txq->size = port->tx_ring_size;
5237
5238 /* Allocate memory for Tx descriptors */
5239 txq->descs = dma_alloc_coherent(port->dev->dev.parent,
5240 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005241 &txq->descs_dma, GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005242 if (!txq->descs)
5243 return -ENOMEM;
5244
Marcin Wojtas3f518502014-07-10 16:52:13 -03005245 txq->last_desc = txq->size - 1;
5246
5247 /* Set Tx descriptors queue starting address - indirect access */
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005248 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005249 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5250 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG,
5251 txq->descs_dma);
5252 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG,
5253 txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
5254 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_INDEX_REG, 0);
5255 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_RSVD_CLR_REG,
5256 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
5257 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PENDING_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005258 val &= ~MVPP2_TXQ_PENDING_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005259 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PENDING_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005260
5261 /* Calculate base address in prefetch buffer. We reserve 16 descriptors
5262 * for each existing TXQ.
5263 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
5264	 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
5265 */
5266 desc_per_txq = 16;
5267 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
5268 (txq->log_id * desc_per_txq);
5269
Thomas Petazzonia7868412017-03-07 16:53:13 +01005270 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG,
5271 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
5272 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005273 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005274
5275 /* WRR / EJP configuration - indirect access */
5276 tx_port_num = mvpp2_egress_port(port);
5277 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
5278
5279 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
5280 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
5281 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
5282 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
5283 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
5284
5285 val = MVPP2_TXQ_TOKEN_SIZE_MAX;
5286 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
5287 val);
5288
5289 for_each_present_cpu(cpu) {
5290 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5291 txq_pcpu->size = txq->size;
Markus Elfring02c91ec2017-04-17 08:09:07 +02005292 txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
5293 sizeof(*txq_pcpu->buffs),
5294 GFP_KERNEL);
Thomas Petazzoni83544912016-12-21 11:28:49 +01005295 if (!txq_pcpu->buffs)
Markus Elfring20b1e162017-04-17 12:58:33 +02005296 goto cleanup;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005297
5298 txq_pcpu->count = 0;
5299 txq_pcpu->reserved_num = 0;
5300 txq_pcpu->txq_put_index = 0;
5301 txq_pcpu->txq_get_index = 0;
5302 }
5303
5304 return 0;
Markus Elfring20b1e162017-04-17 12:58:33 +02005305cleanup:
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005306 for_each_present_cpu(cpu) {
5307 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Thomas Petazzoni83544912016-12-21 11:28:49 +01005308 kfree(txq_pcpu->buffs);
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005309 }
5310
5311 dma_free_coherent(port->dev->dev.parent,
5312 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005313 txq->descs, txq->descs_dma);
Marcin Wojtas71ce3912015-08-06 19:00:29 +02005314
5315 return -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005316}
5317
5318/* Free allocated TXQ resources */
5319static void mvpp2_txq_deinit(struct mvpp2_port *port,
5320 struct mvpp2_tx_queue *txq)
5321{
5322 struct mvpp2_txq_pcpu *txq_pcpu;
5323 int cpu;
5324
5325 for_each_present_cpu(cpu) {
5326 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
Thomas Petazzoni83544912016-12-21 11:28:49 +01005327 kfree(txq_pcpu->buffs);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005328 }
5329
5330 if (txq->descs)
5331 dma_free_coherent(port->dev->dev.parent,
5332 txq->size * MVPP2_DESC_ALIGNED_SIZE,
Thomas Petazzoni20396132017-03-07 16:53:00 +01005333 txq->descs, txq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005334
5335 txq->descs = NULL;
5336 txq->last_desc = 0;
5337 txq->next_desc_to_proc = 0;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005338 txq->descs_dma = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005339
5340 /* Set minimum bandwidth for disabled TXQs */
5341 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
5342
5343 /* Set Tx descriptors queue starting address and size */
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005344 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005345 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5346 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0);
5347 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005348 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005349}
5350
5351	/* Drain and clean up one Tx queue */
5352static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
5353{
5354 struct mvpp2_txq_pcpu *txq_pcpu;
5355 int delay, pending, cpu;
5356 u32 val;
5357
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005358 cpu = get_cpu();
Thomas Petazzonia7868412017-03-07 16:53:13 +01005359 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id);
5360 val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005361 val |= MVPP2_TXQ_DRAIN_EN_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005362 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005363
5364 /* The napi queue has been stopped so wait for all packets
5365 * to be transmitted.
5366 */
5367 delay = 0;
5368 do {
5369 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
5370 netdev_warn(port->dev,
5371 "port %d: cleaning queue %d timed out\n",
5372 port->id, txq->log_id);
5373 break;
5374 }
5375 mdelay(1);
5376 delay++;
5377
Thomas Petazzonia7868412017-03-07 16:53:13 +01005378 pending = mvpp2_percpu_read(port->priv, cpu,
5379 MVPP2_TXQ_PENDING_REG);
5380 pending &= MVPP2_TXQ_PENDING_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005381 } while (pending);
5382
5383 val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005384 mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val);
Thomas Petazzonia704bb52017-06-10 23:18:22 +02005385 put_cpu();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005386
5387 for_each_present_cpu(cpu) {
5388 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
5389
5390 /* Release all packets */
5391 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
5392
5393 /* Reset queue */
5394 txq_pcpu->count = 0;
5395 txq_pcpu->txq_put_index = 0;
5396 txq_pcpu->txq_get_index = 0;
5397 }
5398}
5399
5400/* Cleanup all Tx queues */
5401static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
5402{
5403 struct mvpp2_tx_queue *txq;
5404 int queue;
5405 u32 val;
5406
5407 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
5408
5409 /* Reset Tx ports and delete Tx queues */
5410 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
5411 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5412
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005413 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005414 txq = port->txqs[queue];
5415 mvpp2_txq_clean(port, txq);
5416 mvpp2_txq_deinit(port, txq);
5417 }
5418
5419 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5420
5421 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
5422 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
5423}
5424
5425/* Cleanup all Rx queues */
5426static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
5427{
5428 int queue;
5429
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005430 for (queue = 0; queue < port->nrxqs; queue++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005431 mvpp2_rxq_deinit(port, port->rxqs[queue]);
5432}
5433
5434/* Init all Rx queues for port */
5435static int mvpp2_setup_rxqs(struct mvpp2_port *port)
5436{
5437 int queue, err;
5438
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005439 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005440 err = mvpp2_rxq_init(port, port->rxqs[queue]);
5441 if (err)
5442 goto err_cleanup;
5443 }
5444 return 0;
5445
5446err_cleanup:
5447 mvpp2_cleanup_rxqs(port);
5448 return err;
5449}
5450
5451/* Init all tx queues for port */
5452static int mvpp2_setup_txqs(struct mvpp2_port *port)
5453{
5454 struct mvpp2_tx_queue *txq;
5455 int queue, err;
5456
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005457 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005458 txq = port->txqs[queue];
5459 err = mvpp2_txq_init(port, txq);
5460 if (err)
5461 goto err_cleanup;
5462 }
5463
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005464 if (port->has_tx_irqs) {
5465 mvpp2_tx_time_coal_set(port);
5466 for (queue = 0; queue < port->ntxqs; queue++) {
5467 txq = port->txqs[queue];
5468 mvpp2_tx_pkts_coal_set(port, txq);
5469 }
5470 }
5471
Marcin Wojtas3f518502014-07-10 16:52:13 -03005472 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
5473 return 0;
5474
5475err_cleanup:
5476 mvpp2_cleanup_txqs(port);
5477 return err;
5478}
5479
5480/* The callback for per-port interrupt */
5481static irqreturn_t mvpp2_isr(int irq, void *dev_id)
5482{
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005483 struct mvpp2_queue_vector *qv = dev_id;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005484
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005485 mvpp2_qvec_interrupt_disable(qv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005486
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005487 napi_schedule(&qv->napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005488
5489 return IRQ_HANDLED;
5490}
5491
5492/* Adjust link */
5493static void mvpp2_link_event(struct net_device *dev)
5494{
5495 struct mvpp2_port *port = netdev_priv(dev);
Philippe Reynes8e072692016-06-28 00:08:11 +02005496 struct phy_device *phydev = dev->phydev;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005497 int status_change = 0;
5498 u32 val;
5499
5500 if (phydev->link) {
5501 if ((port->speed != phydev->speed) ||
5502 (port->duplex != phydev->duplex)) {
5503 u32 val;
5504
5505 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5506 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
5507 MVPP2_GMAC_CONFIG_GMII_SPEED |
5508 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
5509 MVPP2_GMAC_AN_SPEED_EN |
5510 MVPP2_GMAC_AN_DUPLEX_EN);
5511
5512 if (phydev->duplex)
5513 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5514
5515 if (phydev->speed == SPEED_1000)
5516 val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
Thomas Petazzoni2add5112014-07-27 23:21:35 +02005517 else if (phydev->speed == SPEED_100)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005518 val |= MVPP2_GMAC_CONFIG_MII_SPEED;
5519
5520 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5521
5522 port->duplex = phydev->duplex;
5523 port->speed = phydev->speed;
5524 }
5525 }
5526
5527 if (phydev->link != port->link) {
5528 if (!phydev->link) {
5529 port->duplex = -1;
5530 port->speed = 0;
5531 }
5532
5533 port->link = phydev->link;
5534 status_change = 1;
5535 }
5536
5537 if (status_change) {
5538 if (phydev->link) {
5539 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5540 val |= (MVPP2_GMAC_FORCE_LINK_PASS |
5541 MVPP2_GMAC_FORCE_LINK_DOWN);
5542 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5543 mvpp2_egress_enable(port);
5544 mvpp2_ingress_enable(port);
5545 } else {
5546 mvpp2_ingress_disable(port);
5547 mvpp2_egress_disable(port);
5548 }
5549 phy_print_status(phydev);
5550 }
5551}
5552
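/* Arm the per-CPU Tx-done hrtimer if it is not already scheduled */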
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005553static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
5554{
5555 ktime_t interval;
5556
5557 if (!port_pcpu->timer_scheduled) {
5558 port_pcpu->timer_scheduled = true;
Thomas Gleixner8b0e1952016-12-25 12:30:41 +01005559 interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005560 hrtimer_start(&port_pcpu->tx_done_timer, interval,
5561 HRTIMER_MODE_REL_PINNED);
5562 }
5563}
5564
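/* Tx-done tasklet callback: process completions on all Tx queues and
 * re-arm the timer if work remains.
 */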
5565static void mvpp2_tx_proc_cb(unsigned long data)
5566{
5567 struct net_device *dev = (struct net_device *)data;
5568 struct mvpp2_port *port = netdev_priv(dev);
5569 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5570 unsigned int tx_todo, cause;
5571
5572 if (!netif_running(dev))
5573 return;
5574 port_pcpu->timer_scheduled = false;
5575
5576 /* Process all the Tx queues */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02005577 cause = (1 << port->ntxqs) - 1;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005578 tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005579
5580 /* Set the timer in case not all the packets were processed */
5581 if (tx_todo)
5582 mvpp2_timer_set(port_pcpu);
5583}
5584
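/* hrtimer callback: defer Tx-done processing to the tasklet */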
5585static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
5586{
5587 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
5588 struct mvpp2_port_pcpu,
5589 tx_done_timer);
5590
5591 tasklet_schedule(&port_pcpu->tx_done_tasklet);
5592
5593 return HRTIMER_NORESTART;
5594}
5595
Marcin Wojtas3f518502014-07-10 16:52:13 -03005596/* Main RX/TX processing routines */
5597
5598/* Display more error info */
5599static void mvpp2_rx_error(struct mvpp2_port *port,
5600 struct mvpp2_rx_desc *rx_desc)
5601{
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005602 u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
5603 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005604
5605 switch (status & MVPP2_RXD_ERR_CODE_MASK) {
5606 case MVPP2_RXD_ERR_CRC:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005607 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
5608 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005609 break;
5610 case MVPP2_RXD_ERR_OVERRUN:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005611 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
5612 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005613 break;
5614 case MVPP2_RXD_ERR_RESOURCE:
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005615 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
5616 status, sz);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005617 break;
5618 }
5619}
5620
5621/* Handle RX checksum offload */
5622static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
5623 struct sk_buff *skb)
5624{
5625 if (((status & MVPP2_RXD_L3_IP4) &&
5626 !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
5627 (status & MVPP2_RXD_L3_IP6))
5628 if (((status & MVPP2_RXD_L4_UDP) ||
5629 (status & MVPP2_RXD_L4_TCP)) &&
5630 (status & MVPP2_RXD_L4_CSUM_OK)) {
5631 skb->csum = 0;
5632 skb->ip_summed = CHECKSUM_UNNECESSARY;
5633 return;
5634 }
5635
5636 skb->ip_summed = CHECKSUM_NONE;
5637}
5638
5639/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
5640static int mvpp2_rx_refill(struct mvpp2_port *port,
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005641 struct mvpp2_bm_pool *bm_pool, int pool)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005642{
Thomas Petazzoni20396132017-03-07 16:53:00 +01005643 dma_addr_t dma_addr;
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01005644 phys_addr_t phys_addr;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005645 void *buf;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005646
Marcin Wojtas3f518502014-07-10 16:52:13 -03005647 /* No recycle or too many buffers are in use, so allocate a new skb */
Thomas Petazzoni4e4a1052017-03-07 16:53:04 +01005648 buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
5649 GFP_ATOMIC);
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005650 if (!buf)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005651 return -ENOMEM;
5652
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02005653 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Thomas Petazzoni7ef7e1d2017-02-21 11:28:07 +01005654
Marcin Wojtas3f518502014-07-10 16:52:13 -03005655 return 0;
5656}
5657
5658/* Handle tx checksum */
5659static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
5660{
5661 if (skb->ip_summed == CHECKSUM_PARTIAL) {
5662 int ip_hdr_len = 0;
5663 u8 l4_proto;
5664
5665 if (skb->protocol == htons(ETH_P_IP)) {
5666 struct iphdr *ip4h = ip_hdr(skb);
5667
5668 /* Calculate IPv4 checksum and L4 checksum */
5669 ip_hdr_len = ip4h->ihl;
5670 l4_proto = ip4h->protocol;
5671 } else if (skb->protocol == htons(ETH_P_IPV6)) {
5672 struct ipv6hdr *ip6h = ipv6_hdr(skb);
5673
5674 /* Read l4_protocol from one of IPv6 extra headers */
5675 if (skb_network_header_len(skb) > 0)
5676 ip_hdr_len = (skb_network_header_len(skb) >> 2);
5677 l4_proto = ip6h->nexthdr;
5678 } else {
5679 return MVPP2_TXD_L4_CSUM_NOT;
5680 }
5681
5682 return mvpp2_txq_desc_csum(skb_network_offset(skb),
5683 skb->protocol, ip_hdr_len, l4_proto);
5684 }
5685
5686 return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
5687}
5688
Marcin Wojtas3f518502014-07-10 16:52:13 -03005689/* Main rx processing */
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005690static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
5691 int rx_todo, struct mvpp2_rx_queue *rxq)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005692{
5693 struct net_device *dev = port->dev;
Marcin Wojtasb5015852015-12-03 15:20:51 +01005694 int rx_received;
5695 int rx_done = 0;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005696 u32 rcvd_pkts = 0;
5697 u32 rcvd_bytes = 0;
5698
5699 /* Get number of received packets and clamp the to-do */
5700 rx_received = mvpp2_rxq_received(port, rxq->id);
5701 if (rx_todo > rx_received)
5702 rx_todo = rx_received;
5703
Marcin Wojtasb5015852015-12-03 15:20:51 +01005704 while (rx_done < rx_todo) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005705 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
5706 struct mvpp2_bm_pool *bm_pool;
5707 struct sk_buff *skb;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005708 unsigned int frag_size;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005709 dma_addr_t dma_addr;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005710 phys_addr_t phys_addr;
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005711 u32 rx_status;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005712 int pool, rx_bytes, err;
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005713 void *data;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005714
Marcin Wojtasb5015852015-12-03 15:20:51 +01005715 rx_done++;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005716 rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
5717 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
5718 rx_bytes -= MVPP2_MH_SIZE;
5719 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
5720 phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
5721 data = (void *)phys_to_virt(phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005722
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005723 pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
5724 MVPP2_RXD_BM_POOL_ID_OFFS;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005725 bm_pool = &port->priv->bm_pools[pool];
Marcin Wojtas3f518502014-07-10 16:52:13 -03005726
5727 /* In case of an error, release the requested buffer pointer
5728 * to the Buffer Manager. This request process is controlled
5729 * by the hardware, and the information about the buffer is
5730		 * provided by the RX descriptor.
5731 */
5732 if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
Markus Elfring8a524882017-04-17 10:52:02 +02005733err_drop_frame:
Marcin Wojtas3f518502014-07-10 16:52:13 -03005734 dev->stats.rx_errors++;
5735 mvpp2_rx_error(port, rx_desc);
Marcin Wojtasb5015852015-12-03 15:20:51 +01005736 /* Return the buffer to the pool */
Thomas Petazzoni7d7627b2017-06-22 14:23:20 +02005737 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005738 continue;
5739 }
5740
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005741 if (bm_pool->frag_size > PAGE_SIZE)
5742 frag_size = 0;
5743 else
5744 frag_size = bm_pool->frag_size;
5745
5746 skb = build_skb(data, frag_size);
5747 if (!skb) {
5748 netdev_warn(port->dev, "skb build failed\n");
5749 goto err_drop_frame;
5750 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03005751
Thomas Petazzoni56b8aae2017-06-10 23:18:21 +02005752 err = mvpp2_rx_refill(port, bm_pool, pool);
Marcin Wojtasb5015852015-12-03 15:20:51 +01005753 if (err) {
5754 netdev_err(port->dev, "failed to refill BM pools\n");
5755 goto err_drop_frame;
5756 }
5757
Thomas Petazzoni20396132017-03-07 16:53:00 +01005758 dma_unmap_single(dev->dev.parent, dma_addr,
Marcin Wojtas4229d502015-12-03 15:20:50 +01005759 bm_pool->buf_size, DMA_FROM_DEVICE);
5760
Marcin Wojtas3f518502014-07-10 16:52:13 -03005761 rcvd_pkts++;
5762 rcvd_bytes += rx_bytes;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005763
Thomas Petazzoni0e037282017-02-21 11:28:12 +01005764 skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005765 skb_put(skb, rx_bytes);
5766 skb->protocol = eth_type_trans(skb, dev);
5767 mvpp2_rx_csum(port, rx_status, skb);
5768
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005769 napi_gro_receive(napi, skb);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005770 }
5771
5772 if (rcvd_pkts) {
5773 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5774
5775 u64_stats_update_begin(&stats->syncp);
5776 stats->rx_packets += rcvd_pkts;
5777 stats->rx_bytes += rcvd_bytes;
5778 u64_stats_update_end(&stats->syncp);
5779 }
5780
5781 /* Update Rx queue management counters */
5782 wmb();
Marcin Wojtasb5015852015-12-03 15:20:51 +01005783 mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005784
5785 return rx_todo;
5786}
5787
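/* Unmap the buffer attached to a Tx descriptor and put the descriptor back */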
5788static inline void
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005789tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
Marcin Wojtas3f518502014-07-10 16:52:13 -03005790 struct mvpp2_tx_desc *desc)
5791{
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005792 dma_addr_t buf_dma_addr =
5793 mvpp2_txdesc_dma_addr_get(port, desc);
5794 size_t buf_sz =
5795 mvpp2_txdesc_size_get(port, desc);
5796 dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
5797 buf_sz, DMA_TO_DEVICE);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005798 mvpp2_txq_desc_put(txq);
5799}
5800
5801/* Handle tx fragmentation processing */
5802static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5803 struct mvpp2_tx_queue *aggr_txq,
5804 struct mvpp2_tx_queue *txq)
5805{
5806 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5807 struct mvpp2_tx_desc *tx_desc;
5808 int i;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005809 dma_addr_t buf_dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005810
5811 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5812 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5813 void *addr = page_address(frag->page.p) + frag->page_offset;
5814
5815 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005816 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5817 mvpp2_txdesc_size_set(port, tx_desc, frag->size);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005818
Thomas Petazzoni20396132017-03-07 16:53:00 +01005819 buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005820 frag->size,
5821 DMA_TO_DEVICE);
Thomas Petazzoni20396132017-03-07 16:53:00 +01005822 if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005823 mvpp2_txq_desc_put(txq);
Markus Elfring32bae632017-04-17 11:36:34 +02005824 goto cleanup;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005825 }
5826
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005827 mvpp2_txdesc_offset_set(port, tx_desc,
5828 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5829 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5830 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005831
5832 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5833 /* Last descriptor */
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005834 mvpp2_txdesc_cmd_set(port, tx_desc,
5835 MVPP2_TXD_L_DESC);
5836 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005837 } else {
5838 /* Descriptor in the middle: Not First, Not Last */
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005839 mvpp2_txdesc_cmd_set(port, tx_desc, 0);
5840 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005841 }
5842 }
5843
5844 return 0;
Markus Elfring32bae632017-04-17 11:36:34 +02005845cleanup:
Marcin Wojtas3f518502014-07-10 16:52:13 -03005846 /* Release all descriptors that were used to map fragments of
5847 * this packet, as well as the corresponding DMA mappings
5848 */
5849 for (i = i - 1; i >= 0; i--) {
5850 tx_desc = txq->descs + i;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005851 tx_desc_unmap_put(port, txq, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005852 }
5853
5854 return -ENOMEM;
5855}
5856
5857/* Main tx processing */
5858static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5859{
5860 struct mvpp2_port *port = netdev_priv(dev);
5861 struct mvpp2_tx_queue *txq, *aggr_txq;
5862 struct mvpp2_txq_pcpu *txq_pcpu;
5863 struct mvpp2_tx_desc *tx_desc;
Thomas Petazzoni20396132017-03-07 16:53:00 +01005864 dma_addr_t buf_dma_addr;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005865 int frags = 0;
5866 u16 txq_id;
5867 u32 tx_cmd;
5868
5869 txq_id = skb_get_queue_mapping(skb);
5870 txq = port->txqs[txq_id];
5871 txq_pcpu = this_cpu_ptr(txq->pcpu);
5872 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
5873
5874 frags = skb_shinfo(skb)->nr_frags + 1;
5875
5876 /* Check number of available descriptors */
5877 if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
5878 mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
5879 txq_pcpu, frags)) {
5880 frags = 0;
5881 goto out;
5882 }
5883
5884 /* Get a descriptor for the first part of the packet */
5885 tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005886 mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
5887 mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005888
Thomas Petazzoni20396132017-03-07 16:53:00 +01005889 buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005890 skb_headlen(skb), DMA_TO_DEVICE);
Thomas Petazzoni20396132017-03-07 16:53:00 +01005891 if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03005892 mvpp2_txq_desc_put(txq);
5893 frags = 0;
5894 goto out;
5895 }
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005896
5897 mvpp2_txdesc_offset_set(port, tx_desc,
5898 buf_dma_addr & MVPP2_TX_DESC_ALIGN);
5899 mvpp2_txdesc_dma_addr_set(port, tx_desc,
5900 buf_dma_addr & ~MVPP2_TX_DESC_ALIGN);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005901
5902 tx_cmd = mvpp2_skb_tx_csum(port, skb);
5903
5904 if (frags == 1) {
5905 /* First and Last descriptor */
5906 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005907 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5908 mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005909 } else {
5910 /* First but not Last */
5911 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005912 mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
5913 mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005914
5915 /* Continue with other skb fragments */
5916 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
Thomas Petazzoniac3dd2772017-03-07 16:53:05 +01005917 tx_desc_unmap_put(port, txq, tx_desc);
Marcin Wojtas3f518502014-07-10 16:52:13 -03005918 frags = 0;
5919 goto out;
5920 }
5921 }
5922
5923 txq_pcpu->reserved_num -= frags;
5924 txq_pcpu->count += frags;
5925 aggr_txq->count += frags;
5926
5927 /* Enable transmit */
5928 wmb();
5929 mvpp2_aggr_txq_pend_desc_add(port, frags);
5930
5931 if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
5932 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
5933
5934 netif_tx_stop_queue(nq);
5935 }
5936out:
5937 if (frags > 0) {
5938 struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
5939
5940 u64_stats_update_begin(&stats->syncp);
5941 stats->tx_packets++;
5942 stats->tx_bytes += skb->len;
5943 u64_stats_update_end(&stats->syncp);
5944 } else {
5945 dev->stats.tx_dropped++;
5946 dev_kfree_skb_any(skb);
5947 }
5948
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005949 /* Finalize TX processing */
5950 if (txq_pcpu->count >= txq->done_pkts_coal)
5951 mvpp2_txq_done(port, txq, txq_pcpu);
5952
5953 /* Set the timer in case not all frags were processed */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005954 if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
5955 txq_pcpu->count > 0) {
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005956 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5957
5958 mvpp2_timer_set(port_pcpu);
5959 }
5960
Marcin Wojtas3f518502014-07-10 16:52:13 -03005961 return NETDEV_TX_OK;
5962}
5963
5964static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5965{
5966 if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
5967 netdev_err(dev, "FCS error\n");
5968 if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
5969 netdev_err(dev, "rx fifo overrun error\n");
5970 if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
5971 netdev_err(dev, "tx fifo underrun error\n");
5972}
5973
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005974static int mvpp2_poll(struct napi_struct *napi, int budget)
Marcin Wojtas3f518502014-07-10 16:52:13 -03005975{
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005976 u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02005977 int rx_done = 0;
5978 struct mvpp2_port *port = netdev_priv(napi->dev);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005979 struct mvpp2_queue_vector *qv;
Thomas Petazzonia7868412017-03-07 16:53:13 +01005980 int cpu = smp_processor_id();
Marcin Wojtas3f518502014-07-10 16:52:13 -03005981
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02005982 qv = container_of(napi, struct mvpp2_queue_vector, napi);
5983
Marcin Wojtas3f518502014-07-10 16:52:13 -03005984 /* Rx/Tx cause register
5985 *
5986 * Bits 0-15: each bit indicates received packets on the Rx queue
5987 * (bit 0 is for Rx queue 0).
5988 *
5989 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
5990 * (bit 16 is for Tx queue 0).
5991 *
5992 * Each CPU has its own Rx/Tx cause register
5993 */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005994 cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id,
Thomas Petazzonia7868412017-03-07 16:53:13 +01005995 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
Marcin Wojtas3f518502014-07-10 16:52:13 -03005996
Thomas Petazzoni213f4282017-08-03 10:42:00 +02005997 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
Marcin Wojtas3f518502014-07-10 16:52:13 -03005998 if (cause_misc) {
5999 mvpp2_cause_error(port->dev, cause_misc);
6000
6001 /* Clear the cause register */
6002 mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01006003 mvpp2_percpu_write(port->priv, cpu,
6004 MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
6005 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006006 }
6007
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006008 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
6009 if (cause_tx) {
6010 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
6011 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
6012 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006013
6014 /* Process RX packets */
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006015 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
6016 cause_rx <<= qv->first_rxq;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006017 cause_rx |= qv->pending_cause_rx;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006018 while (cause_rx && budget > 0) {
6019 int count;
6020 struct mvpp2_rx_queue *rxq;
6021
6022 rxq = mvpp2_get_rx_queue(port, cause_rx);
6023 if (!rxq)
6024 break;
6025
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006026 count = mvpp2_rx(port, napi, budget, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006027 rx_done += count;
6028 budget -= count;
6029 if (budget > 0) {
6030 /* Clear the bit associated to this Rx queue
6031 * so that next iteration will continue from
6032 * the next Rx queue.
6033 */
6034 cause_rx &= ~(1 << rxq->logic_rxq);
6035 }
6036 }
6037
6038 if (budget > 0) {
6039 cause_rx = 0;
Eric Dumazet6ad20162017-01-30 08:22:01 -08006040 napi_complete_done(napi, rx_done);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006041
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006042 mvpp2_qvec_interrupt_enable(qv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006043 }
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006044 qv->pending_cause_rx = cause_rx;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006045 return rx_done;
6046}
6047
6048/* Set hw internals when starting port */
6049static void mvpp2_start_dev(struct mvpp2_port *port)
6050{
Philippe Reynes8e072692016-06-28 00:08:11 +02006051 struct net_device *ndev = port->dev;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006052 int i;
Philippe Reynes8e072692016-06-28 00:08:11 +02006053
Marcin Wojtas3f518502014-07-10 16:52:13 -03006054 mvpp2_gmac_max_rx_size_set(port);
6055 mvpp2_txp_max_tx_size_set(port);
6056
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006057 for (i = 0; i < port->nqvecs; i++)
6058 napi_enable(&port->qvecs[i].napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006059
6060 /* Enable interrupts on all CPUs */
6061 mvpp2_interrupts_enable(port);
6062
Antoine Ténart2055d622017-08-22 19:08:23 +02006063 mvpp2_port_mii_set(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006064 mvpp2_port_enable(port);
Philippe Reynes8e072692016-06-28 00:08:11 +02006065 phy_start(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006066 netif_tx_start_all_queues(port->dev);
6067}
6068
6069/* Set hw internals when stopping port */
6070static void mvpp2_stop_dev(struct mvpp2_port *port)
6071{
Philippe Reynes8e072692016-06-28 00:08:11 +02006072 struct net_device *ndev = port->dev;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006073 int i;
Philippe Reynes8e072692016-06-28 00:08:11 +02006074
Marcin Wojtas3f518502014-07-10 16:52:13 -03006075	/* Stop new packets from arriving at the RXQs */
6076 mvpp2_ingress_disable(port);
6077
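	/* Give frames already accepted by the RXQs a short time to drain
	 * before interrupts are masked; the 10 ms figure below is a heuristic
	 * settle delay, not a documented hardware requirement.
	 */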
6078 mdelay(10);
6079
6080 /* Disable interrupts on all CPUs */
6081 mvpp2_interrupts_disable(port);
6082
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006083 for (i = 0; i < port->nqvecs; i++)
6084 napi_disable(&port->qvecs[i].napi);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006085
6086 netif_carrier_off(port->dev);
6087 netif_tx_stop_all_queues(port->dev);
6088
6089 mvpp2_egress_disable(port);
6090 mvpp2_port_disable(port);
Philippe Reynes8e072692016-06-28 00:08:11 +02006091 phy_stop(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006092}
6093
Marcin Wojtas3f518502014-07-10 16:52:13 -03006094static int mvpp2_check_ringparam_valid(struct net_device *dev,
6095 struct ethtool_ringparam *ring)
6096{
6097 u16 new_rx_pending = ring->rx_pending;
6098 u16 new_tx_pending = ring->tx_pending;
6099
6100 if (ring->rx_pending == 0 || ring->tx_pending == 0)
6101 return -EINVAL;
6102
6103 if (ring->rx_pending > MVPP2_MAX_RXD)
6104 new_rx_pending = MVPP2_MAX_RXD;
6105 else if (!IS_ALIGNED(ring->rx_pending, 16))
6106 new_rx_pending = ALIGN(ring->rx_pending, 16);
6107
6108 if (ring->tx_pending > MVPP2_MAX_TXD)
6109 new_tx_pending = MVPP2_MAX_TXD;
6110 else if (!IS_ALIGNED(ring->tx_pending, 32))
6111 new_tx_pending = ALIGN(ring->tx_pending, 32);
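	/* For example, a requested rx_pending of 100 would be rounded up to
	 * 112 (next multiple of 16) and a tx_pending of 100 up to 128 (next
	 * multiple of 32); the caller is informed of the adjustment below.
	 */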
6112
6113 if (ring->rx_pending != new_rx_pending) {
6114		netdev_info(dev, "illegal Rx ring size value %d, rounding to %d\n",
6115 ring->rx_pending, new_rx_pending);
6116 ring->rx_pending = new_rx_pending;
6117 }
6118
6119 if (ring->tx_pending != new_tx_pending) {
6120		netdev_info(dev, "illegal Tx ring size value %d, rounding to %d\n",
6121 ring->tx_pending, new_tx_pending);
6122 ring->tx_pending = new_tx_pending;
6123 }
6124
6125 return 0;
6126}
6127
Thomas Petazzoni26975822017-03-07 16:53:14 +01006128static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006129{
6130 u32 mac_addr_l, mac_addr_m, mac_addr_h;
6131
6132 mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
6133 mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
6134 mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
6135 addr[0] = (mac_addr_h >> 24) & 0xFF;
6136 addr[1] = (mac_addr_h >> 16) & 0xFF;
6137 addr[2] = (mac_addr_h >> 8) & 0xFF;
6138 addr[3] = mac_addr_h & 0xFF;
6139 addr[4] = mac_addr_m & 0xFF;
6140 addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
6141}
6142
6143static int mvpp2_phy_connect(struct mvpp2_port *port)
6144{
6145 struct phy_device *phy_dev;
6146
6147 phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
6148 port->phy_interface);
6149 if (!phy_dev) {
6150 netdev_err(port->dev, "cannot connect to phy\n");
6151 return -ENODEV;
6152 }
6153 phy_dev->supported &= PHY_GBIT_FEATURES;
6154 phy_dev->advertising = phy_dev->supported;
6155
Marcin Wojtas3f518502014-07-10 16:52:13 -03006156 port->link = 0;
6157 port->duplex = 0;
6158 port->speed = 0;
6159
6160 return 0;
6161}
6162
6163static void mvpp2_phy_disconnect(struct mvpp2_port *port)
6164{
Philippe Reynes8e072692016-06-28 00:08:11 +02006165 struct net_device *ndev = port->dev;
6166
6167 phy_disconnect(ndev->phydev);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006168}
6169
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006170static int mvpp2_irqs_init(struct mvpp2_port *port)
6171{
6172 int err, i;
6173
6174 for (i = 0; i < port->nqvecs; i++) {
6175 struct mvpp2_queue_vector *qv = port->qvecs + i;
6176
6177 err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
6178 if (err)
6179 goto err;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006180
6181 if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
6182 irq_set_affinity_hint(qv->irq,
6183 cpumask_of(qv->sw_thread_id));
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006184 }
6185
6186 return 0;
6187err:
6188 for (i = 0; i < port->nqvecs; i++) {
6189 struct mvpp2_queue_vector *qv = port->qvecs + i;
6190
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006191 irq_set_affinity_hint(qv->irq, NULL);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006192 free_irq(qv->irq, qv);
6193 }
6194
6195 return err;
6196}
6197
6198static void mvpp2_irqs_deinit(struct mvpp2_port *port)
6199{
6200 int i;
6201
6202 for (i = 0; i < port->nqvecs; i++) {
6203 struct mvpp2_queue_vector *qv = port->qvecs + i;
6204
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006205 irq_set_affinity_hint(qv->irq, NULL);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006206 free_irq(qv->irq, qv);
6207 }
6208}
6209
Marcin Wojtas3f518502014-07-10 16:52:13 -03006210static int mvpp2_open(struct net_device *dev)
6211{
6212 struct mvpp2_port *port = netdev_priv(dev);
6213 unsigned char mac_bcast[ETH_ALEN] = {
6214 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6215 int err;
6216
6217 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
6218 if (err) {
6219 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
6220 return err;
6221 }
6222 err = mvpp2_prs_mac_da_accept(port->priv, port->id,
6223 dev->dev_addr, true);
6224 if (err) {
6225		netdev_err(dev, "mvpp2_prs_mac_da_accept own MAC failed\n");
6226 return err;
6227 }
6228 err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
6229 if (err) {
6230 netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
6231 return err;
6232 }
6233 err = mvpp2_prs_def_flow(port);
6234 if (err) {
6235 netdev_err(dev, "mvpp2_prs_def_flow failed\n");
6236 return err;
6237 }
6238
6239 /* Allocate the Rx/Tx queues */
6240 err = mvpp2_setup_rxqs(port);
6241 if (err) {
6242 netdev_err(port->dev, "cannot allocate Rx queues\n");
6243 return err;
6244 }
6245
6246 err = mvpp2_setup_txqs(port);
6247 if (err) {
6248 netdev_err(port->dev, "cannot allocate Tx queues\n");
6249 goto err_cleanup_rxqs;
6250 }
6251
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006252 err = mvpp2_irqs_init(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006253 if (err) {
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006254 netdev_err(port->dev, "cannot init IRQs\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03006255 goto err_cleanup_txqs;
6256 }
6257
6258	/* The link is down by default */
6259 netif_carrier_off(port->dev);
6260
6261 err = mvpp2_phy_connect(port);
6262 if (err < 0)
6263 goto err_free_irq;
6264
6265 /* Unmask interrupts on all CPUs */
6266 on_each_cpu(mvpp2_interrupts_unmask, port, 1);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006267 mvpp2_shared_interrupt_mask_unmask(port, false);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006268
6269 mvpp2_start_dev(port);
6270
6271 return 0;
6272
6273err_free_irq:
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006274 mvpp2_irqs_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006275err_cleanup_txqs:
6276 mvpp2_cleanup_txqs(port);
6277err_cleanup_rxqs:
6278 mvpp2_cleanup_rxqs(port);
6279 return err;
6280}
6281
6282static int mvpp2_stop(struct net_device *dev)
6283{
6284 struct mvpp2_port *port = netdev_priv(dev);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006285 struct mvpp2_port_pcpu *port_pcpu;
6286 int cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006287
6288 mvpp2_stop_dev(port);
6289 mvpp2_phy_disconnect(port);
6290
6291 /* Mask interrupts on all CPUs */
6292 on_each_cpu(mvpp2_interrupts_mask, port, 1);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006293 mvpp2_shared_interrupt_mask_unmask(port, true);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006294
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006295 mvpp2_irqs_deinit(port);
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006296 if (!port->has_tx_irqs) {
6297 for_each_present_cpu(cpu) {
6298 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006299
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006300 hrtimer_cancel(&port_pcpu->tx_done_timer);
6301 port_pcpu->timer_scheduled = false;
6302 tasklet_kill(&port_pcpu->tx_done_tasklet);
6303 }
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006304 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006305 mvpp2_cleanup_rxqs(port);
6306 mvpp2_cleanup_txqs(port);
6307
6308 return 0;
6309}
6310
6311static void mvpp2_set_rx_mode(struct net_device *dev)
6312{
6313 struct mvpp2_port *port = netdev_priv(dev);
6314 struct mvpp2 *priv = port->priv;
6315 struct netdev_hw_addr *ha;
6316 int id = port->id;
6317 bool allmulti = dev->flags & IFF_ALLMULTI;
6318
6319 mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
6320 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
6321 mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
6322
6323	/* Remove all port->id's mcast entries */
6324 mvpp2_prs_mcast_del_all(priv, id);
6325
6326 if (allmulti && !netdev_mc_empty(dev)) {
6327 netdev_for_each_mc_addr(ha, dev)
6328 mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
6329 }
6330}
6331
6332static int mvpp2_set_mac_address(struct net_device *dev, void *p)
6333{
6334 struct mvpp2_port *port = netdev_priv(dev);
6335 const struct sockaddr *addr = p;
6336 int err;
6337
6338 if (!is_valid_ether_addr(addr->sa_data)) {
6339 err = -EADDRNOTAVAIL;
Markus Elfringc1175542017-04-17 11:10:47 +02006340 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006341 }
6342
6343 if (!netif_running(dev)) {
6344 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6345 if (!err)
6346 return 0;
6347 /* Reconfigure parser to accept the original MAC address */
6348 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6349 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02006350 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006351 }
6352
6353 mvpp2_stop_dev(port);
6354
6355 err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
6356 if (!err)
6357 goto out_start;
6358
6359	/* Reconfigure parser to accept the original MAC address */
6360 err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
6361 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02006362 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006363out_start:
6364 mvpp2_start_dev(port);
6365 mvpp2_egress_enable(port);
6366 mvpp2_ingress_enable(port);
6367 return 0;
Markus Elfringc1175542017-04-17 11:10:47 +02006368log_error:
Markus Elfringdfd42402017-04-17 11:20:41 +02006369 netdev_err(dev, "failed to change MAC address\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03006370 return err;
6371}
6372
6373static int mvpp2_change_mtu(struct net_device *dev, int mtu)
6374{
6375 struct mvpp2_port *port = netdev_priv(dev);
6376 int err;
6377
Jarod Wilson57779872016-10-17 15:54:06 -04006378 if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
6379		netdev_info(dev, "illegal MTU value %d, rounding to %d\n", mtu,
6380 ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
6381 mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006382 }
6383
6384 if (!netif_running(dev)) {
6385 err = mvpp2_bm_update_mtu(dev, mtu);
6386 if (!err) {
6387 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6388 return 0;
6389 }
6390
6391 /* Reconfigure BM to the original MTU */
6392 err = mvpp2_bm_update_mtu(dev, dev->mtu);
6393 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02006394 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006395 }
6396
6397 mvpp2_stop_dev(port);
6398
6399 err = mvpp2_bm_update_mtu(dev, mtu);
6400 if (!err) {
6401 port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
6402 goto out_start;
6403 }
6404
6405 /* Reconfigure BM to the original MTU */
6406 err = mvpp2_bm_update_mtu(dev, dev->mtu);
6407 if (err)
Markus Elfringc1175542017-04-17 11:10:47 +02006408 goto log_error;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006409
6410out_start:
6411 mvpp2_start_dev(port);
6412 mvpp2_egress_enable(port);
6413 mvpp2_ingress_enable(port);
6414
6415 return 0;
Markus Elfringc1175542017-04-17 11:10:47 +02006416log_error:
Markus Elfringdfd42402017-04-17 11:20:41 +02006417 netdev_err(dev, "failed to change MTU\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03006418 return err;
6419}
6420
stephen hemmingerbc1f4472017-01-06 19:12:52 -08006421static void
Marcin Wojtas3f518502014-07-10 16:52:13 -03006422mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6423{
6424 struct mvpp2_port *port = netdev_priv(dev);
6425 unsigned int start;
6426 int cpu;
6427
6428 for_each_possible_cpu(cpu) {
6429 struct mvpp2_pcpu_stats *cpu_stats;
6430 u64 rx_packets;
6431 u64 rx_bytes;
6432 u64 tx_packets;
6433 u64 tx_bytes;
6434
6435 cpu_stats = per_cpu_ptr(port->stats, cpu);
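		/* Re-read until the per-CPU seqcount is stable so that the
		 * four counters below form one coherent snapshot even while
		 * the datapath keeps updating them.
		 */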
6436 do {
6437 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
6438 rx_packets = cpu_stats->rx_packets;
6439 rx_bytes = cpu_stats->rx_bytes;
6440 tx_packets = cpu_stats->tx_packets;
6441 tx_bytes = cpu_stats->tx_bytes;
6442 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
6443
6444 stats->rx_packets += rx_packets;
6445 stats->rx_bytes += rx_bytes;
6446 stats->tx_packets += tx_packets;
6447 stats->tx_bytes += tx_bytes;
6448 }
6449
6450 stats->rx_errors = dev->stats.rx_errors;
6451 stats->rx_dropped = dev->stats.rx_dropped;
6452 stats->tx_dropped = dev->stats.tx_dropped;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006453}
6454
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006455static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6456{
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006457 int ret;
6458
Philippe Reynes8e072692016-06-28 00:08:11 +02006459 if (!dev->phydev)
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006460 return -ENOTSUPP;
6461
Philippe Reynes8e072692016-06-28 00:08:11 +02006462 ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006463 if (!ret)
6464 mvpp2_link_event(dev);
6465
6466 return ret;
6467}
6468
Marcin Wojtas3f518502014-07-10 16:52:13 -03006469/* Ethtool methods */
6470
Marcin Wojtas3f518502014-07-10 16:52:13 -03006471/* Set interrupt coalescing for ethtools */
6472static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
6473 struct ethtool_coalesce *c)
6474{
6475 struct mvpp2_port *port = netdev_priv(dev);
6476 int queue;
6477
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006478 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006479 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6480
6481 rxq->time_coal = c->rx_coalesce_usecs;
6482 rxq->pkts_coal = c->rx_max_coalesced_frames;
Thomas Petazzonid63f9e42017-02-21 11:28:02 +01006483 mvpp2_rx_pkts_coal_set(port, rxq);
6484 mvpp2_rx_time_coal_set(port, rxq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006485 }
6486
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006487 if (port->has_tx_irqs) {
6488 port->tx_time_coal = c->tx_coalesce_usecs;
6489 mvpp2_tx_time_coal_set(port);
6490 }
6491
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006492 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006493 struct mvpp2_tx_queue *txq = port->txqs[queue];
6494
6495 txq->done_pkts_coal = c->tx_max_coalesced_frames;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006496
6497 if (port->has_tx_irqs)
6498 mvpp2_tx_pkts_coal_set(port, txq);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006499 }
6500
Marcin Wojtas3f518502014-07-10 16:52:13 -03006501 return 0;
6502}
6503
6504/* get coalescing for ethtools */
6505static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
6506 struct ethtool_coalesce *c)
6507{
6508 struct mvpp2_port *port = netdev_priv(dev);
6509
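	/* mvpp2_ethtool_set_coalesce() programs every queue with the same
	 * values, so reporting queue 0 here is representative of the port.
	 */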
6510 c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
6511 c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
6512 c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
6513 return 0;
6514}
6515
6516static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
6517 struct ethtool_drvinfo *drvinfo)
6518{
6519 strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
6520 sizeof(drvinfo->driver));
6521 strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
6522 sizeof(drvinfo->version));
6523 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
6524 sizeof(drvinfo->bus_info));
6525}
6526
6527static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
6528 struct ethtool_ringparam *ring)
6529{
6530 struct mvpp2_port *port = netdev_priv(dev);
6531
6532 ring->rx_max_pending = MVPP2_MAX_RXD;
6533 ring->tx_max_pending = MVPP2_MAX_TXD;
6534 ring->rx_pending = port->rx_ring_size;
6535 ring->tx_pending = port->tx_ring_size;
6536}
6537
6538static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
6539 struct ethtool_ringparam *ring)
6540{
6541 struct mvpp2_port *port = netdev_priv(dev);
6542 u16 prev_rx_ring_size = port->rx_ring_size;
6543 u16 prev_tx_ring_size = port->tx_ring_size;
6544 int err;
6545
6546 err = mvpp2_check_ringparam_valid(dev, ring);
6547 if (err)
6548 return err;
6549
6550 if (!netif_running(dev)) {
6551 port->rx_ring_size = ring->rx_pending;
6552 port->tx_ring_size = ring->tx_pending;
6553 return 0;
6554 }
6555
6556 /* The interface is running, so we have to force a
6557 * reallocation of the queues
6558 */
6559 mvpp2_stop_dev(port);
6560 mvpp2_cleanup_rxqs(port);
6561 mvpp2_cleanup_txqs(port);
6562
6563 port->rx_ring_size = ring->rx_pending;
6564 port->tx_ring_size = ring->tx_pending;
6565
6566 err = mvpp2_setup_rxqs(port);
6567 if (err) {
6568 /* Reallocate Rx queues with the original ring size */
6569 port->rx_ring_size = prev_rx_ring_size;
6570 ring->rx_pending = prev_rx_ring_size;
6571 err = mvpp2_setup_rxqs(port);
6572 if (err)
6573 goto err_out;
6574 }
6575 err = mvpp2_setup_txqs(port);
6576 if (err) {
6577 /* Reallocate Tx queues with the original ring size */
6578 port->tx_ring_size = prev_tx_ring_size;
6579 ring->tx_pending = prev_tx_ring_size;
6580 err = mvpp2_setup_txqs(port);
6581 if (err)
6582 goto err_clean_rxqs;
6583 }
6584
6585 mvpp2_start_dev(port);
6586 mvpp2_egress_enable(port);
6587 mvpp2_ingress_enable(port);
6588
6589 return 0;
6590
6591err_clean_rxqs:
6592 mvpp2_cleanup_rxqs(port);
6593err_out:
Markus Elfringdfd42402017-04-17 11:20:41 +02006594	netdev_err(dev, "failed to change ring parameters\n");
Marcin Wojtas3f518502014-07-10 16:52:13 -03006595 return err;
6596}
6597
6598/* Device ops */
6599
6600static const struct net_device_ops mvpp2_netdev_ops = {
6601 .ndo_open = mvpp2_open,
6602 .ndo_stop = mvpp2_stop,
6603 .ndo_start_xmit = mvpp2_tx,
6604 .ndo_set_rx_mode = mvpp2_set_rx_mode,
6605 .ndo_set_mac_address = mvpp2_set_mac_address,
6606 .ndo_change_mtu = mvpp2_change_mtu,
6607 .ndo_get_stats64 = mvpp2_get_stats64,
Thomas Petazzonibd695a52014-07-27 23:21:36 +02006608 .ndo_do_ioctl = mvpp2_ioctl,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006609};
6610
6611static const struct ethtool_ops mvpp2_eth_tool_ops = {
Florian Fainelli00606c42016-11-15 11:19:48 -08006612 .nway_reset = phy_ethtool_nway_reset,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006613 .get_link = ethtool_op_get_link,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006614 .set_coalesce = mvpp2_ethtool_set_coalesce,
6615 .get_coalesce = mvpp2_ethtool_get_coalesce,
6616 .get_drvinfo = mvpp2_ethtool_get_drvinfo,
6617 .get_ringparam = mvpp2_ethtool_get_ringparam,
6618 .set_ringparam = mvpp2_ethtool_set_ringparam,
Philippe Reynesfb773e92016-06-28 00:08:12 +02006619 .get_link_ksettings = phy_ethtool_get_link_ksettings,
6620 .set_link_ksettings = phy_ethtool_set_link_ksettings,
Marcin Wojtas3f518502014-07-10 16:52:13 -03006621};
6622
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006623/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
6624 * had a single IRQ defined per port.
6625 */
6626static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
6627 struct device_node *port_node)
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006628{
6629 struct mvpp2_queue_vector *v = &port->qvecs[0];
6630
6631 v->first_rxq = 0;
6632 v->nrxqs = port->nrxqs;
6633 v->type = MVPP2_QUEUE_VECTOR_SHARED;
6634 v->sw_thread_id = 0;
6635 v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
6636 v->port = port;
6637 v->irq = irq_of_parse_and_map(port_node, 0);
6638 if (v->irq <= 0)
6639 return -EINVAL;
6640 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
6641 NAPI_POLL_WEIGHT);
6642
6643 port->nqvecs = 1;
6644
6645 return 0;
6646}
6647
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006648static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
6649 struct device_node *port_node)
6650{
6651 struct mvpp2_queue_vector *v;
6652 int i, ret;
6653
6654 port->nqvecs = num_possible_cpus();
6655 if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
6656 port->nqvecs += 1;
6657
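	/* Sketch of the resulting layout, assuming a 4-CPU system:
	 * - MVPP2_QDIST_MULTI_MODE: 4 private vectors, each requesting the
	 *   per-CPU "tx-cpuN" interrupt and owning MVPP2_DEFAULT_RXQ Rx queues;
	 * - MVPP2_QDIST_SINGLE_MODE: the same 4 private vectors plus one
	 *   extra shared vector ("rx-shared") owning all the port's Rx queues.
	 */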
6658 for (i = 0; i < port->nqvecs; i++) {
6659 char irqname[16];
6660
6661 v = port->qvecs + i;
6662
6663 v->port = port;
6664 v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
6665 v->sw_thread_id = i;
6666 v->sw_thread_mask = BIT(i);
6667
6668 snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
6669
6670 if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
6671 v->first_rxq = i * MVPP2_DEFAULT_RXQ;
6672 v->nrxqs = MVPP2_DEFAULT_RXQ;
6673 } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
6674 i == (port->nqvecs - 1)) {
6675 v->first_rxq = 0;
6676 v->nrxqs = port->nrxqs;
6677 v->type = MVPP2_QUEUE_VECTOR_SHARED;
6678 strncpy(irqname, "rx-shared", sizeof(irqname));
6679 }
6680
6681 v->irq = of_irq_get_byname(port_node, irqname);
6682 if (v->irq <= 0) {
6683 ret = -EINVAL;
6684 goto err;
6685 }
6686
6687 netif_napi_add(port->dev, &v->napi, mvpp2_poll,
6688 NAPI_POLL_WEIGHT);
6689 }
6690
6691 return 0;
6692
6693err:
6694 for (i = 0; i < port->nqvecs; i++)
6695 irq_dispose_mapping(port->qvecs[i].irq);
6696 return ret;
6697}
6698
6699static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
6700 struct device_node *port_node)
6701{
6702 if (port->has_tx_irqs)
6703 return mvpp2_multi_queue_vectors_init(port, port_node);
6704 else
6705 return mvpp2_simple_queue_vectors_init(port, port_node);
6706}
6707
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006708static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
6709{
6710 int i;
6711
6712 for (i = 0; i < port->nqvecs; i++)
6713 irq_dispose_mapping(port->qvecs[i].irq);
6714}
6715
6716/* Configure Rx queue group interrupt for this port */
6717static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
6718{
6719 struct mvpp2 *priv = port->priv;
6720 u32 val;
6721 int i;
6722
6723 if (priv->hw_version == MVPP21) {
6724 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
6725 port->nrxqs);
6726 return;
6727 }
6728
6729 /* Handle the more complicated PPv2.2 case */
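	/* For each queue vector bound to a software thread: the group-index
	 * register selects which (port, sw_thread) entry the following
	 * sub-group write configures, i.e. the first RXQ and the number of
	 * RXQs handled by that thread.
	 */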
6730 for (i = 0; i < port->nqvecs; i++) {
6731 struct mvpp2_queue_vector *qv = port->qvecs + i;
6732
6733 if (!qv->nrxqs)
6734 continue;
6735
6736 val = qv->sw_thread_id;
6737 val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
6738 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
6739
6740 val = qv->first_rxq;
6741 val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
6742 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
6743 }
6744}
6745
Marcin Wojtas3f518502014-07-10 16:52:13 -03006746/* Initialize port HW */
6747static int mvpp2_port_init(struct mvpp2_port *port)
6748{
6749 struct device *dev = port->dev->dev.parent;
6750 struct mvpp2 *priv = port->priv;
6751 struct mvpp2_txq_pcpu *txq_pcpu;
6752 int queue, cpu, err;
6753
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006754 /* Checks for hardware constraints */
6755 if (port->first_rxq + port->nrxqs >
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01006756 MVPP2_MAX_PORTS * priv->max_port_rxqs)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006757 return -EINVAL;
6758
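	/* Likewise reject configurations where the Rx queue count is not a
	 * multiple of 4 or exceeds the per-port maximum, or where the Tx
	 * queue count exceeds MVPP2_MAX_TXQ.
	 */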
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006759 if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) ||
6760 (port->ntxqs > MVPP2_MAX_TXQ))
6761 return -EINVAL;
6762
Marcin Wojtas3f518502014-07-10 16:52:13 -03006763 /* Disable port */
6764 mvpp2_egress_disable(port);
6765 mvpp2_port_disable(port);
6766
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006767 port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
6768
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006769 port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03006770 GFP_KERNEL);
6771 if (!port->txqs)
6772 return -ENOMEM;
6773
6774	/* Associate physical Tx queues with this port and initialize them.
6775 * The mapping is predefined.
6776 */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006777 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006778 int queue_phy_id = mvpp2_txq_phys(port->id, queue);
6779 struct mvpp2_tx_queue *txq;
6780
6781 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
Christophe Jaillet177c8d12017-02-19 10:19:57 +01006782 if (!txq) {
6783 err = -ENOMEM;
6784 goto err_free_percpu;
6785 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006786
6787 txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
6788 if (!txq->pcpu) {
6789 err = -ENOMEM;
6790 goto err_free_percpu;
6791 }
6792
6793 txq->id = queue_phy_id;
6794 txq->log_id = queue;
6795 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
6796 for_each_present_cpu(cpu) {
6797 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
6798 txq_pcpu->cpu = cpu;
6799 }
6800
6801 port->txqs[queue] = txq;
6802 }
6803
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006804 port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03006805 GFP_KERNEL);
6806 if (!port->rxqs) {
6807 err = -ENOMEM;
6808 goto err_free_percpu;
6809 }
6810
6811	/* Allocate and initialize the Rx queues for this port */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006812 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006813 struct mvpp2_rx_queue *rxq;
6814
6815 /* Map physical Rx queue to port's logical Rx queue */
6816 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08006817 if (!rxq) {
6818 err = -ENOMEM;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006819 goto err_free_percpu;
Jisheng Zhangd82b0c22016-03-31 17:01:23 +08006820 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03006821 /* Map this Rx queue to a physical queue */
6822 rxq->id = port->first_rxq + queue;
6823 rxq->port = port->id;
6824 rxq->logic_rxq = queue;
6825
6826 port->rxqs[queue] = rxq;
6827 }
6828
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006829 mvpp2_rx_irqs_setup(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006830
6831 /* Create Rx descriptor rings */
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006832 for (queue = 0; queue < port->nrxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006833 struct mvpp2_rx_queue *rxq = port->rxqs[queue];
6834
6835 rxq->size = port->rx_ring_size;
6836 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
6837 rxq->time_coal = MVPP2_RX_COAL_USEC;
6838 }
6839
6840 mvpp2_ingress_disable(port);
6841
6842 /* Port default configuration */
6843 mvpp2_defaults_set(port);
6844
6845 /* Port's classifier configuration */
6846 mvpp2_cls_oversize_rxq_set(port);
6847 mvpp2_cls_port_config(port);
6848
6849 /* Provide an initial Rx packet size */
6850 port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
6851
6852	/* Initialize buffer pools for software forwarding (swf) */
6853 err = mvpp2_swf_bm_pool_init(port);
6854 if (err)
6855 goto err_free_percpu;
6856
6857 return 0;
6858
6859err_free_percpu:
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006860 for (queue = 0; queue < port->ntxqs; queue++) {
Marcin Wojtas3f518502014-07-10 16:52:13 -03006861 if (!port->txqs[queue])
6862 continue;
6863 free_percpu(port->txqs[queue]->pcpu);
6864 }
6865 return err;
6866}
6867
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006868/* Check whether the port DT description provides the TX interrupts.
6869 * On PPv2.1, there are no such interrupts. On PPv2.2, they are
6870 * available, but we need to keep support for old DTs.
6871 */
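/* Illustrative (non-normative) DT fragment that would satisfy this check,
 * using the interrupt names listed in irqs[] below:
 *
 *	interrupt-names = "rx-shared", "tx-cpu0", "tx-cpu1",
 *			  "tx-cpu2", "tx-cpu3";
 */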
6872static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv,
6873 struct device_node *port_node)
6874{
6875 char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1",
6876 "tx-cpu2", "tx-cpu3" };
6877 int ret, i;
6878
6879 if (priv->hw_version == MVPP21)
6880 return false;
6881
6882 for (i = 0; i < 5; i++) {
6883 ret = of_property_match_string(port_node, "interrupt-names",
6884 irqs[i]);
6885 if (ret < 0)
6886 return false;
6887 }
6888
6889 return true;
6890}
6891
Marcin Wojtas3f518502014-07-10 16:52:13 -03006892/* Ports initialization */
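/* Illustrative (non-normative) DT port node showing the properties parsed
 * below; all names and values are examples only, not taken from a real
 * board file:
 *
 *	port@0 {
 *		port-id = <0>;
 *		gop-port-id = <0>;      (PPv2.2 only)
 *		phy = <&phy0>;
 *		phy-mode = "rgmii-id";
 *	};
 *
 * An optional "marvell,loopback" boolean property is also honoured.
 */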
6893static int mvpp2_port_probe(struct platform_device *pdev,
6894 struct device_node *port_node,
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01006895 struct mvpp2 *priv)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006896{
6897 struct device_node *phy_node;
6898 struct mvpp2_port *port;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006899 struct mvpp2_port_pcpu *port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006900 struct net_device *dev;
6901 struct resource *res;
6902 const char *dt_mac_addr;
6903 const char *mac_from;
6904 char hw_mac_addr[ETH_ALEN];
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006905 unsigned int ntxqs, nrxqs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006906 bool has_tx_irqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006907 u32 id;
6908 int features;
6909 int phy_mode;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02006910 int err, i, cpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006911
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006912 has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node);
6913
6914 if (!has_tx_irqs)
6915 queue_mode = MVPP2_QDIST_SINGLE_MODE;
6916
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006917 ntxqs = MVPP2_MAX_TXQ;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006918 if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
6919 nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
6920 else
6921 nrxqs = MVPP2_DEFAULT_RXQ;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006922
6923 dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006924 if (!dev)
6925 return -ENOMEM;
6926
6927 phy_node = of_parse_phandle(port_node, "phy", 0);
6928 if (!phy_node) {
6929 dev_err(&pdev->dev, "missing phy\n");
6930 err = -ENODEV;
6931 goto err_free_netdev;
6932 }
6933
6934 phy_mode = of_get_phy_mode(port_node);
6935 if (phy_mode < 0) {
6936 dev_err(&pdev->dev, "incorrect phy mode\n");
6937 err = phy_mode;
6938 goto err_free_netdev;
6939 }
6940
6941 if (of_property_read_u32(port_node, "port-id", &id)) {
6942 err = -EINVAL;
6943 dev_err(&pdev->dev, "missing port-id value\n");
6944 goto err_free_netdev;
6945 }
6946
6947 dev->tx_queue_len = MVPP2_MAX_TXD;
6948 dev->watchdog_timeo = 5 * HZ;
6949 dev->netdev_ops = &mvpp2_netdev_ops;
6950 dev->ethtool_ops = &mvpp2_eth_tool_ops;
6951
6952 port = netdev_priv(dev);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006953 port->dev = dev;
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006954 port->ntxqs = ntxqs;
6955 port->nrxqs = nrxqs;
Thomas Petazzoni213f4282017-08-03 10:42:00 +02006956 port->priv = priv;
6957 port->has_tx_irqs = has_tx_irqs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006958
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006959 err = mvpp2_queue_vectors_init(port, port_node);
6960 if (err)
Marcin Wojtas3f518502014-07-10 16:52:13 -03006961 goto err_free_netdev;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006962
6963 if (of_property_read_bool(port_node, "marvell,loopback"))
6964 port->flags |= MVPP2_F_LOOPBACK;
6965
Marcin Wojtas3f518502014-07-10 16:52:13 -03006966 port->id = id;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01006967 if (priv->hw_version == MVPP21)
Thomas Petazzoni09f83972017-08-03 10:41:57 +02006968 port->first_rxq = port->id * port->nrxqs;
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01006969 else
6970 port->first_rxq = port->id * priv->max_port_rxqs;
6971
Marcin Wojtas3f518502014-07-10 16:52:13 -03006972 port->phy_node = phy_node;
6973 port->phy_interface = phy_mode;
6974
Thomas Petazzonia7868412017-03-07 16:53:13 +01006975 if (priv->hw_version == MVPP21) {
6976 res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
6977 port->base = devm_ioremap_resource(&pdev->dev, res);
6978 if (IS_ERR(port->base)) {
6979 err = PTR_ERR(port->base);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006980 goto err_deinit_qvecs;
Thomas Petazzonia7868412017-03-07 16:53:13 +01006981 }
6982 } else {
6983 if (of_property_read_u32(port_node, "gop-port-id",
6984 &port->gop_id)) {
6985 err = -EINVAL;
6986 dev_err(&pdev->dev, "missing gop-port-id value\n");
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006987 goto err_deinit_qvecs;
Thomas Petazzonia7868412017-03-07 16:53:13 +01006988 }
6989
6990 port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
Marcin Wojtas3f518502014-07-10 16:52:13 -03006991 }
6992
6993 /* Alloc per-cpu stats */
6994 port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6995 if (!port->stats) {
6996 err = -ENOMEM;
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02006997 goto err_deinit_qvecs;
Marcin Wojtas3f518502014-07-10 16:52:13 -03006998 }
6999
7000 dt_mac_addr = of_get_mac_address(port_node);
7001 if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
7002 mac_from = "device tree";
7003 ether_addr_copy(dev->dev_addr, dt_mac_addr);
7004 } else {
Thomas Petazzoni26975822017-03-07 16:53:14 +01007005 if (priv->hw_version == MVPP21)
7006 mvpp21_get_mac_address(port, hw_mac_addr);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007007 if (is_valid_ether_addr(hw_mac_addr)) {
7008 mac_from = "hardware";
7009 ether_addr_copy(dev->dev_addr, hw_mac_addr);
7010 } else {
7011 mac_from = "random";
7012 eth_hw_addr_random(dev);
7013 }
7014 }
7015
7016 port->tx_ring_size = MVPP2_MAX_TXD;
7017 port->rx_ring_size = MVPP2_MAX_RXD;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007018 SET_NETDEV_DEV(dev, &pdev->dev);
7019
7020 err = mvpp2_port_init(port);
7021 if (err < 0) {
7022 dev_err(&pdev->dev, "failed to init port %d\n", id);
7023 goto err_free_stats;
7024 }
Thomas Petazzoni26975822017-03-07 16:53:14 +01007025
Thomas Petazzoni26975822017-03-07 16:53:14 +01007026 mvpp2_port_periodic_xon_disable(port);
7027
7028 if (priv->hw_version == MVPP21)
7029 mvpp2_port_fc_adv_enable(port);
7030
7031 mvpp2_port_reset(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007032
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007033 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
7034 if (!port->pcpu) {
7035 err = -ENOMEM;
7036 goto err_free_txq_pcpu;
7037 }
7038
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007039 if (!port->has_tx_irqs) {
7040 for_each_present_cpu(cpu) {
7041 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007042
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007043 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
7044 HRTIMER_MODE_REL_PINNED);
7045 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
7046 port_pcpu->timer_scheduled = false;
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007047
Thomas Petazzoni213f4282017-08-03 10:42:00 +02007048 tasklet_init(&port_pcpu->tx_done_tasklet,
7049 mvpp2_tx_proc_cb,
7050 (unsigned long)dev);
7051 }
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007052 }
7053
Marcin Wojtas3f518502014-07-10 16:52:13 -03007054 features = NETIF_F_SG | NETIF_F_IP_CSUM;
7055 dev->features = features | NETIF_F_RXCSUM;
7056 dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
7057 dev->vlan_features |= features;
7058
Jarod Wilson57779872016-10-17 15:54:06 -04007059 /* MTU range: 68 - 9676 */
7060 dev->min_mtu = ETH_MIN_MTU;
7061 /* 9676 == 9700 - 20 and rounding to 8 */
7062 dev->max_mtu = 9676;
7063
Marcin Wojtas3f518502014-07-10 16:52:13 -03007064 err = register_netdev(dev);
7065 if (err < 0) {
7066 dev_err(&pdev->dev, "failed to register netdev\n");
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007067 goto err_free_port_pcpu;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007068 }
7069	netdev_info(dev, "Using %s MAC address %pM\n", mac_from, dev->dev_addr);
7070
Marcin Wojtas3f518502014-07-10 16:52:13 -03007071 priv->port_list[id] = port;
7072 return 0;
7073
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007074err_free_port_pcpu:
7075 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007076err_free_txq_pcpu:
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007077 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007078 free_percpu(port->txqs[i]->pcpu);
7079err_free_stats:
7080 free_percpu(port->stats);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007081err_deinit_qvecs:
7082 mvpp2_queue_vectors_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007083err_free_netdev:
Peter Chenccb80392016-08-01 15:02:37 +08007084 of_node_put(phy_node);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007085 free_netdev(dev);
7086 return err;
7087}
7088
7089/* Ports removal routine */
7090static void mvpp2_port_remove(struct mvpp2_port *port)
7091{
7092 int i;
7093
7094 unregister_netdev(port->dev);
Peter Chenccb80392016-08-01 15:02:37 +08007095 of_node_put(port->phy_node);
Marcin Wojtasedc660f2015-08-06 19:00:30 +02007096 free_percpu(port->pcpu);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007097 free_percpu(port->stats);
Thomas Petazzoni09f83972017-08-03 10:41:57 +02007098 for (i = 0; i < port->ntxqs; i++)
Marcin Wojtas3f518502014-07-10 16:52:13 -03007099 free_percpu(port->txqs[i]->pcpu);
Thomas Petazzoni591f4cf2017-08-03 10:41:59 +02007100 mvpp2_queue_vectors_deinit(port);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007101 free_netdev(port->dev);
7102}
7103
7104/* Initialize decoding windows */
7105static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
7106 struct mvpp2 *priv)
7107{
7108 u32 win_enable;
7109 int i;
7110
7111 for (i = 0; i < 6; i++) {
7112 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
7113 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
7114
7115 if (i < 4)
7116 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
7117 }
7118
7119 win_enable = 0;
7120
7121 for (i = 0; i < dram->num_cs; i++) {
7122 const struct mbus_dram_window *cs = dram->cs + i;
7123
7124 mvpp2_write(priv, MVPP2_WIN_BASE(i),
7125 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
7126 dram->mbus_dram_target_id);
7127
7128 mvpp2_write(priv, MVPP2_WIN_SIZE(i),
7129 (cs->size - 1) & 0xffff0000);
7130
7131 win_enable |= (1 << i);
7132 }
7133
7134 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
7135}
7136
7137/* Initialize Rx FIFOs */
7138static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
7139{
7140 int port;
7141
7142 for (port = 0; port < MVPP2_MAX_PORTS; port++) {
7143 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
7144 MVPP2_RX_FIFO_PORT_DATA_SIZE);
7145 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
7146 MVPP2_RX_FIFO_PORT_ATTR_SIZE);
7147 }
7148
7149 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7150 MVPP2_RX_FIFO_PORT_MIN_PKT);
7151 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7152}
7153
Thomas Petazzoni6763ce32017-03-07 16:53:15 +01007154static void mvpp2_axi_init(struct mvpp2 *priv)
7155{
7156 u32 val, rdval, wrval;
7157
7158 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
7159
7160 /* AXI Bridge Configuration */
7161
7162 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
7163 << MVPP22_AXI_ATTR_CACHE_OFFS;
7164 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7165 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
7166
7167 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
7168 << MVPP22_AXI_ATTR_CACHE_OFFS;
7169 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7170 << MVPP22_AXI_ATTR_DOMAIN_OFFS;
7171
7172 /* BM */
7173 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
7174 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
7175
7176 /* Descriptors */
7177 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
7178 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
7179 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
7180 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
7181
7182 /* Buffer Data */
7183 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
7184 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
7185
7186 val = MVPP22_AXI_CODE_CACHE_NON_CACHE
7187 << MVPP22_AXI_CODE_CACHE_OFFS;
7188 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
7189 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7190 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
7191 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
7192
7193 val = MVPP22_AXI_CODE_CACHE_RD_CACHE
7194 << MVPP22_AXI_CODE_CACHE_OFFS;
7195 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7196 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7197
7198 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
7199
7200 val = MVPP22_AXI_CODE_CACHE_WR_CACHE
7201 << MVPP22_AXI_CODE_CACHE_OFFS;
7202 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7203 << MVPP22_AXI_CODE_DOMAIN_OFFS;
7204
7205 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
7206}
7207
Marcin Wojtas3f518502014-07-10 16:52:13 -03007208/* Initialize network controller common part HW */
7209static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
7210{
7211 const struct mbus_dram_target_info *dram_target_info;
7212 int err, i;
Marcin Wojtas08a23752014-07-21 13:48:12 -03007213 u32 val;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007214
Marcin Wojtas3f518502014-07-10 16:52:13 -03007215 /* MBUS windows configuration */
7216 dram_target_info = mv_mbus_dram_info();
7217 if (dram_target_info)
7218 mvpp2_conf_mbus_windows(dram_target_info, priv);
7219
Thomas Petazzoni6763ce32017-03-07 16:53:15 +01007220 if (priv->hw_version == MVPP22)
7221 mvpp2_axi_init(priv);
7222
Marcin Wojtas08a23752014-07-21 13:48:12 -03007223 /* Disable HW PHY polling */
Thomas Petazzoni26975822017-03-07 16:53:14 +01007224 if (priv->hw_version == MVPP21) {
7225 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7226 val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
7227 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7228 } else {
7229 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7230 val &= ~MVPP22_SMI_POLLING_EN;
7231 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7232 }
Marcin Wojtas08a23752014-07-21 13:48:12 -03007233
Marcin Wojtas3f518502014-07-10 16:52:13 -03007234 /* Allocate and initialize aggregated TXQs */
7235 priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
Markus Elfringd7ce3ce2017-04-17 08:48:23 +02007236 sizeof(*priv->aggr_txqs),
Marcin Wojtas3f518502014-07-10 16:52:13 -03007237 GFP_KERNEL);
7238 if (!priv->aggr_txqs)
7239 return -ENOMEM;
7240
7241 for_each_present_cpu(i) {
7242 priv->aggr_txqs[i].id = i;
7243 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
7244 err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
7245 MVPP2_AGGR_TXQ_SIZE, i, priv);
7246 if (err < 0)
7247 return err;
7248 }
7249
7250 /* Rx Fifo Init */
7251 mvpp2_rx_fifo_init(priv);
7252
Thomas Petazzoni26975822017-03-07 16:53:14 +01007253 if (priv->hw_version == MVPP21)
7254 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
7255 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007256
7257	/* Allow cache snoop when transmitting packets */
7258 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
7259
7260 /* Buffer Manager initialization */
7261 err = mvpp2_bm_init(pdev, priv);
7262 if (err < 0)
7263 return err;
7264
7265 /* Parser default initialization */
7266 err = mvpp2_prs_default_init(pdev, priv);
7267 if (err < 0)
7268 return err;
7269
7270 /* Classifier default initialization */
7271 mvpp2_cls_init(priv);
7272
7273 return 0;
7274}
7275
7276static int mvpp2_probe(struct platform_device *pdev)
7277{
7278 struct device_node *dn = pdev->dev.of_node;
7279 struct device_node *port_node;
7280 struct mvpp2 *priv;
7281 struct resource *res;
Thomas Petazzonia7868412017-03-07 16:53:13 +01007282 void __iomem *base;
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02007283 int port_count, i;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007284 int err;
7285
Markus Elfring0b92e592017-04-17 08:38:32 +02007286 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007287 if (!priv)
7288 return -ENOMEM;
7289
Thomas Petazzonifaca9242017-03-07 16:53:06 +01007290 priv->hw_version =
7291 (unsigned long)of_device_get_match_data(&pdev->dev);
7292
Marcin Wojtas3f518502014-07-10 16:52:13 -03007293 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Thomas Petazzonia7868412017-03-07 16:53:13 +01007294 base = devm_ioremap_resource(&pdev->dev, res);
7295 if (IS_ERR(base))
7296 return PTR_ERR(base);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007297
Thomas Petazzonia7868412017-03-07 16:53:13 +01007298 if (priv->hw_version == MVPP21) {
7299 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7300 priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
7301 if (IS_ERR(priv->lms_base))
7302 return PTR_ERR(priv->lms_base);
7303 } else {
7304 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7305 priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
7306 if (IS_ERR(priv->iface_base))
7307 return PTR_ERR(priv->iface_base);
7308 }
7309
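	/* Set up the per-software-thread register windows: each of the
	 * MVPP2_MAX_THREADS threads gets its own window at a fixed stride
	 * (addr_space_sz) from the common base.
	 */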
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02007310 for (i = 0; i < MVPP2_MAX_THREADS; i++) {
Thomas Petazzonia7868412017-03-07 16:53:13 +01007311 u32 addr_space_sz;
7312
7313 addr_space_sz = (priv->hw_version == MVPP21 ?
7314 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
Thomas Petazzonidf089aa2017-08-03 10:41:58 +02007315 priv->swth_base[i] = base + i * addr_space_sz;
Thomas Petazzonia7868412017-03-07 16:53:13 +01007316 }
Marcin Wojtas3f518502014-07-10 16:52:13 -03007317
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01007318 if (priv->hw_version == MVPP21)
7319 priv->max_port_rxqs = 8;
7320 else
7321 priv->max_port_rxqs = 32;
7322
Marcin Wojtas3f518502014-07-10 16:52:13 -03007323 priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
7324 if (IS_ERR(priv->pp_clk))
7325 return PTR_ERR(priv->pp_clk);
7326 err = clk_prepare_enable(priv->pp_clk);
7327 if (err < 0)
7328 return err;
7329
7330 priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
7331 if (IS_ERR(priv->gop_clk)) {
7332 err = PTR_ERR(priv->gop_clk);
7333 goto err_pp_clk;
7334 }
7335 err = clk_prepare_enable(priv->gop_clk);
7336 if (err < 0)
7337 goto err_pp_clk;
7338
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007339 if (priv->hw_version == MVPP22) {
7340 priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
7341 if (IS_ERR(priv->mg_clk)) {
7342 err = PTR_ERR(priv->mg_clk);
7343 goto err_gop_clk;
7344 }
7345
7346 err = clk_prepare_enable(priv->mg_clk);
7347 if (err < 0)
7348 goto err_gop_clk;
7349 }
7350
Marcin Wojtas3f518502014-07-10 16:52:13 -03007351 /* Get system's tclk rate */
7352 priv->tclk = clk_get_rate(priv->pp_clk);
7353
Thomas Petazzoni2067e0a2017-03-07 16:53:19 +01007354 if (priv->hw_version == MVPP22) {
7355 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
7356 if (err)
7357 goto err_mg_clk;
7358 /* Sadly, the BM pools all share the same register to
7359 * store the high 32 bits of their address. So they
7360 * must all have the same high 32 bits, which forces
7361 * us to restrict coherent memory to DMA_BIT_MASK(32).
7362 */
7363 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7364 if (err)
7365 goto err_mg_clk;
7366 }
7367
Marcin Wojtas3f518502014-07-10 16:52:13 -03007368 /* Initialize network controller */
7369 err = mvpp2_init(pdev, priv);
7370 if (err < 0) {
7371 dev_err(&pdev->dev, "failed to initialize controller\n");
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007372 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007373 }
7374
7375 port_count = of_get_available_child_count(dn);
7376 if (port_count == 0) {
7377 dev_err(&pdev->dev, "no ports enabled\n");
Wei Yongjun575a1932014-07-20 22:02:43 +08007378 err = -ENODEV;
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007379 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007380 }
7381
7382 priv->port_list = devm_kcalloc(&pdev->dev, port_count,
Markus Elfring0b92e592017-04-17 08:38:32 +02007383 sizeof(*priv->port_list),
7384 GFP_KERNEL);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007385 if (!priv->port_list) {
7386 err = -ENOMEM;
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007387 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007388 }
7389
7390 /* Initialize ports */
Marcin Wojtas3f518502014-07-10 16:52:13 -03007391 for_each_available_child_of_node(dn, port_node) {
Thomas Petazzoni59b9a312017-03-07 16:53:17 +01007392 err = mvpp2_port_probe(pdev, port_node, priv);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007393 if (err < 0)
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007394 goto err_mg_clk;
Marcin Wojtas3f518502014-07-10 16:52:13 -03007395 }
7396
7397 platform_set_drvdata(pdev, priv);
7398 return 0;
7399
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007400err_mg_clk:
7401 if (priv->hw_version == MVPP22)
7402 clk_disable_unprepare(priv->mg_clk);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007403err_gop_clk:
7404 clk_disable_unprepare(priv->gop_clk);
7405err_pp_clk:
7406 clk_disable_unprepare(priv->pp_clk);
7407 return err;
7408}
7409
7410static int mvpp2_remove(struct platform_device *pdev)
7411{
7412 struct mvpp2 *priv = platform_get_drvdata(pdev);
7413 struct device_node *dn = pdev->dev.of_node;
7414 struct device_node *port_node;
7415 int i = 0;
7416
7417 for_each_available_child_of_node(dn, port_node) {
7418 if (priv->port_list[i])
7419 mvpp2_port_remove(priv->port_list[i]);
7420 i++;
7421 }
7422
7423 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
7424 struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
7425
7426 mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
7427 }
7428
7429 for_each_present_cpu(i) {
7430 struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
7431
7432 dma_free_coherent(&pdev->dev,
7433 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
7434 aggr_txq->descs,
Thomas Petazzoni20396132017-03-07 16:53:00 +01007435 aggr_txq->descs_dma);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007436 }
7437
Thomas Petazzonifceb55d2017-03-07 16:53:18 +01007438 clk_disable_unprepare(priv->mg_clk);
Marcin Wojtas3f518502014-07-10 16:52:13 -03007439 clk_disable_unprepare(priv->pp_clk);
7440 clk_disable_unprepare(priv->gop_clk);
7441
7442 return 0;
7443}
7444
7445static const struct of_device_id mvpp2_match[] = {
Thomas Petazzonifaca9242017-03-07 16:53:06 +01007446 {
7447 .compatible = "marvell,armada-375-pp2",
7448 .data = (void *)MVPP21,
7449 },
Thomas Petazzonifc5e1552017-03-07 16:53:20 +01007450 {
7451 .compatible = "marvell,armada-7k-pp22",
7452 .data = (void *)MVPP22,
7453 },
Marcin Wojtas3f518502014-07-10 16:52:13 -03007454 { }
7455};
7456MODULE_DEVICE_TABLE(of, mvpp2_match);
7457
7458static struct platform_driver mvpp2_driver = {
7459 .probe = mvpp2_probe,
7460 .remove = mvpp2_remove,
7461 .driver = {
7462 .name = MVPP2_DRIVER_NAME,
7463 .of_match_table = mvpp2_match,
7464 },
7465};
7466
7467module_platform_driver(mvpp2_driver);
7468
7469MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
7470MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
Ezequiel Garciac6340992014-07-14 10:34:47 -03007471MODULE_LICENSE("GPL v2");