/*
 * Header Parser helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
        int i;

        if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
                return -EINVAL;

        /* Clear entry invalidation bit */
        pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

        /* Write tcam index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
                mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);

        /* Write sram index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
                mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);

        return 0;
}
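
/* Illustrative usage (not part of the original file): callers build a
 * complete sw entry and program it in one shot, e.g.
 *
 *      struct mvpp2_prs_entry pe;
 *
 *      memset(&pe, 0, sizeof(pe));
 *      pe.index = tid;
 *      ...fill tcam/sram via the helpers below...
 *      mvpp2_prs_hw_write(priv, &pe);
 *
 * Since TCAM and SRAM are only reachable through the indirect
 * index/data register pairs above, entries are always written whole.
 */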

/* Initialize tcam entry from hw */
static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
                                  struct mvpp2_prs_entry *pe, int tid)
{
        int i;

        if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
                return -EINVAL;

        memset(pe, 0, sizeof(*pe));
        pe->index = tid;

        /* Write tcam index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

        pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
                        MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
        if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
                return MVPP2_PRS_TCAM_ENTRY_INVALID;

        for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
                pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

        /* Write sram index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
                pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

        return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
        /* Write index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
        mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
                    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
        priv->prs_shadow[index].valid = true;
        priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
                                    unsigned int ri, unsigned int ri_mask)
{
        priv->prs_shadow[index].ri_mask = ri_mask;
        priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
        pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
        pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
        pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
        pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
                                    unsigned int port, bool add)
{
        if (add)
                pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
        else
                pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
                                        unsigned int ports)
{
        pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
        pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
        pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
}
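
/* Note that the port bits are stored inverted in the TCAM: a cleared
 * MVPP2_PRS_TCAM_PORT_EN() bit means the corresponding port takes part
 * in the match. So mvpp2_prs_tcam_port_map_set(pe, BIT(0)) stores
 * ~BIT(0) in the enable field and matches port 0 only, while a port
 * map of 0 masks the entry for all ports.
 */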

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
        return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
}

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
                                         unsigned int offs, unsigned char byte,
                                         unsigned char enable)
{
        int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

        pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
        pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
        pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
        pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
}
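
/* Each 32-bit TCAM word holds two data bytes in bits [15:0] and their
 * enable (mask) bytes in bits [31:16] (the MVPP2_PRS_TCAM_EN() shift).
 * An enable byte of 0xff means "match this byte exactly" and 0x00
 * makes it a don't-care; mvpp2_prs_match_etype() below, for instance,
 * is just two such calls with enable 0xff.
 */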

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
                                         unsigned int offs, unsigned char *byte,
                                         unsigned char *enable)
{
        int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

        *byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
        *enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
}

/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
                                    u16 data)
{
        u16 tcam_data;

        tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
        return tcam_data == data;
}

/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
                                     unsigned int bits, unsigned int enable)
{
        int i;

        for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
                if (!(enable & BIT(i)))
                        continue;

                if (bits & BIT(i))
                        pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
                else
                        pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
        }

        pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
        return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
                                  unsigned short ethertype)
{
        mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
        mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set vid in tcam sw entry */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
                                unsigned short vid)
{
        mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
        mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
                                    u32 val)
{
        pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
                                      u32 val)
{
        pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
                                     unsigned int bits, unsigned int mask)
{
        unsigned int i;

        for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
                if (!(mask & BIT(i)))
                        continue;

                if (bits & BIT(i))
                        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
                                                1);
                else
                        mvpp2_prs_sram_bits_clear(pe,
                                                  MVPP2_PRS_SRAM_RI_OFFS + i,
                                                  1);

                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
        }
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
        return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
                                     unsigned int bits, unsigned int mask)
{
        unsigned int i;

        for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
                if (!(mask & BIT(i)))
                        continue;

                if (bits & BIT(i))
                        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
                                                1);
                else
                        mvpp2_prs_sram_bits_clear(pe,
                                                  MVPP2_PRS_SRAM_AI_OFFS + i,
                                                  1);

                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
        }
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
        u8 bits;
        /* ai is stored on bits 90->97; so it spreads across two u32 */
        int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
        int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);

        bits = (pe->sram[ai_off] >> ai_shift) |
               (pe->sram[ai_off + 1] << (32 - ai_shift));

        return bits;
}
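
/* Worked example for the split read above: with the ai field starting
 * at bit 90 (per the comment in mvpp2_prs_sram_ai_get()), ai_off is
 * word 2 and ai_shift is 26, so ai[5:0] sits in bits [31:26] of
 * sram[2] and ai[7:6] in bits [1:0] of sram[3]; the two shifted reads
 * stitch the byte back together.
 */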

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
                                       unsigned int lu)
{
        int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

        mvpp2_prs_sram_bits_clear(pe, sram_next_off,
                                  MVPP2_PRS_SRAM_NEXT_LU_MASK);
        mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
                                     unsigned int op)
{
        /* Set sign */
        if (shift < 0) {
                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
                shift = 0 - shift;
        } else {
                mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
        }

        /* Set value */
        pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;

        /* Reset and set operation */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
                                  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

        /* Set base offset as current */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
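
/* The shift is encoded as sign + magnitude: e.g.
 * mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD)
 * (as used for IPv6 multicast below) sets the sign bit and stores 18,
 * making the parser step back 18 bytes before the next lookup.
 */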

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
                                      unsigned int type, int offset,
                                      unsigned int op)
{
        /* Set sign */
        if (offset < 0) {
                mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
                offset = 0 - offset;
        } else {
                mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
        }

        /* Set value */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
                                  MVPP2_PRS_SRAM_UDF_MASK);
        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
                                offset & MVPP2_PRS_SRAM_UDF_MASK);

        /* Set offset type */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
                                  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

        /* Set offset operation */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
                                  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
        mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
                                op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);

        /* Set base offset as current */
        mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
        struct mvpp2_prs_entry pe;
        int tid;

        /* Go through all entries with MVPP2_PRS_LU_FLOWS */
        for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
                u8 bits;

                if (!priv->prs_shadow[tid].valid ||
                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
                        continue;

                mvpp2_prs_init_from_hw(priv, &pe, tid);
                bits = mvpp2_prs_sram_ai_get(&pe);

                /* Sram stores the classification lookup ID in AI bits [5:0] */
                if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
                        return tid;
        }

        return -ENOENT;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
                                     unsigned char end)
{
        int tid;

        if (start > end)
                swap(start, end);

        if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
                end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

        for (tid = start; tid <= end; tid++) {
                if (!priv->prs_shadow[tid].valid)
                        return tid;
        }

        return -EINVAL;
}
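
/* The swap() above normalizes the bounds, so the scan is always
 * ascending regardless of argument order; callers that need entries
 * ordered relative to each other (e.g. single vs. double vlan entries
 * in mvpp2_prs_vlan_add()) enforce that separately by comparing tids.
 */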

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
        struct mvpp2_prs_entry pe;

        if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
                /* Entry exists - update port only */
                mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
        } else {
                /* Entry doesn't exist - create new */
                memset(&pe, 0, sizeof(pe));
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
                pe.index = MVPP2_PE_DROP_ALL;

                /* Non-promiscuous mode for all ports - DROP unknown packets */
                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
                                         MVPP2_PRS_RI_DROP_MASK);

                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

                /* Update shadow table */
                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

                /* Mask all ports */
                mvpp2_prs_tcam_port_map_set(&pe, 0);
        }

        /* Update port mask */
        mvpp2_prs_tcam_port_set(&pe, port, add);

        mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to unicast or multicast promiscuous mode */
void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
                               enum mvpp2_prs_l2_cast l2_cast, bool add)
{
        struct mvpp2_prs_entry pe;
        unsigned char cast_match;
        unsigned int ri;
        int tid;

        if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
                cast_match = MVPP2_PRS_UCAST_VAL;
                tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
                ri = MVPP2_PRS_RI_L2_UCAST;
        } else {
                cast_match = MVPP2_PRS_MCAST_VAL;
                tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
                ri = MVPP2_PRS_RI_L2_MCAST;
        }

        /* promiscuous mode - Accept unknown unicast or multicast packets */
        if (priv->prs_shadow[tid].valid) {
                mvpp2_prs_init_from_hw(priv, &pe, tid);
        } else {
                memset(&pe, 0, sizeof(pe));
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
                pe.index = tid;

                /* Continue - set next lookup */
                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

                /* Set result info bits */
                mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);

                /* Match UC or MC addresses */
                mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
                                             MVPP2_PRS_CAST_MASK);

                /* Shift to ethertype */
                mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

                /* Mask all ports */
                mvpp2_prs_tcam_port_map_set(&pe, 0);

                /* Update shadow table */
                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
        }

        /* Update port mask */
        mvpp2_prs_tcam_port_set(&pe, port, add);

        mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
                                  bool tagged, bool extend)
{
        struct mvpp2_prs_entry pe;
        int tid, shift;

        if (extend) {
                tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
                shift = 8;
        } else {
                tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
                shift = 4;
        }

        if (priv->prs_shadow[tid].valid) {
                /* Entry exists - update port only */
                mvpp2_prs_init_from_hw(priv, &pe, tid);
        } else {
                /* Entry doesn't exist - create new */
                memset(&pe, 0, sizeof(pe));
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
                pe.index = tid;

                /* Update shadow table */
                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

                if (tagged) {
                        /* Set tagged bit in DSA tag */
                        mvpp2_prs_tcam_data_byte_set(&pe, 0,
                                                     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
                                                     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

                        /* Set ai bits for next iteration */
                        if (extend)
                                mvpp2_prs_sram_ai_update(&pe, 1,
                                                         MVPP2_PRS_SRAM_AI_MASK);
                        else
                                mvpp2_prs_sram_ai_update(&pe, 0,
                                                         MVPP2_PRS_SRAM_AI_MASK);

                        /* Set result info bits to 'single vlan' */
                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
                                                 MVPP2_PRS_RI_VLAN_MASK);
                        /* If packet is tagged continue check vid filtering */
                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
                } else {
                        /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
                        mvpp2_prs_sram_shift_set(&pe, shift,
                                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

                        /* Set result info bits to 'no vlans' */
                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
                                                 MVPP2_PRS_RI_VLAN_MASK);
                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
                }

                /* Mask all ports */
                mvpp2_prs_tcam_port_map_set(&pe, 0);
        }

        /* Update port mask */
        mvpp2_prs_tcam_port_set(&pe, port, add);

        mvpp2_prs_hw_write(priv, &pe);
}

/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
                                            bool add, bool tagged, bool extend)
{
        struct mvpp2_prs_entry pe;
        int tid, shift, port_mask;

        if (extend) {
                tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
                      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
                port_mask = 0;
                shift = 8;
        } else {
                tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
                      MVPP2_PE_ETYPE_DSA_UNTAGGED;
                port_mask = MVPP2_PRS_PORT_MASK;
                shift = 4;
        }

        if (priv->prs_shadow[tid].valid) {
                /* Entry exists - update port only */
                mvpp2_prs_init_from_hw(priv, &pe, tid);
        } else {
                /* Entry doesn't exist - create new */
                memset(&pe, 0, sizeof(pe));
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
                pe.index = tid;

                /* Set ethertype */
                mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
                mvpp2_prs_match_etype(&pe, 2, 0);

                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
                                         MVPP2_PRS_RI_DSA_MASK);
                /* Shift ethertype + 2 reserved bytes + tag */
                mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

                /* Update shadow table */
                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

                if (tagged) {
                        /* Set tagged bit in DSA tag */
                        mvpp2_prs_tcam_data_byte_set(&pe,
                                                     MVPP2_ETH_TYPE_LEN + 2 + 3,
                                                     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
                                                     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
                        /* Clear all ai bits for next iteration */
                        mvpp2_prs_sram_ai_update(&pe, 0,
                                                 MVPP2_PRS_SRAM_AI_MASK);
                        /* If packet is tagged continue check vlans */
                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
                } else {
                        /* Set result info bits to 'no vlans' */
                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
                                                 MVPP2_PRS_RI_VLAN_MASK);
                        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
                }
                /* Mask/unmask all ports, depending on dsa type */
                mvpp2_prs_tcam_port_map_set(&pe, port_mask);
        }

        /* Update port mask */
        mvpp2_prs_tcam_port_set(&pe, port, add);

        mvpp2_prs_hw_write(priv, &pe);
}

/* Search for existing single/triple vlan entry */
static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
{
        struct mvpp2_prs_entry pe;
        int tid;

        /* Go through all entries with MVPP2_PRS_LU_VLAN */
        for (tid = MVPP2_PE_FIRST_FREE_TID;
             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
                unsigned int ri_bits, ai_bits;
                bool match;

                if (!priv->prs_shadow[tid].valid ||
                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
                        continue;

                mvpp2_prs_init_from_hw(priv, &pe, tid);
                match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
                if (!match)
                        continue;

                /* Get vlan type */
                ri_bits = mvpp2_prs_sram_ri_get(&pe);
                ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

                /* Get current ai value from tcam */
                ai_bits = mvpp2_prs_tcam_ai_get(&pe);
                /* Clear double vlan bit */
                ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

                if (ai != ai_bits)
                        continue;

                if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
                    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
                        return tid;
        }

        return -ENOENT;
}
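
/* The swab16() is needed because mvpp2_prs_match_etype() stores the
 * TPID most-significant byte first, i.e. in the low byte of the TCAM
 * word, while mvpp2_prs_tcam_data_cmp() compares the word's low 16
 * bits as a plain integer, so the pattern has to be byte-swapped to
 * line up.
 */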

/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
                              unsigned int port_map)
{
        struct mvpp2_prs_entry pe;
        int tid_aux, tid;
        int ret = 0;

        memset(&pe, 0, sizeof(pe));

        tid = mvpp2_prs_vlan_find(priv, tpid, ai);

        if (tid < 0) {
                /* Create new tcam entry */
                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
                                                MVPP2_PE_FIRST_FREE_TID);
                if (tid < 0)
                        return tid;

                /* Get last double vlan tid */
                for (tid_aux = MVPP2_PE_LAST_FREE_TID;
                     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
                        unsigned int ri_bits;

                        if (!priv->prs_shadow[tid_aux].valid ||
                            priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
                                continue;

                        mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
                        ri_bits = mvpp2_prs_sram_ri_get(&pe);
                        if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
                            MVPP2_PRS_RI_VLAN_DOUBLE)
                                break;
                }

                if (tid <= tid_aux)
                        return -EINVAL;

                memset(&pe, 0, sizeof(pe));
                pe.index = tid;
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);

                mvpp2_prs_match_etype(&pe, 0, tpid);

                /* VLAN tag detected, proceed with VID filtering */
                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

                /* Clear all ai bits for next iteration */
                mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

                if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
                                                 MVPP2_PRS_RI_VLAN_MASK);
                } else {
                        ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
                        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
                                                 MVPP2_PRS_RI_VLAN_MASK);
                }
                mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);

                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
        } else {
                mvpp2_prs_init_from_hw(priv, &pe, tid);
        }
        /* Update ports' mask */
        mvpp2_prs_tcam_port_map_set(&pe, port_map);

        mvpp2_prs_hw_write(priv, &pe);

        return ret;
}

/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
        int i;

        for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
                if (!priv->prs_double_vlans[i])
                        return i;
        }

        return -EINVAL;
}

/* Search for existing double vlan entry */
static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
                                      unsigned short tpid2)
{
        struct mvpp2_prs_entry pe;
        int tid;

        /* Go through all entries with MVPP2_PRS_LU_VLAN */
        for (tid = MVPP2_PE_FIRST_FREE_TID;
             tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
                unsigned int ri_mask;
                bool match;

                if (!priv->prs_shadow[tid].valid ||
                    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
                        continue;

                mvpp2_prs_init_from_hw(priv, &pe, tid);

                match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
                        mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));

                if (!match)
                        continue;

                ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
                if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
                        return tid;
        }

        return -ENOENT;
}

/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
                                     unsigned short tpid2,
                                     unsigned int port_map)
{
        int tid_aux, tid, ai, ret = 0;
        struct mvpp2_prs_entry pe;

        memset(&pe, 0, sizeof(pe));

        tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

        if (tid < 0) {
                /* Create new tcam entry */
                tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                                MVPP2_PE_LAST_FREE_TID);
                if (tid < 0)
                        return tid;

                /* Set ai value for new double vlan entry */
                ai = mvpp2_prs_double_vlan_ai_free_get(priv);
                if (ai < 0)
                        return ai;

                /* Get first single/triple vlan tid */
                for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
                     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
                        unsigned int ri_bits;

                        if (!priv->prs_shadow[tid_aux].valid ||
                            priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
                                continue;

                        mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
                        ri_bits = mvpp2_prs_sram_ri_get(&pe);
                        ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
                        if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
                            ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
                                break;
                }

                if (tid >= tid_aux)
                        return -ERANGE;

                memset(&pe, 0, sizeof(pe));
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
                pe.index = tid;

                priv->prs_double_vlans[ai] = true;

                mvpp2_prs_match_etype(&pe, 0, tpid1);
                mvpp2_prs_match_etype(&pe, 4, tpid2);

                mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
                /* Shift 4 bytes - skip outer vlan tag */
                mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
                                         MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
                                         MVPP2_PRS_RI_VLAN_MASK);
                mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
                                         MVPP2_PRS_SRAM_AI_MASK);

                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
        } else {
                mvpp2_prs_init_from_hw(priv, &pe, tid);
        }

        /* Update ports' mask */
        mvpp2_prs_tcam_port_map_set(&pe, port_map);
        mvpp2_prs_hw_write(priv, &pe);

        return ret;
}
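
/* Allocation sketch: prs_double_vlans[] records which ai values are in
 * use so every outer-tag pair gets a unique ai. The entry above
 * forwards ai | MVPP2_PRS_DBL_VLAN_AI_BIT through the SRAM ai field,
 * and the matching triple-vlan entry created by mvpp2_prs_vlan_add()
 * recognizes that same value in its TCAM ai bits on the next lookup.
 */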

/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
                               unsigned int ri, unsigned int ri_mask)
{
        struct mvpp2_prs_entry pe;
        int tid;

        if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
            (proto != IPPROTO_IGMP))
                return -EINVAL;

        /* Not fragmented packet */
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
                return tid;

        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
        pe.index = tid;

        /* Set next lu to IPv4 */
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
        mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
        /* Set L4 offset */
        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
                                  sizeof(struct iphdr) - 4,
                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

        mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
                                     MVPP2_PRS_TCAM_PROTO_MASK_L);
        mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
                                     MVPP2_PRS_TCAM_PROTO_MASK);

        mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
        /* Unmask all ports */
        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
        mvpp2_prs_hw_write(priv, &pe);

        /* Fragmented packet */
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
                return tid;

        pe.index = tid;
        /* Clear ri before updating */
        pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
        pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

        mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
                                 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

        mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
        mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
        mvpp2_prs_hw_write(priv, &pe);

        return 0;
}
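
/* The two entries written above differ only in the fragmentation
 * match: the first requires data bytes 2/3 (the IPv4 flags/fragment
 * offset field at the current lookup offset) to be zero, i.e. a
 * non-fragmented packet, while the second leaves them as don't-care
 * and additionally reports MVPP2_PRS_RI_IP_FRAG_TRUE, catching the
 * fragmented remainder.
 */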

/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
        struct mvpp2_prs_entry pe;
        int mask, tid;

        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
                return tid;

        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
        pe.index = tid;

        switch (l3_cast) {
        case MVPP2_PRS_L3_MULTI_CAST:
                mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
                                             MVPP2_PRS_IPV4_MC_MASK);
                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
                                         MVPP2_PRS_RI_L3_ADDR_MASK);
                break;
        case MVPP2_PRS_L3_BROAD_CAST:
                mask = MVPP2_PRS_IPV4_BC_MASK;
                mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
                mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
                mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
                mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
                mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
                                         MVPP2_PRS_RI_L3_ADDR_MASK);
                break;
        default:
                return -EINVAL;
        }

        /* Finished: go to flowid generation */
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
                                 MVPP2_PRS_IPV4_DIP_AI_BIT);
        /* Unmask all ports */
        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
        mvpp2_prs_hw_write(priv, &pe);

        return 0;
}

/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
                               unsigned int ri, unsigned int ri_mask)
{
        struct mvpp2_prs_entry pe;
        int tid;

        if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
            (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
                return -EINVAL;

        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
                return tid;

        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
        pe.index = tid;

        /* Finished: go to flowid generation */
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
        mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
                                  sizeof(struct ipv6hdr) - 6,
                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

        mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
        /* Unmask all ports */
        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

        /* Write HW */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
        mvpp2_prs_hw_write(priv, &pe);

        return 0;
}

/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
        struct mvpp2_prs_entry pe;
        int tid;

        if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
                return -EINVAL;

        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
                return tid;

        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
        pe.index = tid;

        /* Finished: go to flowid generation */
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
                                 MVPP2_PRS_RI_L3_ADDR_MASK);
        mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
                                 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
        /* Shift back to IPv6 NH */
        mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

        mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
                                     MVPP2_PRS_IPV6_MC_MASK);
        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
        /* Unmask all ports */
        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
        mvpp2_prs_hw_write(priv, &pe);

        return 0;
}

/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
                                   int lu_max, int offset)
{
        u32 val;

        /* Set lookup ID */
        val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
        val &= ~MVPP2_PRS_PORT_LU_MASK(port);
        val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
        mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

        /* Set maximum number of loops for packet received from port */
        val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
        val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
        val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
        mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

        /* Set initial offset for packet header extraction for the first
         * searching loop
         */
        val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
        val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
        val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
        mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
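
/* Illustrative call (the exact first-lookup and loop-limit values are
 * the caller's choice; the ones below are only an example):
 *
 *      mvpp2_prs_hw_port_init(priv, port, MVPP2_PRS_LU_MH,
 *                             MVPP2_PRS_PORT_LU_MAX, 0);
 *
 * i.e. start every packet from this port at the Marvell Header lookup
 * with no initial header offset.
 */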

/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
        struct mvpp2_prs_entry pe;
        int port;

        for (port = 0; port < MVPP2_MAX_PORTS; port++) {
                memset(&pe, 0, sizeof(pe));
                mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
                pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

                /* Mask all ports */
                mvpp2_prs_tcam_port_map_set(&pe, 0);

                /* Set flow ID */
                mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
                mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

                /* Update shadow table and hw entry */
                mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
                mvpp2_prs_hw_write(priv, &pe);
        }
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
        struct mvpp2_prs_entry pe;

        memset(&pe, 0, sizeof(pe));

        pe.index = MVPP2_PE_MH_DEFAULT;
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
        mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

        /* Unmask all ports */
        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
        mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
        struct mvpp2_prs_entry pe;

        memset(&pe, 0, sizeof(pe));

        /* Non-promiscuous mode for all ports - DROP unknown packets */
        pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
                                 MVPP2_PRS_RI_DROP_MASK);
        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

        /* Unmask all ports */
        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
        mvpp2_prs_hw_write(priv, &pe);

        /* Create dummy entries for drop all and promiscuous modes */
        mvpp2_prs_mac_drop_all_set(priv, 0, false);
        mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
        mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}

/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
        struct mvpp2_prs_entry pe;

        /* Untagged EDSA entry - placeholder */
        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
                              MVPP2_PRS_EDSA);

        /* Tagged EDSA entry - placeholder */
        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

        /* Untagged DSA entry - placeholder */
        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
                              MVPP2_PRS_DSA);

        /* Tagged DSA entry - placeholder */
        mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

        /* Untagged EDSA ethertype entry - placeholder */
        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
                                        MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

        /* Tagged EDSA ethertype entry - placeholder */
        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
                                        MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

        /* Untagged DSA ethertype entry */
        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
                                        MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

        /* Tagged DSA ethertype entry */
        mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
                                        MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

        /* Set default entry, in case DSA or EDSA tag not found */
        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
        pe.index = MVPP2_PE_DSA_DEFAULT;
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

        /* Shift 0 bytes */
        mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

        /* Clear all sram ai bits for next iteration */
        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

        /* Unmask all ports */
        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

        mvpp2_prs_hw_write(priv, &pe);
}

/* Initialize parser entries for VID filtering */
static void mvpp2_prs_vid_init(struct mvpp2 *priv)
{
        struct mvpp2_prs_entry pe;

        memset(&pe, 0, sizeof(pe));

        /* Set default vid entry */
        pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

        mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);

        /* Skip VLAN header - Set offset to 4 bytes */
        mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

        /* Clear all ai bits for next iteration */
        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

        /* Unmask all ports */
        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
        mvpp2_prs_hw_write(priv, &pe);

        /* Set default vid entry for extended DSA */
        memset(&pe, 0, sizeof(pe));

        /* Set default vid entry */
        pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
                                 MVPP2_PRS_EDSA_VID_AI_BIT);

        /* Skip VLAN header - Set offset to 8 bytes */
        mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

        /* Clear all ai bits for next iteration */
        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

        /* Unmask all ports */
        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
        mvpp2_prs_hw_write(priv, &pe);
}

/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
        struct mvpp2_prs_entry pe;
        int tid;

        /* Ethertype: PPPoE */
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
                return tid;

        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
        pe.index = tid;

        mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

        mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
                                 MVPP2_PRS_RI_PPPOE_MASK);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
        priv->prs_shadow[pe.index].finish = false;
        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
                                MVPP2_PRS_RI_PPPOE_MASK);
        mvpp2_prs_hw_write(priv, &pe);

        /* Ethertype: ARP */
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
                return tid;

        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
        pe.index = tid;

        mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

        /* Generate flow in the next iteration */
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
                                 MVPP2_PRS_RI_L3_PROTO_MASK);
        /* Set L3 offset */
        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
                                  MVPP2_ETH_TYPE_LEN,
                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
        priv->prs_shadow[pe.index].finish = true;
        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
                                MVPP2_PRS_RI_L3_PROTO_MASK);
        mvpp2_prs_hw_write(priv, &pe);

        /* Ethertype: LBTD */
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
                return tid;

        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
        pe.index = tid;

        mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

        /* Generate flow in the next iteration */
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
                                 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
                                 MVPP2_PRS_RI_CPU_CODE_MASK |
                                 MVPP2_PRS_RI_UDF3_MASK);
        /* Set L3 offset */
        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
                                  MVPP2_ETH_TYPE_LEN,
                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
        priv->prs_shadow[pe.index].finish = true;
        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
                                MVPP2_PRS_RI_UDF3_RX_SPECIAL,
                                MVPP2_PRS_RI_CPU_CODE_MASK |
                                MVPP2_PRS_RI_UDF3_MASK);
        mvpp2_prs_hw_write(priv, &pe);

        /* Ethertype: IPv4 without options */
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
                return tid;

        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
        pe.index = tid;

        mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
                                     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
                                     MVPP2_PRS_IPV4_HEAD_MASK |
                                     MVPP2_PRS_IPV4_IHL_MASK);

        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
                                 MVPP2_PRS_RI_L3_PROTO_MASK);
        /* Skip eth_type + 4 bytes of IP header */
        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
        /* Set L3 offset */
        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
                                  MVPP2_ETH_TYPE_LEN,
                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
        priv->prs_shadow[pe.index].finish = false;
        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
                                MVPP2_PRS_RI_L3_PROTO_MASK);
        mvpp2_prs_hw_write(priv, &pe);

        /* Ethertype: IPv4 with options */
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
                return tid;

        pe.index = tid;

        mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
                                     MVPP2_PRS_IPV4_HEAD,
                                     MVPP2_PRS_IPV4_HEAD_MASK);

        /* Clear ri before updating */
        pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
        pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
                                 MVPP2_PRS_RI_L3_PROTO_MASK);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
        priv->prs_shadow[pe.index].finish = false;
        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
                                MVPP2_PRS_RI_L3_PROTO_MASK);
        mvpp2_prs_hw_write(priv, &pe);

        /* Ethertype: IPv6 without options */
        tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
                                        MVPP2_PE_LAST_FREE_TID);
        if (tid < 0)
                return tid;

        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
        pe.index = tid;

        mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

        /* Skip DIP of IPV6 header */
        mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
                                 MVPP2_MAX_L3_ADDR_SIZE,
                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
                                 MVPP2_PRS_RI_L3_PROTO_MASK);
        /* Set L3 offset */
        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
                                  MVPP2_ETH_TYPE_LEN,
                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
        priv->prs_shadow[pe.index].finish = false;
        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
                                MVPP2_PRS_RI_L3_PROTO_MASK);
        mvpp2_prs_hw_write(priv, &pe);

        /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
        memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
        pe.index = MVPP2_PE_ETH_TYPE_UN;

        /* Unmask all ports */
        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

        /* Generate flow in the next iteration */
        mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
                                 MVPP2_PRS_RI_L3_PROTO_MASK);
        /* Set L3 offset even if it's an unknown L3 */
        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
                                  MVPP2_ETH_TYPE_LEN,
                                  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
        priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
        priv->prs_shadow[pe.index].finish = true;
        mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
                                MVPP2_PRS_RI_L3_PROTO_MASK);
        mvpp2_prs_hw_write(priv, &pe);

        return 0;
}
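
/* Note the "IPv4 with options" entry above deliberately reuses the
 * previous sw entry without a memset(): only the IHL match is relaxed
 * and the ri words are rewritten, so everything else is inherited from
 * the stricter "without options" entry. The same pattern appears again
 * in mvpp2_prs_pppoe_init() below.
 */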

/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88A8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
        struct mvpp2_prs_entry pe;
        int err;

        priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
                                              MVPP2_PRS_DBL_VLANS_MAX,
                                              GFP_KERNEL);
        if (!priv->prs_double_vlans)
                return -ENOMEM;

        /* Double VLAN: 0x8100, 0x88A8 */
        err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
                                        MVPP2_PRS_PORT_MASK);
        if (err)
                return err;

        /* Double VLAN: 0x8100, 0x8100 */
        err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
                                        MVPP2_PRS_PORT_MASK);
        if (err)
                return err;

        /* Single VLAN: 0x88a8 */
        err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
                                 MVPP2_PRS_PORT_MASK);
        if (err)
                return err;

        /* Single VLAN: 0x8100 */
        err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
                                 MVPP2_PRS_PORT_MASK);
        if (err)
                return err;

        /* Set default double vlan entry */
        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
        pe.index = MVPP2_PE_VLAN_DBL;

        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

        /* Clear ai for next iterations */
        mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
                                 MVPP2_PRS_RI_VLAN_MASK);

        mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
                                 MVPP2_PRS_DBL_VLAN_AI_BIT);
        /* Unmask all ports */
        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
        mvpp2_prs_hw_write(priv, &pe);

        /* Set default vlan none entry */
        memset(&pe, 0, sizeof(pe));
        mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
        pe.index = MVPP2_PE_VLAN_NONE;

        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
                                 MVPP2_PRS_RI_VLAN_MASK);

        /* Unmask all ports */
        mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

        /* Update shadow table and hw entry */
        mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
        mvpp2_prs_hw_write(priv, &pe);

        return 0;
}
1580
1581/* Set entries for PPPoE ethertype */
1582static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
1583{
1584 struct mvpp2_prs_entry pe;
1585 int tid;
1586
1587 /* IPv4 over PPPoE with options */
1588 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1589 MVPP2_PE_LAST_FREE_TID);
1590 if (tid < 0)
1591 return tid;
1592
1593 memset(&pe, 0, sizeof(pe));
1594 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
1595 pe.index = tid;
1596
1597 mvpp2_prs_match_etype(&pe, 0, PPP_IP);
1598
1599 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
1600 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
1601 MVPP2_PRS_RI_L3_PROTO_MASK);
1602 /* Skip eth_type + 4 bytes of IP header */
1603 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
1604 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1605 /* Set L3 offset */
1606 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
1607 MVPP2_ETH_TYPE_LEN,
1608 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1609
1610 /* Update shadow table and hw entry */
1611 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1612 mvpp2_prs_hw_write(priv, &pe);
1613
1614 /* IPv4 over PPPoE without options */
1615 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1616 MVPP2_PE_LAST_FREE_TID);
1617 if (tid < 0)
1618 return tid;
1619
1620 pe.index = tid;
1621
1622 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
1623 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
1624 MVPP2_PRS_IPV4_HEAD_MASK |
1625 MVPP2_PRS_IPV4_IHL_MASK);
1626
1627 /* Clear ri before updating */
Maxime Chevallierbd43d1b2018-06-28 14:42:05 +02001628 pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
1629 pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
Maxime Chevallierdb9d7d32018-05-31 10:07:43 +02001630 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
1631 MVPP2_PRS_RI_L3_PROTO_MASK);
1632
1633 /* Update shadow table and hw entry */
1634 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
1635 mvpp2_prs_hw_write(priv, &pe);
1636
	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
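
/* After mvpp2_prs_pppoe_init(), a PPPoE frame therefore resolves to one of
 * four outcomes: IPv4 with options (L3_IP4_OPT), plain IPv4 (L3_IP4), IPv6
 * (L3_IP6), or unknown L3 (L3_UN); only the last one skips the deeper IP
 * lookups and goes straight to flow-id generation.
 */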

/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

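	/* TCP and UDP map to dedicated L4 result-info bits, while IGMP is
	 * flagged as receive-special through the CPU code and UDF3 bits so
	 * later stages can single it out rather than treat it as an
	 * ordinary L4 protocol.
	 */
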
	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

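	/* A note on the arithmetic above: the preceding ethertype/PPPoE
	 * entries already skipped ETH_TYPE_LEN + 4 bytes, so this lookup
	 * window starts 4 bytes into the IPv4 header. The 12-byte shift thus
	 * lands on the destination address (header offset 16), and
	 * sizeof(struct iphdr) - 4 = 16 puts the L4 offset right behind a
	 * minimal 20-byte header.
	 */
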
	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
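
/* The two defaults above form a two-pass handshake: the first LU_IP4 pass
 * (TCAM ai == 0) records the L4 protocol and sets MVPP2_PRS_IPV4_DIP_AI_BIT
 * in the SRAM ai, so the second pass (TCAM ai == DIP_AI_BIT) only has to
 * classify the destination address: broadcast and multicast are handled by
 * mvpp2_prs_ip4_cast(), everything else ends up in this unicast fallback.
 */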

/* Initialize entries for IPv6 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 as the last header: handled like the 6-TCP and 17-UDP cases.
	 * Result Info: UDF7=1, DS lite.
	 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

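	/* DS-Lite (RFC 6333) carries IPv4 directly inside IPv6, so an IPIP
	 * next-header value is terminal here, just like TCP (6) or UDP (17);
	 * it only raises the UDF7 marker for later stages to act on.
	 */
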
	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

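	/* With the lookup window sitting on the next-header byte (see the
	 * -18 shift in the unicast-address entry below), TCAM data byte 1 is
	 * the hop limit: a value of zero matches here and the frame is
	 * marked unknown L3 and flagged for drop.
	 */
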
	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relative to the current position */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

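	/* The -18 steps from the start of the destination address (offset 24
	 * of the fixed IPv6 header) back to the next-header field at offset
	 * 6, so the second LU_IP6 pass, now with NO_EXT_AI_BIT set, matches
	 * on the protocol instead of the address.
	 */
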
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Find tcam entry with matched pair <vid,port> */
static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
				    u16 mask)
{
	unsigned char byte[2], enable[2];
	struct mvpp2_prs_entry pe;
	u16 rvid, rmask;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_VID */
	for (tid = MVPP2_PE_VID_FILT_RANGE_START;
	     tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
		mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);

		rvid = ((byte[0] & 0xf) << 8) + byte[1];
		rmask = ((enable[0] & 0xf) << 8) + enable[1];

		if (rvid != vid || rmask != mask)
			continue;

		return tid;
	}

	return -ENOENT;
}
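
/* The 12-bit VID is split across TCAM data bytes 2 and 3: the low nibble of
 * byte 2 holds vid[11:8] and byte 3 holds vid[7:0]. For VID 0x123, say, the
 * bytes read back as 0x1 and 0x23 and are reassembled above as
 * (0x1 << 8) + 0x23 = 0x123.
 */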

/* Write parser entry for VID filtering */
int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
{
	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
	unsigned int mask = 0xfff, reg_val, shift;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <vid,port> already exists */
	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);

	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	/* No such entry */
	if (tid < 0) {
		/* Go through all entries from first to last in vlan range */
		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
						vid_start +
						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);

		/* There isn't room for a new VID filter */
		if (tid < 0)
			return tid;

		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Enable the current port */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set match on VID */
	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
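
/* Illustrative use (a sketch, not part of the driver): the 8021q add-vid
 * callback can simply forward to this helper, assuming a standard netdev:
 *
 *	static int example_vlan_rx_add_vid(struct net_device *dev,
 *					   __be16 proto, u16 vid)
 *	{
 *		return mvpp2_prs_vid_entry_add(netdev_priv(dev), vid);
 *	}
 *
 * A negative return value means no free slot is left in this port's block
 * of VID filter entries.
 */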

/* Remove parser entry for VID filtering */
void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	/* Scan TCAM and see if entry with this <vid,port> already exists */
	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);

	/* No such entry */
	if (tid < 0)
		return;

	mvpp2_prs_hw_inv(priv, tid);
	priv->prs_shadow[tid].valid = false;
}

/* Remove all existing VID filters on this port */
void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	int tid;

	for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
	     tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
		if (priv->prs_shadow[tid].valid) {
			/* tid is a TCAM index here, not a VID, so invalidate
			 * the entry in place instead of doing a <vid,port>
			 * lookup through mvpp2_prs_vid_entry_remove()
			 */
			mvpp2_prs_hw_inv(priv, tid);
			priv->prs_shadow[tid].valid = false;
		}
	}
}

/* Remove the VID filtering guard entry for this port */
void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;

	/* Invalidate the guard entry */
	mvpp2_prs_hw_inv(priv, tid);

	priv->prs_shadow[tid].valid = false;
}

/* Add guard entry that drops packets when no VID is matched on this port */
void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;
	unsigned int reg_val, shift;
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[tid].valid)
		return;

	memset(&pe, 0, sizeof(pe));

	pe.index = tid;

	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Drop VLAN packets that don't belong to any VIDs on this port */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
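
/* Because the guard entry matches any VID on the port, a tagged frame that
 * matched none of the port's per-VID entries falls through to it and is
 * dropped; enabling or disabling filtering is therefore just a matter of
 * adding or invalidating this single entry.
 */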

/* Parser default initialization */
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
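
/* The sequence above roughly mirrors the lookup chain: every port starts at
 * lookup 0 (MH), and each installed stage hands over to the next (MAC, DSA,
 * VID, ethertype/VLAN, PPPoE, then the IP stages), all of it on top of a
 * fully cleared and invalidated TCAM/SRAM.
 */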

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}
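
/* For example, a mask byte of 0xff forces an exact match on that DA byte,
 * while 0x00 wildcards it on both sides of the comparison; the masks must
 * themselves be identical for two entries to be considered equal.
 */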

/* Find tcam entry with matched pair <MAC DA, port> */
static int
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);

		if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
		    entry_pmap == pmap)
			return tid;
	}

	return -ENOENT;
}

/* Update parser's mac da entry */
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
{
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mvpp2 *priv = port->priv;
	unsigned int pmap, len, ri;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
					  MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (tid < 0) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv,
						MVPP2_PE_MAC_RANGE_START,
						MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		if (add)
			return -EINVAL;

		mvpp2_prs_hw_inv(priv, pe.index);
		priv->prs_shadow[pe.index].valid = false;
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_BCAST;
	} else if (is_multicast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_MCAST;
	} else {
		ri = MVPP2_PRS_RI_L2_UCAST;

		if (ether_addr_equal(da, port->dev->dev_addr))
			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
	}

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
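
/* Illustrative use (a sketch, not part of the driver): a dev_uc_sync()-style
 * callback would add a secondary unicast address with
 *
 *	mvpp2_prs_mac_da_accept(port, addr, true);
 *
 * and remove it again by passing false. Dropping the last port from an
 * entry's port map invalidates the whole TCAM entry rather than leaving an
 * unreachable match behind.
 */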

int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	ether_addr_copy(dev->dev_addr, da);

	return 0;
}

void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	unsigned long pmap;
	int index, tid;

	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* We only want entries active on this port */
		if (!test_bit(port->id, &pmap))
			continue;

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		/* Special cases: don't remove broadcast entries or the
		 * port's own address
		 */
		if (is_broadcast_ether_addr(da) ||
		    ether_addr_equal(da, port->dev->dev_addr))
			continue;

		/* Remove entry from TCAM */
		mvpp2_prs_mac_da_accept(port, da, false);
	}
}

int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}
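
/* Switching a port to MVPP2_TAG_TYPE_DSA, for example, makes both its tagged
 * and untagged DSA entries match the port while its EDSA entries stop doing
 * so; the two tag types are kept mutually exclusive per port.
 */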

/* Set prs flow for the port */
int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists */
	if (tid < 0) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, &pe);

	return 0;
}
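
/* Each port thus ends up with one LU_FLOWS entry whose ai field carries the
 * port id as the flow id (under MVPP2_PRS_FLOW_ID_MASK) and whose DONE bit
 * terminates the lookup; the classification stages that follow the parser
 * key off this flow id.
 */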