blob: 6bb69f086794ffdc16af81418aeeadc9766f9cf3 [file] [log] [blame]
/*
2 * Header Parser helpers for Marvell PPv2 Network Controller
3 *
4 * Copyright (C) 2014 Marvell
5 *
6 * Marcin Wojtas <mw@semihalf.com>
7 *
8 * This file is licensed under the terms of the GNU General Public
9 * License version 2. This program is licensed "as is" without any
10 * warranty of any kind, whether express or implied.
11 */
12
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/platform_device.h>
17#include <uapi/linux/ppp_defs.h>
18#include <net/ip.h>
19#include <net/ipv6.h>
20
21#include "mvpp2.h"
22#include "mvpp2_prs.h"
23
/* Flush a software parser entry to the hardware TCAM and SRAM tables.
 * Both halves are written through the indirect index/data register pairs,
 * and the entry's invalidation bit is cleared so it becomes active.
 * Returns 0 on success, -EINVAL if pe->index is out of range.
 */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit so hardware will match this entry */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Select tcam row, then write all data words - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Select sram row, then write all data words - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
47
/* Populate a software parser entry from the hardware tables at index @tid.
 * The invalidation word is read first; for an invalidated entry the rest of
 * the row is not fetched and MVPP2_PRS_TCAM_ENTRY_INVALID is returned.
 * Returns 0 on success, -EINVAL if @tid is out of range.
 */
static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
				  struct mvpp2_prs_entry *pe, int tid)
{
	int i;

	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	memset(pe, 0, sizeof(*pe));
	pe->index = tid;

	/* Select tcam row - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	/* Check the invalidation bit before reading the whole row */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Select sram row, then read all data words - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
78
79/* Invalidate tcam hw entry */
80static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
81{
82 /* Write index - indirect access */
83 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
84 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
85 MVPP2_PRS_TCAM_INV_MASK);
86}
87
88/* Enable shadow table entry and set its lookup ID */
89static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
90{
91 priv->prs_shadow[index].valid = true;
92 priv->prs_shadow[index].lu = lu;
93}
94
95/* Update ri fields in shadow table entry */
96static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
97 unsigned int ri, unsigned int ri_mask)
98{
99 priv->prs_shadow[index].ri_mask = ri_mask;
100 priv->prs_shadow[index].ri = ri;
101}
102
103/* Update lookup field in tcam sw entry */
104static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
105{
106 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
107
108 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
109 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
110}
111
112/* Update mask for single port in tcam sw entry */
113static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
114 unsigned int port, bool add)
115{
116 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
117
118 if (add)
119 pe->tcam.byte[enable_off] &= ~(1 << port);
120 else
121 pe->tcam.byte[enable_off] |= 1 << port;
122}
123
/* Replace the whole port map in the tcam sw entry.
 * @ports is a bitmap of ports that must match. The hardware enable bits
 * are active-low per port, hence the inversion of @ports below.
 */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	/* Port data byte itself is not used for matching - keep it zero */
	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	/* Inverted: a cleared enable bit means "match this port" */
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}
135
136/* Obtain port map from tcam sw entry */
137static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
138{
139 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
140
141 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
142}
143
144/* Set byte of data and its enable bits in tcam sw entry */
145static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
146 unsigned int offs, unsigned char byte,
147 unsigned char enable)
148{
149 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
150 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
151}
152
153/* Get byte of data and its enable bits from tcam sw entry */
154static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
155 unsigned int offs, unsigned char *byte,
156 unsigned char *enable)
157{
158 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
159 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
160}
161
162/* Compare tcam data bytes with a pattern */
163static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
164 u16 data)
165{
166 int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
167 u16 tcam_data;
168
169 tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
170 if (tcam_data != data)
171 return false;
172 return true;
173}
174
/* Update selected ai (additional info) bits in the tcam sw entry.
 * Only bits set in @enable are touched: each such bit is copied from
 * @bits, and the corresponding enable bit is set so hardware compares it.
 */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		/* Leave bits outside the update mask untouched */
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam.byte[ai_idx] |= 1 << i;
		else
			pe->tcam.byte[ai_idx] &= ~(1 << i);
	}

	/* Mark the updated bits as significant for the tcam match */
	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
}
193
194/* Get ai bits from tcam sw entry */
195static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
196{
197 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
198}
199
/* Match a 16-bit ethertype at @offset, network byte order (high byte first) */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
207
/* Match a 12-bit VLAN ID at @offset: upper nibble first, then low byte */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
				unsigned short vid)
{
	unsigned char hi = (vid & 0xf00) >> 8;
	unsigned char lo = vid & 0xff;

	mvpp2_prs_tcam_data_byte_set(pe, offset, hi, 0xf);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, lo, 0xff);
}
215
216/* Set bits in sram sw entry */
217static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
218 int val)
219{
220 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
221}
222
223/* Clear bits in sram sw entry */
224static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
225 int val)
226{
227 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
228}
229
/* Update selected result-info (ri) bits in the sram sw entry.
 * For every bit set in @mask the ri bit is copied from @bits and the
 * matching ri-control bit is set so the hardware applies the update.
 */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		/* Skip bits not selected by the caller's mask */
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		/* Flag this ri bit as valid in the control field */
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}
250
251/* Obtain ri bits from sram sw entry */
252static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
253{
254 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
255}
256
/* Update selected ai bits in the sram sw entry.
 * For every bit set in @mask the ai bit is copied from @bits and the
 * matching ai-control bit is set so the update takes effect.
 */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		/* Skip bits not selected by the caller's mask */
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		/* Flag this ai bit as valid in the control field */
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}
276
/* Read the ai bits from the sram sw entry.
 * The ai field is not byte aligned, so the value is stitched together
 * from the tail of one byte and the head of the next.
 */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	/* Byte containing the low ai bits, and the following byte */
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	/* Bit position of the ai field inside its first byte */
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}
290
291/* In sram sw entry set lookup ID field of the tcam key to be used in the next
292 * lookup interation
293 */
294static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
295 unsigned int lu)
296{
297 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
298
299 mvpp2_prs_sram_bits_clear(pe, sram_next_off,
300 MVPP2_PRS_SRAM_NEXT_LU_MASK);
301 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
302}
303
/* In the sram sw entry set sign and value of the next lookup offset
 * and the shift operation applied before the next iteration.
 * @shift may be negative; the sign is stored in a separate bit and the
 * magnitude in the shift value field.
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign bit and reduce shift to its magnitude */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value - the shift field occupies a full byte */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set the shift operation selector */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
330
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier.
 * Both the UDF offset and the UDF operation fields straddle a byte
 * boundary, so each is written in two steps: the low bits through the
 * generic bit helpers, the spill-over bits by direct byte manipulation.
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign bit and reduce offset to its magnitude */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value: low bits via bit helpers ... */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	/* ... then clear and write the bits that spill into the next byte */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type (e.g. L4) */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation: same two-step pattern as the value above */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
379
/* Find the tcam index of the parser flow entry matching @flow.
 * Scans from the highest index downwards. Returns the tid on success,
 * -ENOENT when no matching flow entry exists.
 */
static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		bits = mvpp2_prs_sram_ai_get(&pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return tid;
	}

	return -ENOENT;
}
404
405/* Return first free tcam index, seeking from start to end */
406static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
407 unsigned char end)
408{
409 int tid;
410
411 if (start > end)
412 swap(start, end);
413
414 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
415 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
416
417 for (tid = start; tid <= end; tid++) {
418 if (!priv->prs_shadow[tid].valid)
419 return tid;
420 }
421
422 return -EINVAL;
423}
424
/* Enable/disable dropping of all mac da's for @port.
 * A single shared drop-all entry (MVPP2_PE_DROP_ALL) is lazily created;
 * subsequent calls only toggle the port bit in its port map.
 */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		/* Terminate lookup: jump straight to flow-id generation */
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports; the requested one is added below */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
458
/* Set port to unicast or multicast promiscuous mode.
 * One shared entry exists per cast type (UC/MC promiscuous); it is
 * lazily created and then only the port bit is toggled per call.
 * @l2_cast selects which entry to touch, @add enables/disables @port.
 */
void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
			       enum mvpp2_prs_l2_cast l2_cast, bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char cast_match;
	unsigned int ri;
	int tid;

	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	/* promiscuous mode - Accept unknown unicast or multicast packets */
	if (priv->prs_shadow[tid].valid) {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);

		/* Match UC or MC addresses via the I/G bit of the first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
					     MVPP2_PRS_CAST_MASK);

		/* Shift past both MAC addresses to the ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports; the requested one is added below */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
512
/* Set entry for dsa packets.
 * One shared entry exists per (tagged, extend) combination; it is lazily
 * created, then only the port bit is toggled. @extend selects EDSA
 * (8-byte tag) vs DSA (4-byte tag), @tagged whether the DSA tag carries
 * a VLAN tag.
 */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
					     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Set ai bits for next iteration: 1 = EDSA, 0 = DSA */
			if (extend)
				mvpp2_prs_sram_ai_update(&pe, 1,
							MVPP2_PRS_SRAM_AI_MASK);
			else
				mvpp2_prs_sram_ai_update(&pe, 0,
							MVPP2_PRS_SRAM_AI_MASK);

			/* Set result info bits to 'single vlan' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
			/* If packet is tagged continue check vid filtering */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
		} else {
			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
			mvpp2_prs_sram_shift_set(&pe, shift,
					MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports; the requested one is added below */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
579
/* Set entry for ethertype-prefixed dsa packets (ETH_P_EDSA).
 * Like mvpp2_prs_dsa_tag_set() but matches an EDSA ethertype before the
 * tag; the shift therefore also skips the 2-byte ethertype and 2
 * reserved bytes. Lazily creates the shared entry, then toggles ports.
 */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype (EDSA) followed by two zero bytes */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
647
/* Search for an existing single/triple vlan entry matching @tpid and @ai.
 * Returns the tcam index on success, -ENOENT when not found.
 */
static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		/* TCAM data is stored byte-swapped relative to the tpid */
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid));
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(&pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(&pe);
		/* Clear double vlan bit - it is not part of the lookup key */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		/* Only single/triple vlan entries qualify */
		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return tid;
	}

	return -ENOENT;
}
688
/* Add or update a single/triple vlan entry.
 * If an entry for (@tpid, @ai) exists, only its port map is refreshed.
 * Otherwise a new entry is allocated from the top of the free range;
 * single/triple entries must stay above the last double-vlan entry
 * (parser priority ordering), hence the -EINVAL check against tid_aux.
 * Returns 0 on success or a negative errno.
 */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry pe;
	int tid_aux, tid;
	int ret = 0;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (tid < 0) {
		/* Create new tcam entry, searching downwards from the end */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* New entry must come after every double vlan entry */
		if (tid <= tid_aux)
			return -EINVAL;

		memset(&pe, 0, sizeof(pe));
		pe.index = tid;
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);

		mvpp2_prs_match_etype(&pe, 0, tpid);

		/* VLAN tag detected, proceed with VID filtering */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* A non-single ai means this follows a double vlan */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);

	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
760
761/* Get first free double vlan ai number */
762static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
763{
764 int i;
765
766 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
767 if (!priv->prs_double_vlans[i])
768 return i;
769 }
770
771 return -EINVAL;
772}
773
/* Search for an existing double vlan entry matching @tpid1/@tpid2
 * (outer/inner TPIDs). Returns the tcam index on success, -ENOENT when
 * not found.
 */
static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
				      unsigned short tpid2)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		/* Outer TPID at offset 0, inner at offset 4 (past the tag) */
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, swab16(tpid1)) &&
			mvpp2_prs_tcam_data_cmp(&pe, 4, swab16(tpid2));

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return tid;
	}

	return -ENOENT;
}
806
/* Add or update a double vlan entry for the (@tpid1, @tpid2) TPID pair.
 * If the entry exists, only its port map is refreshed. Otherwise a new
 * entry is allocated from the bottom of the free range; double entries
 * must precede every single/triple entry (parser priority ordering),
 * hence the -ERANGE check against tid_aux. Returns 0 or negative errno.
 */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	int tid_aux, tid, ai, ret = 0;
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (tid < 0) {
		/* Create new tcam entry, searching upwards from the start */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0)
			return ai;

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* New entry must come before every single/triple entry */
		if (tid >= tid_aux)
			return -ERANGE;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		pe.index = tid;

		priv->prs_double_vlans[ai] = true;

		/* Match outer TPID at 0 and inner TPID past the outer tag */
		mvpp2_prs_match_etype(&pe, 0, tpid1);
		mvpp2_prs_match_etype(&pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		/* Shift 4 bytes - skip outer vlan tag */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);
	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
880
/* IPv4 header parsing for fragmentation and L4 offset.
 * Installs two tcam entries for @proto (TCP/UDP/IGMP only):
 * one matching non-fragmented packets (flags/frag-offset bytes zero)
 * and one for fragments which additionally sets RI_IP_FRAG_TRUE.
 * The second entry reuses the sw entry of the first, overriding only
 * the index, ri words and the fragmentation-related tcam bytes.
 * Returns 0 on success or a negative errno.
 */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	/* Flags + fragment offset bytes must be zero (not fragmented) */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK);

	/* Match the protocol byte of the IPv4 header */
	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Fragmented packet - reuse the sw entry built above */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	/* Don't care about the flags/fragment-offset bytes any more */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
951
/* Install an IPv4 L3 multicast or broadcast classification entry.
 * Multicast matches the 224.0.0.0/4 prefix in the first DIP byte;
 * broadcast matches 255.255.255.255 over all four DIP bytes.
 * Returns 0 on success or a negative errno.
 */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case  MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1002
/* Set an entry for an L4 protocol over IPv6 (TCP/UDP/ICMPv6/IPIP only).
 * Matches the next-header byte, records @ri in the result info, sets
 * the classifier L4 offset, and terminates the lookup at flow-id
 * generation. Returns 0 on success or a negative errno.
 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	/* Set L4 offset relative to the end of the IPv6 header */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Match the next-header byte */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1043
/* Install the IPv6 L3 multicast entry (only MVPP2_PRS_L3_MULTI_CAST is
 * supported). Matches the ff00::/8 prefix in the first DIP byte, marks
 * the result info as L3 multicast and continues IPv6 parsing after
 * shifting back to the next-header field.
 * Returns 0 on success or a negative errno.
 */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Continue IPv6 lookup (not finished - no LU_GEN bit here) */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1083
1084/* Parser per-port initialization */
1085static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1086 int lu_max, int offset)
1087{
1088 u32 val;
1089
1090 /* Set lookup ID */
1091 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1092 val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1093 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1094 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1095
1096 /* Set maximum number of loops for packet received from port */
1097 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1098 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1099 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1100 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1101
1102 /* Set initial offset for packet header extraction for the first
1103 * searching loop
1104 */
1105 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1106 val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1107 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1108 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1109}
1110
/* Default flow entries initialization for all ports.
 *
 * Creates one per-port flow entry at a fixed index
 * (MVPP2_PE_FIRST_DEFAULT_FLOW - port). The entries are written with an
 * empty port map (masked for all ports) as placeholders; the flow ID
 * stored in the sram ai field is the port number itself.
 */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID*/
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
1134
/* Set default entry for Marvell Header field.
 *
 * First lookup stage: unconditionally skip the Marvell Header
 * (MVPP2_MH_SIZE bytes) prepended to every received frame and continue
 * with the MAC (destination address) lookup.
 */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	/* Skip over the Marvell Header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
1155
/* Set default entries (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses.
 *
 * The non-promiscuous default drops any frame whose DA matched nothing;
 * the drop-all and promiscuous placeholders are created disabled and are
 * toggled later per port.
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Create dummy entries for drop all and promiscuous modes */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}
1186
/* Set default entries for various types of dsa packets.
 *
 * Creates placeholder entries (disabled, port-masked) for the tagged and
 * untagged DSA/EDSA variants, enables the DSA-ethertype defaults, and
 * installs the fall-through entry used when no DSA/EDSA tag is found.
 */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder*/
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* NOTE(review): shadow lu is recorded as LU_MAC although the tcam
	 * lookup is LU_DSA — confirm this is intentional (the default DSA
	 * entry logically belongs to the MAC stage).
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
1240
/* Initialize parser entries for VID filtering.
 *
 * Installs the two default (no-match) VID entries: one for regular/DSA
 * frames (skip 4 bytes of VLAN tag) and one for extended DSA frames
 * (skip 8 bytes), both continuing with the L2 ethertype lookup. The two
 * cases are distinguished by the EDSA_VID ai bit set by earlier stages.
 */
static void mvpp2_prs_vid_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Match only when the EDSA bit is clear (non-extended tagging) */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 4 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vid entry for extended DSA*/
	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Match only when the EDSA bit is set (extended tagging) */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
				 MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
1296
/* Match basic ethertypes.
 *
 * Installs L2-stage entries for PPPoE, ARP, LBTD, IPv4 (with and without
 * options), IPv6, plus the catch-all "unknown ethertype" default.
 * Entries are allocated from the free TCAM range, except the default
 * which lives at the fixed index MVPP2_PE_ETH_TYPE_UN. Returns 0 or a
 * negative errno when the TCAM is full.
 */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	/* Skip the PPPoE header and continue with the PPPoE lookup stage */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Loopback detection frames are redirected to the CPU */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	/* Also require version 4 / IHL == 5 in the first IP header byte */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options.
	 * Deliberately reuses the previous entry's contents (no memset):
	 * only the tcam byte match and the result info differ.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	/* Match on the version nibble only, so any IHL is accepted */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration*/
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1517
1518/* Configure vlan entries and detect up to 2 successive VLAN tags.
1519 * Possible options:
1520 * 0x8100, 0x88A8
1521 * 0x8100, 0x8100
1522 * 0x8100
1523 * 0x88A8
1524 */
1525static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
1526{
1527 struct mvpp2_prs_entry pe;
1528 int err;
1529
1530 priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
1531 MVPP2_PRS_DBL_VLANS_MAX,
1532 GFP_KERNEL);
1533 if (!priv->prs_double_vlans)
1534 return -ENOMEM;
1535
1536 /* Double VLAN: 0x8100, 0x88A8 */
1537 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
1538 MVPP2_PRS_PORT_MASK);
1539 if (err)
1540 return err;
1541
1542 /* Double VLAN: 0x8100, 0x8100 */
1543 err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
1544 MVPP2_PRS_PORT_MASK);
1545 if (err)
1546 return err;
1547
1548 /* Single VLAN: 0x88a8 */
1549 err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
1550 MVPP2_PRS_PORT_MASK);
1551 if (err)
1552 return err;
1553
1554 /* Single VLAN: 0x8100 */
1555 err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
1556 MVPP2_PRS_PORT_MASK);
1557 if (err)
1558 return err;
1559
1560 /* Set default double vlan entry */
1561 memset(&pe, 0, sizeof(pe));
1562 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1563 pe.index = MVPP2_PE_VLAN_DBL;
1564
1565 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
1566
1567 /* Clear ai for next iterations */
1568 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1569 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
1570 MVPP2_PRS_RI_VLAN_MASK);
1571
1572 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
1573 MVPP2_PRS_DBL_VLAN_AI_BIT);
1574 /* Unmask all ports */
1575 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1576
1577 /* Update shadow table and hw entry */
1578 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1579 mvpp2_prs_hw_write(priv, &pe);
1580
1581 /* Set default vlan none entry */
1582 memset(&pe, 0, sizeof(pe));
1583 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
1584 pe.index = MVPP2_PE_VLAN_NONE;
1585
1586 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1587 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
1588 MVPP2_PRS_RI_VLAN_MASK);
1589
1590 /* Unmask all ports */
1591 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1592
1593 /* Update shadow table and hw entry */
1594 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
1595 mvpp2_prs_hw_write(priv, &pe);
1596
1597 return 0;
1598}
1599
/* Set entries for PPPoE ethertype.
 *
 * Installs the PPPoE-stage entries for IPv4 (with and without options),
 * IPv6, and the non-IP catch-all. Returns 0 or a negative errno when the
 * TCAM is full.
 */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options.
	 * Deliberately reuses the previous entry (no memset): adds the
	 * version/IHL byte match and replaces only the result info.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE: no ethertype match, so this acts as the
	 * catch-all for the PPPoE stage
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1710
/* Initialize entries for IPv4.
 *
 * Installs the per-protocol entries (TCP/UDP/IGMP), the broadcast and
 * multicast address classifiers, and the two fixed defaults: unknown L4
 * protocol and unknown (unicast) address. Returns 0 or a negative errno.
 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* IGMP frames are flagged as special and redirected to the CPU */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	/* Skip 12 bytes to reach the destination-address lookup */
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	/* Next pass handles the DIP */
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	/* Only matches the first (protocol) pass, not the DIP pass */
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	/* Only matches the DIP pass (bcast/mcast entries take precedence) */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1793
/* Initialize entries for IPv6.
 *
 * Installs the per-protocol entries (TCP/UDP/ICMPv6/IPIP for DS-Lite),
 * the multicast classifier, a hop-limit-exhausted drop entry, and the
 * fixed defaults for unknown protocols, unknown extension protocols and
 * unknown (unicast) addresses. Returns 0 or a negative errno.
 */
static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid, err;

	/* Set entries for TCP, UDP and ICMP over IPv6 */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
				  MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
				  MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	/* ICMPv6 frames are flagged as special and redirected to the CPU */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
	/* Result Info: UDF7=1, DS lite */
	err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
				  MVPP2_PRS_RI_UDF7_IP6_LITE,
				  MVPP2_PRS_RI_UDF7_MASK);
	if (err)
		return err;

	/* IPv6 multicast */
	err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Entry for checking hop limit */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Hop limit exhausted: mark the frame for dropping */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
				 MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_L3_PROTO_MASK |
				 MVPP2_PRS_RI_DROP_MASK);

	/* Match hop-limit byte == 0 */
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);

	/* Update shadow table and hw entry.
	 * NOTE(review): shadow lu recorded as LU_IP4 for an IP6 entry —
	 * present in the original; confirm whether intentional.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);
	/* Set L4 offset relatively to our current place */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry.
	 * NOTE(review): shadow lu recorded as LU_IP4 — see note above.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unknown ext protocols */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	/* Matches only while an extension header is being processed */
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry.
	 * NOTE(review): shadow lu recorded as LU_IP4 — see note above.
	 */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv6 entry for unicast address */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;

	/* Finished: go to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPV6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1928
1929/* Find tcam entry with matched pair <vid,port> */
1930static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
1931 u16 mask)
1932{
1933 unsigned char byte[2], enable[2];
1934 struct mvpp2_prs_entry pe;
1935 u16 rvid, rmask;
1936 int tid;
1937
1938 /* Go through the all entries with MVPP2_PRS_LU_VID */
1939 for (tid = MVPP2_PE_VID_FILT_RANGE_START;
1940 tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
1941 if (!priv->prs_shadow[tid].valid ||
1942 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
1943 continue;
1944
1945 mvpp2_prs_init_from_hw(priv, &pe, tid);
1946
1947 mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
1948 mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
1949
1950 rvid = ((byte[0] & 0xf) << 8) + byte[1];
1951 rmask = ((enable[0] & 0xf) << 8) + enable[1];
1952
1953 if (rvid != vid || rmask != mask)
1954 continue;
1955
1956 return tid;
1957 }
1958
1959 return -ENOENT;
1960}
1961
/* Write parser entry for VID filtering.
 *
 * Adds (or updates) the per-port TCAM entry matching @vid in this port's
 * slice of the VID filter range. The skip amount depends on whether the
 * port uses extended DSA tagging. Returns 0 or a negative errno when the
 * port's VID slice is full.
 */
int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
{
	unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
				 port->id * MVPP2_PRS_VLAN_FILT_MAX;
	unsigned int mask = 0xfff, reg_val, shift;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <vid,port> already exist */
	tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);

	/* Extended DSA ports carry an 8-byte tag, others 4 bytes */
	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	/* No such entry */
	if (tid < 0) {

		/* Go through all entries from first to last in vlan range.
		 * NOTE(review): upper bound is vid_start +
		 * MVPP2_PRS_VLAN_FILT_MAX_ENTRY — confirm it stays within
		 * this port's slice and below the default entries.
		 */
		tid = mvpp2_prs_tcam_first_free(priv, vid_start,
						vid_start +
						MVPP2_PRS_VLAN_FILT_MAX_ENTRY);

		/* There isn't room for a new VID filter */
		if (tid < 0)
			return tid;

		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Enable the current port */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Set match on VID */
	mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
2025
2026/* Write parser entry for VID filtering */
2027void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
2028{
2029 struct mvpp2 *priv = port->priv;
2030 int tid;
2031
2032 /* Scan TCAM and see if entry with this <vid,port> already exist */
2033 tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
2034
2035 /* No such entry */
2036 if (tid < 0)
2037 return;
2038
2039 mvpp2_prs_hw_inv(priv, tid);
2040 priv->prs_shadow[tid].valid = false;
2041}
2042
2043/* Remove all existing VID filters on this port */
2044void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
2045{
2046 struct mvpp2 *priv = port->priv;
2047 int tid;
2048
2049 for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
2050 tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
2051 if (priv->prs_shadow[tid].valid)
2052 mvpp2_prs_vid_entry_remove(port, tid);
2053 }
2054}
2055
2056/* Remove VID filering entry for this port */
2057void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
2058{
2059 unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2060 struct mvpp2 *priv = port->priv;
2061
2062 /* Invalidate the guard entry */
2063 mvpp2_prs_hw_inv(priv, tid);
2064
2065 priv->prs_shadow[tid].valid = false;
2066}
2067
/* Add guard entry that drops packets when no VID is matched on this port.
 * The guard sits at the port's default slot, after the per-VID entries,
 * so it only catches VLAN frames that matched none of the programmed VIDs.
 * No-op if the guard is already installed.
 */
void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
{
	unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
	struct mvpp2 *priv = port->priv;
	unsigned int reg_val, shift;
	struct mvpp2_prs_entry pe;

	/* The entry is already in place: nothing to do */
	if (priv->prs_shadow[tid].valid)
		return;

	memset(&pe, 0, sizeof(pe));

	pe.index = tid;

	/* Header skip depends on the port's DSA tagging mode */
	reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
	if (reg_val & MVPP2_DSA_EXTENDED)
		shift = MVPP2_VLAN_TAG_EDSA_LEN;
	else
		shift = MVPP2_VLAN_TAG_LEN;

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Mask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, 0);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, true);

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Skip VLAN header - Set offset to 4 or 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Drop VLAN packets that don't belong to any VIDs on this port */
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Update shadow table */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
2114
/* Parser default initialization.
 * Clears and invalidates the whole TCAM/SRAM, allocates the shadow table,
 * then programs the default lookup chain (MH -> MAC -> DSA -> VID ->
 * ethertype -> VLAN -> PPPoE -> IPv6 -> IPv4).
 * Returns 0 on success or a negative errno.
 */
int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries (indirect access: select the
	 * index, then write each data word)
	 */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	/* Shadow table mirrors the hw state; devm-managed, freed with dev */
	priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(*priv->prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	mvpp2_prs_dsa_init(priv);

	mvpp2_prs_vid_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_vlan_init(pdev, priv);
	if (err)
		return err;

	err = mvpp2_prs_pppoe_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip6_init(priv);
	if (err)
		return err;

	err = mvpp2_prs_ip4_init(priv);
	if (err)
		return err;

	return 0;
}
2181
2182/* Compare MAC DA with tcam entry data */
2183static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2184 const u8 *da, unsigned char *mask)
2185{
2186 unsigned char tcam_byte, tcam_mask;
2187 int index;
2188
2189 for (index = 0; index < ETH_ALEN; index++) {
2190 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2191 if (tcam_mask != mask[index])
2192 return false;
2193
2194 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2195 return false;
2196 }
2197
2198 return true;
2199}
2200
2201/* Find tcam entry with matched pair <MAC DA, port> */
2202static int
2203mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2204 unsigned char *mask, int udf_type)
2205{
2206 struct mvpp2_prs_entry pe;
2207 int tid;
2208
2209 /* Go through the all entires with MVPP2_PRS_LU_MAC */
2210 for (tid = MVPP2_PE_MAC_RANGE_START;
2211 tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2212 unsigned int entry_pmap;
2213
2214 if (!priv->prs_shadow[tid].valid ||
2215 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2216 (priv->prs_shadow[tid].udf != udf_type))
2217 continue;
2218
2219 mvpp2_prs_init_from_hw(priv, &pe, tid);
2220 entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
2221
2222 if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
2223 entry_pmap == pmap)
2224 return tid;
2225 }
2226
2227 return -ENOENT;
2228}
2229
/* Update parser's mac da entry.
 * @add=true installs (or extends) a TCAM entry accepting @da on this port;
 * @add=false removes the port from the entry and invalidates it once no
 * port is left. The SRAM side classifies the frame (bcast/mcast/ucast,
 * MAC_ME) and shifts past the Ethernet header to the ethertype.
 * Returns 0 on success or a negative errno.
 */
int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
{
	/* Exact-match lookup: all six DA bytes fully masked */
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct mvpp2 *priv = port->priv;
	unsigned int pmap, len, ri;
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
	tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
					  MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (tid < 0) {
		/* Removing a non-existent entry is a successful no-op */
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Go through the all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv,
						MVPP2_PE_MAC_RANGE_START,
						MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		/* Entry exists: reload it to update the port map in place */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port->id, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		/* An empty map right after an add means the set failed */
		if (add)
			return -EINVAL;

		mvpp2_prs_hw_inv(priv, pe.index);
		priv->prs_shadow[pe.index].valid = false;
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Set result info bits */
	if (is_broadcast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_BCAST;
	} else if (is_multicast_ether_addr(da)) {
		ri = MVPP2_PRS_RI_L2_MCAST;
	} else {
		ri = MVPP2_PRS_RI_L2_UCAST;

		/* Flag frames addressed to the port's own MAC */
		if (ether_addr_equal(da, port->dev->dev_addr))
			ri |= MVPP2_PRS_RI_MAC_ME_MASK;
	}

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
2318
2319int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
2320{
2321 struct mvpp2_port *port = netdev_priv(dev);
2322 int err;
2323
2324 /* Remove old parser entry */
2325 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
2326 if (err)
2327 return err;
2328
2329 /* Add new parser entry */
2330 err = mvpp2_prs_mac_da_accept(port, da, true);
2331 if (err)
2332 return err;
2333
2334 /* Set addr in the device */
2335 ether_addr_copy(dev->dev_addr, da);
2336
2337 return 0;
2338}
2339
/* Remove every MAC DA filter active on this port, except the broadcast
 * address and the port's own address.
 */
void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_prs_entry pe;
	unsigned long pmap;
	int index, tid;

	for (tid = MVPP2_PE_MAC_RANGE_START;
	     tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
		unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];

		/* Only default-udf MAC lookup entries are candidates */
		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* We only want entries active on this port */
		if (!test_bit(port->id, &pmap))
			continue;

		/* Read mac addr from entry */
		for (index = 0; index < ETH_ALEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
						     &da_mask[index]);

		/* Special cases : Don't remove broadcast and port's own
		 * address
		 */
		if (is_broadcast_ether_addr(da) ||
		    ether_addr_equal(da, port->dev->dev_addr))
			continue;

		/* Remove entry from TCAM */
		mvpp2_prs_mac_da_accept(port, da, false);
	}
}
2380
/* Set the DSA tagging mode of a port by adding it to the matching
 * (E)DSA parser entries and removing it from the others.
 * Returns 0 on success, -EINVAL for an out-of-range @type.
 */
int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Add port to EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		/* Remove port from DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		break;

	case MVPP2_TAG_TYPE_DSA:
		/* Add port to DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, true,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		/* Remove port from EDSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* Remove port from EDSA and DSA entries */
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(priv, port, false,
				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
		break;

	default:
		/* Unknown values inside the valid range are silently
		 * accepted; only out-of-range types are rejected.
		 */
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return -EINVAL;
	}

	return 0;
}
2430
/* Set prs flow for the port.
 * Installs (or updates) the flow-table entry whose flow ID is the port id,
 * marking lookup as done. Returns 0 on success or a negative errno when no
 * free TCAM slot is available.
 */
int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_flow_find(port->priv, port->id);

	/* Such entry does not exist - create it */
	if (tid < 0) {
		/* Go through all the entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
					       MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe.index = tid;

		/* Set flow ID*/
		mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		/* Entry exists: reload it to refresh the port map */
		mvpp2_prs_init_from_hw(port->priv, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, &pe);

	return 0;
}