blob: c39b7a1887260ae52cd0fcc4d55408ff46a7eb9d [file] [log] [blame]
/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
36
37#include <linux/kernel.h>
38#include <linux/types.h>
39#include <linux/netdevice.h>
40#include <linux/etherdevice.h>
41#include <linux/slab.h>
42#include <linux/device.h>
43#include <linux/skbuff.h>
44#include <linux/if_vlan.h>
45#include <linux/if_bridge.h>
46#include <linux/workqueue.h>
47#include <linux/jiffies.h>
48#include <net/switchdev.h>
49
50#include "spectrum.h"
51#include "core.h"
52#include "reg.h"
53
54static int mlxsw_sp_port_attr_get(struct net_device *dev,
55 struct switchdev_attr *attr)
56{
57 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
58 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
59
60 switch (attr->id) {
61 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
62 attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
63 memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
64 attr->u.ppid.id_len);
65 break;
66 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
67 attr->u.brport_flags =
68 (mlxsw_sp_port->learning ? BR_LEARNING : 0) |
69 (mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0);
70 break;
71 default:
72 return -EOPNOTSUPP;
73 }
74
75 return 0;
76}
77
/* Program the bridge STP state of @mlxsw_sp_port into the device for
 * all of the port's active VLANs, via the SPMS register.
 *
 * Bridge states are collapsed onto the device's three spanning-tree
 * states: DISABLED/FORWARDING -> forwarding, LISTENING/LEARNING ->
 * learning, BLOCKING -> discarding.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the register
 * write's error code.
 */
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		/* Callers only pass the BR_STATE_* values above. */
		BUG();
	}

	/* Heap-allocate the SPMS payload and fold every active VLAN's
	 * state into a single register write.
	 */
	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
114
115static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
116 struct switchdev_trans *trans,
117 u8 state)
118{
119 if (switchdev_trans_ph_prepare(trans))
120 return 0;
121
122 mlxsw_sp_port->stp_state = state;
123 return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
124}
125
126static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
127 struct switchdev_trans *trans,
128 unsigned long brport_flags)
129{
130 if (switchdev_trans_ph_prepare(trans))
131 return 0;
132
133 mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
134 mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;
135 return 0;
136}
137
138static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
139{
140 char sfdat_pl[MLXSW_REG_SFDAT_LEN];
141 int err;
142
143 mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
144 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
145 if (err)
146 return err;
147 mlxsw_sp->ageing_time = ageing_time;
148 return 0;
149}
150
151static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
152 struct switchdev_trans *trans,
153 unsigned long ageing_jiffies)
154{
155 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
156 u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
157
158 if (switchdev_trans_ph_prepare(trans))
159 return 0;
160
161 return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
162}
163
164static int mlxsw_sp_port_attr_set(struct net_device *dev,
165 const struct switchdev_attr *attr,
166 struct switchdev_trans *trans)
167{
168 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
169 int err = 0;
170
171 switch (attr->id) {
172 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
173 err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
174 attr->u.stp_state);
175 break;
176 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
177 err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
178 attr->u.brport_flags);
179 break;
180 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
181 err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
182 attr->u.ageing_time);
183 break;
184 default:
185 err = -EOPNOTSUPP;
186 break;
187 }
188
189 return err;
190}
191
192static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
193{
194 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
195 char spvid_pl[MLXSW_REG_SPVID_LEN];
196
197 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
198 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
199}
200
201static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
202{
203 char sfmr_pl[MLXSW_REG_SFMR_LEN];
204 int err;
205
206 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
207 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
208
209 if (err)
210 return err;
211
212 set_bit(fid, mlxsw_sp->active_fids);
213 return 0;
214}
215
216static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
217{
218 char sfmr_pl[MLXSW_REG_SFMR_LEN];
219
220 clear_bit(fid, mlxsw_sp->active_fids);
221
222 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
223 fid, fid);
224 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
225}
226
227static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
228{
229 enum mlxsw_reg_svfa_mt mt;
230
231 if (mlxsw_sp_port->nr_vfids)
232 mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
233 else
234 mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
235
236 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
237}
238
239static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
240{
241 enum mlxsw_reg_svfa_mt mt;
242
243 if (!mlxsw_sp_port->nr_vfids)
244 return 0;
245
246 mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
247 return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
248}
249
250static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
251 u16 fid, bool set, bool only_uc)
252{
253 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
254 char *sftr_pl;
255 int err;
256
257 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
258 if (!sftr_pl)
259 return -ENOMEM;
260
261 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid,
262 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0,
263 mlxsw_sp_port->local_port, set);
264 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
265 if (err)
266 goto buffer_out;
267
268 /* Flooding control allows one to decide whether a given port will
269 * flood unicast traffic for which there is no FDB entry.
270 */
271 if (only_uc)
272 goto buffer_out;
273
274 mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid,
275 MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, 0,
276 mlxsw_sp_port->local_port, set);
277 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
278
279buffer_out:
280 kfree(sftr_pl);
281 return err;
282}
283
284static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
285 u16 vid_end)
286{
287 u16 vid;
288 int err;
289
290 for (vid = vid_begin; vid <= vid_end; vid++) {
291 err = mlxsw_sp_port_add_vid(dev, 0, vid);
292 if (err)
293 goto err_port_add_vid;
294 }
295 return 0;
296
297err_port_add_vid:
298 for (vid--; vid >= vid_begin; vid--)
299 mlxsw_sp_port_kill_vid(dev, 0, vid);
300 return err;
301}
302
/* Add VIDs [@vid_begin, @vid_end] to a bridged port:
 *  1. ensure a FID (and its VID-to-FID mapping) exists per VID,
 *  2. map the FID on this port and enable flooding for it,
 *  3. program VLAN membership in SPVM-sized chunks,
 *  4. optionally set the PVID,
 *  5. mark the VIDs active and re-apply the cached STP state so the
 *     new VLANs inherit it.
 *
 * For an unbridged port (BRIDGE_FLAGS_SELF case) only CPU trapping is
 * configured via mlxsw_sp_port_add_vids().
 *
 * Returns 0 on success or the first error; partial progress is not
 * rolled back here.
 */
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	enum mlxsw_reg_svfa_mt mt;
	u16 vid, vid_e;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* FIDs are 1:1 with VIDs here; create on first use. */
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				return err;
			}
		}

		/* Set FID mapping according to port's mode */
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d", vid);
			return err;
		}

		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, true,
						false);
		if (err) {
			netdev_err(dev, "Failed to set flooding for FID=%d",
				   vid);
			return err;
		}
	}

	/* Program membership in chunks of up to SPVM_REC_MAX_COUNT
	 * records per register write.
	 */
	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true,
					     flag_untagged);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n",
				   vid, vid_e);
			return err;
		}
	}

	/* The PVID flag only applies to the first VID of the range. */
	vid = vid_begin;
	if (flag_pvid && mlxsw_sp_port->pvid != vid) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n",
				   vid);
			return err;
		}
		mlxsw_sp_port->pvid = vid;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		set_bit(vid, mlxsw_sp_port->active_vlans);

	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					   mlxsw_sp_port->stp_state);
}
390
391static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
392 const struct switchdev_obj_port_vlan *vlan,
393 struct switchdev_trans *trans)
394{
395 bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
396 bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;
397
398 if (switchdev_trans_ph_prepare(trans))
399 return 0;
400
401 return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
402 vlan->vid_begin, vlan->vid_end,
403 untagged_flag, pvid_flag);
404}
405
406static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port,
407 const char *mac, u16 vid, bool adding,
408 bool dynamic)
409{
410 enum mlxsw_reg_sfd_rec_policy policy;
411 enum mlxsw_reg_sfd_op op;
412 char *sfd_pl;
413 int err;
414
415 if (!vid)
416 vid = mlxsw_sp_port->pvid;
417
418 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
419 if (!sfd_pl)
420 return -ENOMEM;
421
422 policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
423 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
424 op = adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
425 MLXSW_REG_SFD_OP_WRITE_REMOVE;
426 mlxsw_reg_sfd_pack(sfd_pl, op, 0);
427 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy,
428 mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
429 mlxsw_sp_port->local_port);
430 err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd),
431 sfd_pl);
432 kfree(sfd_pl);
433
434 return err;
435}
436
437static int
438mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
439 const struct switchdev_obj_port_fdb *fdb,
440 struct switchdev_trans *trans)
441{
442 if (switchdev_trans_ph_prepare(trans))
443 return 0;
444
445 return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
446 true, false);
447}
448
449static int mlxsw_sp_port_obj_add(struct net_device *dev,
450 const struct switchdev_obj *obj,
451 struct switchdev_trans *trans)
452{
453 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
454 int err = 0;
455
456 switch (obj->id) {
457 case SWITCHDEV_OBJ_ID_PORT_VLAN:
458 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
459 SWITCHDEV_OBJ_PORT_VLAN(obj),
460 trans);
461 break;
462 case SWITCHDEV_OBJ_ID_PORT_FDB:
463 err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
464 SWITCHDEV_OBJ_PORT_FDB(obj),
465 trans);
466 break;
467 default:
468 err = -EOPNOTSUPP;
469 break;
470 }
471
472 return err;
473}
474
475static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
476 u16 vid_end)
477{
478 u16 vid;
479 int err;
480
481 for (vid = vid_begin; vid <= vid_end; vid++) {
482 err = mlxsw_sp_port_kill_vid(dev, 0, vid);
483 if (err)
484 return err;
485 }
486
487 return 0;
488}
489
490static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
491 u16 vid_begin, u16 vid_end, bool init)
492{
493 struct net_device *dev = mlxsw_sp_port->dev;
494 u16 vid, vid_e;
495 int err;
496
497 /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
498 * not bridged, then prevent packets ingressing through the
499 * port with the specified VIDs from being trapped to CPU.
500 */
501 if (!init && !mlxsw_sp_port->bridged)
502 return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);
503
504 for (vid = vid_begin; vid <= vid_end;
505 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
506 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
507 vid_end);
508 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false,
509 false);
510 if (err) {
511 netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n",
512 vid, vid_e);
513 return err;
514 }
515 }
516
517 if ((mlxsw_sp_port->pvid >= vid_begin) &&
518 (mlxsw_sp_port->pvid <= vid_end)) {
519 /* Default VLAN is always 1 */
520 mlxsw_sp_port->pvid = 1;
521 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port,
522 mlxsw_sp_port->pvid);
523 if (err) {
524 netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n",
525 vid);
526 return err;
527 }
528 }
529
530 if (init)
531 goto out;
532
533 for (vid = vid_begin; vid <= vid_end; vid++) {
534 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, false,
535 false);
536 if (err) {
537 netdev_err(dev, "Failed to clear flooding for FID=%d",
538 vid);
539 return err;
540 }
541
542 /* Remove FID mapping in case of Virtual mode */
543 err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
544 if (err) {
545 netdev_err(dev, "Failed to unmap FID=%d", vid);
546 return err;
547 }
548 }
549
550out:
551 /* Changing activity bits only if HW operation succeded */
552 for (vid = vid_begin; vid <= vid_end; vid++)
553 clear_bit(vid, mlxsw_sp_port->active_vlans);
554
555 return 0;
556}
557
558static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
559 const struct switchdev_obj_port_vlan *vlan)
560{
561 return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
562 vlan->vid_begin, vlan->vid_end, false);
563}
564
565static int
566mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
567 const struct switchdev_obj_port_fdb *fdb)
568{
569 return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
570 false, false);
571}
572
573static int mlxsw_sp_port_obj_del(struct net_device *dev,
574 const struct switchdev_obj *obj)
575{
576 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
577 int err = 0;
578
579 switch (obj->id) {
580 case SWITCHDEV_OBJ_ID_PORT_VLAN:
581 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
582 SWITCHDEV_OBJ_PORT_VLAN(obj));
583 break;
584 case SWITCHDEV_OBJ_ID_PORT_FDB:
585 err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
586 SWITCHDEV_OBJ_PORT_FDB(obj));
587 break;
588 default:
589 err = -EOPNOTSUPP;
590 break;
591 }
592
593 return err;
594}
595
/* Dump the device's FDB and invoke @cb for every unicast entry that
 * belongs to this port.
 *
 * The dump is a firmware session spanning multiple SFD queries; even
 * if the callback fails mid-way, the remaining queries are issued so
 * the session is properly finished — the first callback error is
 * stashed and returned afterwards.
 *
 * Returns 0 on success, a query error, or the stashed callback error.
 */
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb)
{
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 vid;
	u8 local_port;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		/* Request the maximum record count per query; a short
		 * reply signals the end of the dump.
		 */
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core,
				      MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
							&local_port);
				/* Only report entries owned by this port. */
				if (local_port == mlxsw_sp_port->local_port) {
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					fdb->vid = vid;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}
650
651static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
652 struct switchdev_obj_port_vlan *vlan,
653 switchdev_obj_dump_cb_t *cb)
654{
655 u16 vid;
656 int err = 0;
657
658 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
659 vlan->flags = 0;
660 if (vid == mlxsw_sp_port->pvid)
661 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
662 vlan->vid_begin = vid;
663 vlan->vid_end = vid;
664 err = cb(&vlan->obj);
665 if (err)
666 break;
667 }
668 return err;
669}
670
671static int mlxsw_sp_port_obj_dump(struct net_device *dev,
672 struct switchdev_obj *obj,
673 switchdev_obj_dump_cb_t *cb)
674{
675 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
676 int err = 0;
677
678 switch (obj->id) {
679 case SWITCHDEV_OBJ_ID_PORT_VLAN:
680 err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
681 SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
682 break;
683 case SWITCHDEV_OBJ_ID_PORT_FDB:
684 err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
685 SWITCHDEV_OBJ_PORT_FDB(obj), cb);
686 break;
687 default:
688 err = -EOPNOTSUPP;
689 break;
690 }
691
692 return err;
693}
694
/* switchdev operations installed on every Spectrum port netdev. */
const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};
702
/* Handle one learned/aged-out MAC record from an SFN notification:
 * mirror the entry into/out of the device FDB (a learned entry is only
 * kept if the port has learning enabled) and, when learning_sync is
 * also set, notify the bridge via the switchdev FDB notifier chain.
 *
 * Errors are logged (ratelimited) and otherwise ignored — this runs
 * from the polling workqueue with no caller to report to.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		return;
	}

	/* Re-program the entry as dynamic; a learned MAC on a
	 * non-learning port is effectively removed (adding == false).
	 */
	err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid,
				   adding && mlxsw_sp_port->learning, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) {
		struct switchdev_notifier_fdb_info info;
		unsigned long notifier_type;

		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev,
					 &info.info);
	}
}
739
740static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
741 char *sfn_pl, int rec_index)
742{
743 switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
744 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
745 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
746 rec_index, true);
747 break;
748 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
749 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
750 rec_index, false);
751 break;
752 }
753}
754
755static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
756{
757 schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
758 msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
759}
760
/* Delayed-work handler that polls the device for FDB notifications
 * (learned / aged-out MACs) via the SFN register, processes every
 * returned record, and re-arms itself.
 *
 * Keeps querying while the device still has records pending
 * (num_rec != 0); a query error just stops this round early.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		/* No payload buffer; skip this round, next one is
		 * scheduled below only on the success path — NOTE:
		 * returning here leaves the work unscheduled.
		 */
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}
791
792static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
793{
794 int err;
795
796 err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
797 if (err) {
798 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
799 return err;
800 }
801 INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
802 mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
803 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
804 return 0;
805}
806
/* Stop the FDB notification poller and wait for a running pass to
 * finish before teardown continues.
 */
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}
811
/* Destroy every FID still marked active in the driver's bitmap. */
static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}
819
/* switchdev-related init for the whole ASIC; currently just the FDB
 * machinery.
 */
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}
824
/* switchdev-related teardown: stop the FDB poller first, then release
 * the remaining FIDs.
 */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}
830
831int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
832{
833 struct net_device *dev = mlxsw_sp_port->dev;
834 int err;
835
836 /* Allow only untagged packets to ingress and tag them internally
837 * with VID 1.
838 */
839 mlxsw_sp_port->pvid = 1;
840 err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true);
841 if (err) {
842 netdev_err(dev, "Unable to init VLANs\n");
843 return err;
844 }
845
846 /* Add implicit VLAN interface in the device, so that untagged
847 * packets will be classified to the default vFID.
848 */
849 err = mlxsw_sp_port_add_vid(dev, 0, 1);
850 if (err)
851 netdev_err(dev, "Failed to configure default vFID\n");
852
853 return err;
854}
855
/* Attach the switchdev ops to the port's netdev. */
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}
860
/* Per-port switchdev teardown; nothing to undo at present. */
void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}