/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

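/* Resolve the FID that FDB entries for this port and VID should use: the
 * vPort's FID when one is assigned, otherwise the VID itself (VLAN-aware
 * bridge), falling back to the port's PVID when no VID is given.
 */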
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 vid)
{
	struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	u16 fid = vid;

	fid = f ? f->fid : fid;

	if (!fid)
		fid = mlxsw_sp_port->pvid;

	return fid;
}

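/* switchdev attributes and objects carry the originating device, which may be
 * a bridge or a VLAN upper rather than the port itself. Map it back to the
 * mlxsw port (or the vPort representing the VLAN / bridge) that should be
 * programmed.
 */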
static struct mlxsw_sp_port *
mlxsw_sp_port_orig_get(struct net_device *dev,
		       struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_fid *fid;
	u16 vid;

	if (netif_is_bridge_master(dev)) {
		fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp,
					 dev);
		if (fid) {
			mlxsw_sp_vport =
				mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								fid->fid);
			WARN_ON(!mlxsw_sp_vport);
			return mlxsw_sp_vport;
		}
	}

	if (!is_vlan_dev(dev))
		return mlxsw_sp_port;

	vid = vlan_dev_vlan_id(dev);
	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	WARN_ON(!mlxsw_sp_vport);

	return mlxsw_sp_vport;
}

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

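/* Program the bridge STP state into the SPMS register. Listening, disabled
 * and blocking all map to the hardware discarding state. For a vPort only its
 * own VID is updated, otherwise all active VLANs on the port are.
 */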
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	} else {
		for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
			mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

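/* Add or remove the port from one flood table (UC, BC or MC) of the SFTR
 * register for a contiguous range of FIDs (or vFID offsets for vPorts).
 */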
static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   u16 idx_begin, u16 idx_end,
					   enum mlxsw_sp_flood_table table,
					   bool set)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 local_port = mlxsw_sp_port->local_port;
	enum mlxsw_flood_table_type table_type;
	u16 range = idx_end - idx_begin + 1;
	char *sftr_pl;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
	else
		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin,
			    table_type, range, local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

	kfree(sftr_pl);
	return err;
}

static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 idx_begin, u16 idx_end, bool uc_set,
				     bool bc_set, bool mc_set)
{
	int err;

	err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					      MLXSW_SP_FLOOD_TABLE_UC, uc_set);
	if (err)
		return err;

	err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					      MLXSW_SP_FLOOD_TABLE_BC, bc_set);
	if (err)
		goto err_flood_bm_set;

	err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					      MLXSW_SP_FLOOD_TABLE_MC, mc_set);
	if (err)
		goto err_flood_mc_set;
	return 0;

err_flood_mc_set:
	__mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					MLXSW_SP_FLOOD_TABLE_BC, !bc_set);
err_flood_bm_set:
	__mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
					MLXSW_SP_FLOOD_TABLE_UC, !uc_set);
	return err;
}

static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 enum mlxsw_sp_flood_table table,
					 bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
		u16 vfid = mlxsw_sp_fid_to_vfid(fid);

		return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid,
						       vfid, table, set);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid,
						      table, set);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table,
						!set);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 bool mc_disabled)
{
	int set;
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) {
		set = mc_disabled ?
			mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
		err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
						    MLXSW_SP_FLOOD_TABLE_MC,
						    set);
	}

	if (!err)
		mlxsw_sp_port->mc_disabled = mc_disabled;

	return err;
}

int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
			     bool set)
{
	bool mc_set = set;
	u16 vfid;

	/* In case of vFIDs, index into the flooding table is relative to
	 * the start of the vFIDs range.
	 */
	vfid = mlxsw_sp_fid_to_vfid(fid);

	if (set)
		mc_set = mlxsw_sp_vport->mc_disabled ?
			mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router;

	return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set,
					 mc_set);
}

static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	u16 vid;
	int err;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);

		return __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
							set);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid,
						       set);
		if (err)
			goto err_port_vid_learning_set;
	}

	return 0;

err_port_vid_learning_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, vid, !set);
	return err;
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long learning = mlxsw_sp_port->learning ? BR_LEARNING : 0;
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
						    MLXSW_SP_FLOOD_TABLE_UC,
						    !mlxsw_sp_port->uc_flood);
		if (err)
			return err;
	}

	if ((learning ^ brport_flags) & BR_LEARNING) {
		err = mlxsw_sp_port_learning_set(mlxsw_sp_port,
						 !mlxsw_sp_port->learning);
		if (err)
			goto err_port_learning_set;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;

err_port_learning_set:
	if ((uc_flood ^ brport_flags) & BR_FLOOD)
		mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
					      MLXSW_SP_FLOOD_TABLE_UC,
					      mlxsw_sp_port->uc_flood);
	return err;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans)) {
		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
			return -ERANGE;
		else
			return 0;
	}

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	/* SWITCHDEV_TRANS_PREPARE phase */
	if ((!vlan_enabled) && (mlxsw_sp->master_bridge.dev == orig_dev)) {
		netdev_err(mlxsw_sp_port->dev, "Bridge must be vlan-aware\n");
		return -EINVAL;
	}

	return 0;
}

static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    bool is_port_mc_router)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->mc_router = is_port_mc_router;
	if (!mlxsw_sp_port->mc_disabled)
		return mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
						     MLXSW_SP_FLOOD_TABLE_MC,
						     is_port_mc_router);

	return 0;
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(attr->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
						       attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->u.mc_disabled);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

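/* FID (filtering identifier) management: FIDs are created and destroyed via
 * the SFMR register, and the global VID-to-FID mapping is maintained via SVFA.
 */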
static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->fid = fid;

	return f;
}

struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	struct mlxsw_sp_fid *f;
	int err;

	err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
	if (err)
		return ERR_PTR(err);

	/* Although all the ports that are members of the FID might be using
	 * a {Port, VID} to FID mapping, we create a global VID-to-FID
	 * mapping. This allows a port to transition to VLAN mode,
	 * knowing the global mapping exists.
	 */
	err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
	if (err)
		goto err_fid_map;

	f = mlxsw_sp_fid_alloc(fid);
	if (!f) {
		err = -ENOMEM;
		goto err_allocate_fid;
	}

	list_add(&f->list, &mlxsw_sp->fids);

	return f;

err_allocate_fid:
	mlxsw_sp_fid_map(mlxsw_sp, fid, false);
err_fid_map:
	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
	return ERR_PTR(err);
}

void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
{
	u16 fid = f->fid;

	list_del(&f->list);

	if (f->rif)
		mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->rif);

	kfree(f);

	mlxsw_sp_fid_map(mlxsw_sp, fid, false);

	mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}

static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid)
{
	struct mlxsw_sp_fid *f;

	if (test_bit(fid, mlxsw_sp_port->active_vlans))
		return 0;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (!f) {
		f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
		if (IS_ERR(f))
			return PTR_ERR(f);
	}

	f->ref_count++;

	netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);

	return 0;
}

static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 fid)
{
	struct mlxsw_sp_fid *f;

	f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
	if (WARN_ON(!f))
		return;

	netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);

	mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);

	if (--f->ref_count == 0)
		mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
				 bool valid)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;

	/* If port doesn't have vPorts, then it can use the global
	 * VID-to-FID mapping.
	 */
	if (list_empty(&mlxsw_sp_port->vports_list))
		return 0;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
}

static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  u16 fid_begin, u16 fid_end)
{
	bool mc_flood;
	int fid, err;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
		if (err)
			goto err_port_fid_join;
	}

	mc_flood = mlxsw_sp_port->mc_disabled ?
			mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
					mlxsw_sp_port->uc_flood, true,
					mc_flood);
	if (err)
		goto err_port_flood_set;

	for (fid = fid_begin; fid <= fid_end; fid++) {
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
		if (err)
			goto err_port_fid_map;
	}

	return 0;

err_port_fid_map:
	for (fid--; fid >= fid_begin; fid--)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false, false);
err_port_flood_set:
	fid = fid_end;
err_port_fid_join:
	for (fid--; fid >= fid_begin; fid--)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
	return err;
}

static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 fid_begin, u16 fid_end)
{
	int fid;

	for (fid = fid_begin; fid <= fid_end; fid++)
		mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);

	__mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
				  false, false);

	for (fid = fid_begin; fid <= fid_end; fid++)
		__mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
}

static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}

int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err) {
			netdev_err(dev, "Failed to disallow untagged traffic\n");
			return err;
		}
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to set PVID\n");
			return err;
		}

		/* Only allow if not already allowed. */
		if (!mlxsw_sp_port->pvid) {
			err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
							       true);
			if (err) {
				netdev_err(dev, "Failed to allow untagged traffic\n");
				goto err_port_allow_untagged_set;
			}
		}
	}

	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid_begin, u16 vid_end,
					  bool learn_enable)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVMLR_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVMLR_REC_MAX_COUNT - 1),
			    vid_end);

		err = __mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
						       vid_e, learn_enable);
		if (err)
			return err;
	}

	return 0;
}

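/* Add a range of VLANs to a bridged port: join the corresponding FIDs, set
 * VLAN membership and untagged/PVID state, program learning, and finally
 * re-apply the port's STP state for the newly active VLANs. Each step is
 * rolled back on failure.
 */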
static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, old_pvid;
	int err;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
	if (err) {
		netdev_err(dev, "Failed to join FIDs\n");
		return err;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		goto err_port_vlans_set;
	}

	old_pvid = mlxsw_sp_port->pvid;
	if (flag_pvid && old_pvid != vid_begin) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid_begin);
		if (err) {
			netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
			goto err_port_pvid_set;
		}
	} else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
		if (err) {
			netdev_err(dev, "Unable to del PVID\n");
			goto err_port_pvid_set;
		}
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
					     mlxsw_sp_port->learning);
	if (err) {
		netdev_err(dev, "Failed to set learning for VIDs %d-%d\n",
			   vid_begin, vid_end);
		goto err_port_vid_learning_set;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++) {
		set_bit(vid, mlxsw_sp_port->active_vlans);
		if (flag_untagged)
			set_bit(vid, mlxsw_sp_port->untagged_vlans);
		else
			clear_bit(vid, mlxsw_sp_port->untagged_vlans);
	}

	/* STP state change must be done after we set active VLANs */
	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					  mlxsw_sp_port->stp_state);
	if (err) {
		netdev_err(dev, "Failed to set STP state\n");
		goto err_port_stp_state_set;
	}

	return 0;

err_port_stp_state_set:
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
				       false);
err_port_vid_learning_set:
	if (old_pvid != mlxsw_sp_port->pvid)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);
err_port_vlans_set:
	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
	return err;
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 flag_untagged, flag_pvid);
}

static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
}

static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
{
	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
			MLXSW_REG_SFD_OP_WRITE_REMOVE;
}

static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
			      mac, fid, action, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				   const char *mac, u16 fid, bool adding,
				   bool dynamic)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
}

int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 false);
}

static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid, true, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   true, false);
}

static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid, bool adding)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}

static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid,
				  bool add, bool clear_all_ports)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add);
	if (clear_all_ports) {
		for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
			if (mlxsw_sp->ports[i])
				mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp,
					      const unsigned char *addr,
					      u16 fid)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) {
		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
			return mid;
	}
	return NULL;
}

static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
						const unsigned char *addr,
						u16 fid)
{
	struct mlxsw_sp_mid *mid;
	u16 mid_idx;

	mid_idx = find_first_zero_bit(mlxsw_sp->br_mids.mapped,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return NULL;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	set_bit(mid_idx, mlxsw_sp->br_mids.mapped);
	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->mid = mid_idx;
	mid->ref_count = 0;
	list_add_tail(&mid->list, &mlxsw_sp->br_mids.list);

	return mid;
}

static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_mid *mid)
{
	if (--mid->ref_count == 0) {
		list_del(&mid->list);
		clear_bit(mid->mid, mlxsw_sp->br_mids.mapped);
		kfree(mid);
		return 1;
	}
	return 0;
}

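/* Add the port to the multicast group identified by {MAC, FID}: allocate (or
 * reuse) a MID index, program the SMID port mask and, for the first member,
 * write the multicast SFD record.
 */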
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	int err = 0;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	mid->ref_count++;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true,
				     mid->ref_count == 1);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	if (mid->ref_count == 1) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid->mid,
					   true);
		if (err) {
			netdev_err(dev, "Unable to set MC SFD\n");
			goto err_out;
		}
	}

	return 0;

err_out:
	__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid);
	return err;
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end)
{
	u16 vid, pvid;

	if (!mlxsw_sp_port->bridged)
		return -EINVAL;

	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid_begin, vid_end,
				       false);

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end)
		mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);

	__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
				  false);

	mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
					 vlan->vid_end);
}

void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		__mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, fdb->vid);
	u16 lag_vid = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
	}

	if (!mlxsw_sp_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp_port->mlxsw_sp,
					       mlxsw_sp_port->local_port,
					       fdb->addr, fid,
					       false, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp_port->mlxsw_sp,
						   mlxsw_sp_port->lag_id,
						   fdb->addr, fid, lag_vid,
						   false, false);
}

static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_mid *mid;
	u16 fid = mlxsw_sp_port_vid_to_fid_get(mlxsw_sp_port, mdb->vid);
	u16 mid_idx;
	int err = 0;

	mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false);
	if (err)
		netdev_err(dev, "Unable to remove port from SMID\n");

	mid_idx = mid->mid;
	if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) {
		err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid, mid_idx,
					   false);
		if (err)
			netdev_err(dev, "Unable to remove MC SFD\n");
	}

	return err;
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
			return 0;

		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
						   u16 lag_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u64 max_lag_members;
	int i;

	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					     MAX_LAG_MEMBERS);
	for (i = 0; i < max_lag_members; i++) {
		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
		if (mlxsw_sp_port)
			return mlxsw_sp_port;
	}
	return NULL;
}

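/* Walk the hardware FDB via an SFD query/dump session and report unicast and
 * unicast-LAG records that belong to this port (or its LAG) through the
 * switchdev dump callback. Even on error the dump loop runs to completion so
 * the firmware session is properly finished.
 */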
static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb,
				  struct net_device *orig_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *tmp;
	struct mlxsw_sp_fid *f;
	u16 vport_fid;
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 fid;
	u8 local_port;
	u16 lag_id;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
	vport_fid = f ? f->fid : 0;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
				mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
							    mac, &fid, &lag_id);
				tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
				if (tmp && tmp->local_port ==
				    mlxsw_sp_port->local_port) {
					/* LAG records can only point to LAG
					 * devices or VLAN devices on top.
					 */
					if (!netif_is_lag_master(orig_dev) &&
					    !is_vlan_dev(orig_dev))
						continue;
					if (vport_fid && vport_fid == fid)
						fdb->vid = 0;
					else if (!vport_fid &&
						 !mlxsw_sp_fid_is_vfid(fid))
						fdb->vid = fid;
					else
						continue;
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
				break;
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
		vlan->flags = 0;
		vlan->vid_begin = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		vlan->vid_end = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
		return cb(&vlan->obj);
	}

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		if (test_bit(vid, mlxsw_sp_port->untagged_vlans))
			vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	mlxsw_sp_port = mlxsw_sp_port_orig_get(obj->orig_dev, mlxsw_sp_port);
	if (!mlxsw_sp_port)
		return -EINVAL;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb,
					     obj->orig_dev);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get = mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set = mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add = mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del = mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump = mlxsw_sp_port_obj_dump,
};

static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
					char *mac, u16 vid,
					struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info;
	unsigned long notifier_type;

	if (learning_sync) {
		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, dev, &info.info);
	}
}

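/* Handle a learned / aged-out MAC notification (SFN) for a physical port:
 * mirror the entry into the FDB with an SFD write, then notify the bridge via
 * switchdev if learning sync is enabled. Entries that cannot be matched to a
 * port or vPort are simply removed from hardware.
 */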
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		vid = fid;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
				    adding, mac, vid, mlxsw_sp_port->dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

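/* LAG variant of the handler above: the record carries a LAG ID instead
 * of a local port, so the LAG port representor is looked up first and
 * the entry is programmed with mlxsw_sp_port_fdb_uc_lag_op(). The bridge
 * notification is sent against the LAG (or its VLAN upper) net_device
 * rather than a front-panel port.
 */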
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_vfid(fid)) {
		struct mlxsw_sp_port *mlxsw_sp_vport;

		mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
								 fid);
		if (!mlxsw_sp_vport) {
			netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
			goto just_remove;
		}

		lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
		dev = mlxsw_sp_vport->dev;
		vid = 0;
		/* Override the physical port with the vPort. */
		mlxsw_sp_port = mlxsw_sp_vport;
	} else {
		dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
		vid = fid;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
				    vid, dev);
	return;

just_remove:
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}

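/* Dispatch one SFN record to the MAC or MAC-LAG handler, mapping the
 * LEARNED record types to addition and the AGED_OUT types to removal.
 */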
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	}
}

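/* Re-arm the FDB notification polling work after fdb_notify.interval
 * milliseconds (converted to jiffies for the delayed work API).
 */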
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
			       msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

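/* Delayed work that polls the device for FDB notifications: a single SFN
 * query is issued and every returned record is processed under RTNL, so
 * that the ports and vPorts resolved by the record handlers stay stable
 * while they are being used. The work then re-arms itself.
 */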
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	rtnl_lock();
	mlxsw_reg_sfn_pack(sfn_pl);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
		goto out;
	}
	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
	for (i = 0; i < num_rec; i++)
		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

out:
	rtnl_unlock();
	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

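/* One-time FDB setup: program the default ageing time into the device
 * and start the periodic notification polling with the default learning
 * interval.
 */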
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

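/* Entry points used by the main spectrum driver: switchdev support here
 * currently consists of the FDB machinery above, while the per-port init
 * hooks the switchdev ops into each port netdev.
 */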
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}