/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"

static int mlxsw_sp_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
		       attr->u.ppid.id_len);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		attr->u.brport_flags =
			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

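/* Apply the given bridge STP state to all active VLANs on the port by
 * programming the per-VID state through the SPMS register.
 */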
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u8 state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	enum mlxsw_reg_spms_state spms_state;
	char *spms_pl;
	u16 vid;
	int err;

	switch (state) {
	case BR_STATE_DISABLED: /* fall-through */
	case BR_STATE_FORWARDING:
		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
		break;
	case BR_STATE_LISTENING: /* fall-through */
	case BR_STATE_LEARNING:
		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
		break;
	case BR_STATE_BLOCKING:
		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
		break;
	default:
		BUG();
	}

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    u8 state)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	mlxsw_sp_port->stp_state = state;
	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}

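/* Set or clear the port's membership in the UC and BM flood tables for a
 * range of FIDs via the SFTR register; with only_uc, only the unicast
 * flood table is touched.
 */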
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 fid_begin, u16 fid_end, bool set,
				     bool only_uc)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 range = fid_end - fid_begin + 1;
	char *sftr_pl;
	int err;

	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
	if (!sftr_pl)
		return -ENOMEM;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
			    mlxsw_sp_port->local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
	if (err)
		goto buffer_out;

	/* Flooding control allows one to decide whether a given port will
	 * flood unicast traffic for which there is no FDB entry.
	 */
	if (only_uc)
		goto buffer_out;

	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin,
			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
			    mlxsw_sp_port->local_port, set);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);

buffer_out:
	kfree(sftr_pl);
	return err;
}

static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool set)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
						true);
		if (err) {
			last_visited_vid = vid;
			goto err_port_flood_set;
		}
	}

	return 0;

err_port_flood_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		__mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
	netdev_err(dev, "Failed to configure unicast flooding\n");
	return err;
}

static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   unsigned long brport_flags)
{
	unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0;
	bool set;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	if ((uc_flood ^ brport_flags) & BR_FLOOD) {
		set = mlxsw_sp_port->uc_flood ? false : true;
		err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set);
		if (err)
			return err;
	}

	mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0;
	mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0;
	mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0;

	return 0;
}

static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
{
	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
	int err;

	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
	if (err)
		return err;
	mlxsw_sp->ageing_time = ageing_time;
	return 0;
}

static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    unsigned long ageing_clock_t)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}

static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}

static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);

	if (err)
		return err;

	set_bit(fid, mlxsw_sp->active_fids);
	return 0;
}

static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(fid, mlxsw_sp->active_fids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    fid, fid);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (mlxsw_sp_port->nr_vfids)
		mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	else
		mt = MLXSW_REG_SVFA_MT_VID_TO_FID;

	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
}

static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
	enum mlxsw_reg_svfa_mt mt;

	if (!mlxsw_sp_port->nr_vfids)
		return 0;

	mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
}

static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
				  u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_add_vid(dev, 0, vid);
		if (err)
			goto err_port_add_vid;
	}
	return 0;

err_port_add_vid:
	for (vid--; vid >= vid_begin; vid--)
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	return err;
}

static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool is_member,
				     bool untagged)
{
	u16 vid, vid_e;
	int err;

	for (vid = vid_begin; vid <= vid_end;
	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			    vid_end);

		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
					     is_member, untagged);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end,
				     bool flag_untagged, bool flag_pvid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	enum mlxsw_reg_svfa_mt mt;
	u16 vid, vid_e;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then packets ingressing through the port with
	 * the specified VIDs will be directed to CPU.
	 */
	if (!mlxsw_sp_port->bridged)
		return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);

	for (vid = vid_begin; vid <= vid_end; vid++) {
		if (!test_bit(vid, mlxsw_sp->active_fids)) {
			err = mlxsw_sp_fid_create(mlxsw_sp, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=%d\n",
					   vid);
				return err;
			}

			/* When creating a FID, we set a VID to FID mapping
			 * regardless of the port's mode.
			 */
			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
							   true, vid, vid);
			if (err) {
				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
					   vid);
				return err;
			}
		}

		/* Set FID mapping according to port's mode */
		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to map FID=%d", vid);
			return err;
		}
	}

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					true, false);
	if (err) {
		netdev_err(dev, "Failed to configure flooding\n");
		return err;
	}

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					true, flag_untagged);
	if (err) {
		netdev_err(dev, "Unable to add VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	vid = vid_begin;
	if (flag_pvid && mlxsw_sp_port->pvid != vid) {
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n",
				   vid);
			return err;
		}
		mlxsw_sp_port->pvid = vid;
	}

	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		set_bit(vid, mlxsw_sp_port->active_vlans);

	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
					   mlxsw_sp_port->stp_state);
}

static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans)
{
	bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end,
					 untagged_flag, pvid_flag);
}

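/* Add or remove a unicast FDB record for the port through the SFD register.
 * A VID of zero is replaced with the port's PVID before the write.
 */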
static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port,
				const char *mac, u16 vid, bool adding,
				bool dynamic)
{
	enum mlxsw_reg_sfd_rec_policy policy;
	enum mlxsw_reg_sfd_op op;
	char *sfd_pl;
	int err;

	if (!vid)
		vid = mlxsw_sp_port->pvid;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
			   MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
	op = adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
		      MLXSW_REG_SFD_OP_WRITE_REMOVE;
	mlxsw_reg_sfd_pack(sfd_pl, op, 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy,
			      mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      mlxsw_sp_port->local_port);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd),
			      sfd_pl);
	kfree(sfd_pl);

	return err;
}

static int
mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb,
			     struct switchdev_trans *trans)
{
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
				    true, false);
}

static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj),
					      trans);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj),
						   trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
				   u16 vid_end)
{
	u16 vid;
	int err;

	for (vid = vid_begin; vid <= vid_end; vid++) {
		err = mlxsw_sp_port_kill_vid(dev, 0, vid);
		if (err)
			return err;
	}

	return 0;
}

static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 vid_begin, u16 vid_end, bool init)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	u16 vid, pvid;
	int err;

	/* In case this is invoked with BRIDGE_FLAGS_SELF and port is
	 * not bridged, then prevent packets ingressing through the
	 * port with the specified VIDs from being trapped to CPU.
	 */
	if (!init && !mlxsw_sp_port->bridged)
		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);

	err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Unable to del VIDs %d-%d\n", vid_begin,
			   vid_end);
		return err;
	}

	pvid = mlxsw_sp_port->pvid;
	if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
		/* Default VLAN is always 1 */
		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
		if (err) {
			netdev_err(dev, "Unable to del PVID %d\n", pvid);
			return err;
		}
		mlxsw_sp_port->pvid = 1;
	}

	if (init)
		goto out;

	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
					false, false);
	if (err) {
		netdev_err(dev, "Failed to clear flooding\n");
		return err;
	}

	for (vid = vid_begin; vid <= vid_end; vid++) {
		/* Remove FID mapping in case of Virtual mode */
		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
		if (err) {
			netdev_err(dev, "Failed to unmap FID=%d", vid);
			return err;
		}
	}

out:
	/* Changing activity bits only if HW operation succeeded */
	for (vid = vid_begin; vid <= vid_end; vid++)
		clear_bit(vid, mlxsw_sp_port->active_vlans);

	return 0;
}

static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					 vlan->vid_begin, vlan->vid_end, false);
}

static int
mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
			     const struct switchdev_obj_port_fdb *fdb)
{
	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
				    false, false);
}

static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
						   SWITCHDEV_OBJ_PORT_FDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_obj_port_fdb *fdb,
				  switchdev_obj_dump_cb_t *cb)
{
	char *sfd_pl;
	char mac[ETH_ALEN];
	u16 vid;
	u8 local_port;
	u8 num_rec;
	int stored_err = 0;
	int i;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
	do {
		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
		err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core,
				      MLXSW_REG(sfd), sfd_pl);
		if (err)
			goto out;

		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);

		/* Even in case of error, we have to run the dump to the end
		 * so the session in firmware is finished.
		 */
		if (stored_err)
			continue;

		for (i = 0; i < num_rec; i++) {
			switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) {
			case MLXSW_REG_SFD_REC_TYPE_UNICAST:
				mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid,
							&local_port);
				if (local_port == mlxsw_sp_port->local_port) {
					ether_addr_copy(fdb->addr, mac);
					fdb->ndm_state = NUD_REACHABLE;
					fdb->vid = vid;
					err = cb(&fdb->obj);
					if (err)
						stored_err = err;
				}
			}
		}
	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);

out:
	kfree(sfd_pl);
	return stored_err ? stored_err : err;
}

static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct switchdev_obj_port_vlan *vlan,
				   switchdev_obj_dump_cb_t *cb)
{
	u16 vid;
	int err = 0;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		vlan->flags = 0;
		if (vid == mlxsw_sp_port->pvid)
			vlan->flags |= BRIDGE_VLAN_INFO_PVID;
		vlan->vid_begin = vid;
		vlan->vid_end = vid;
		err = cb(&vlan->obj);
		if (err)
			break;
	}
	return err;
}

static int mlxsw_sp_port_obj_dump(struct net_device *dev,
				  struct switchdev_obj *obj,
				  switchdev_obj_dump_cb_t *cb)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
		break;
	case SWITCHDEV_OBJ_ID_PORT_FDB:
		err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
					     SWITCHDEV_OBJ_PORT_FDB(obj), cb);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
	.switchdev_port_attr_get	= mlxsw_sp_port_attr_get,
	.switchdev_port_attr_set	= mlxsw_sp_port_attr_set,
	.switchdev_port_obj_add		= mlxsw_sp_port_obj_add,
	.switchdev_port_obj_del		= mlxsw_sp_port_obj_del,
	.switchdev_port_obj_dump	= mlxsw_sp_port_obj_dump,
};

static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		return;
	}

	err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid,
				   adding && mlxsw_sp_port->learning, true);
	if (err) {
		if (net_ratelimit())
			netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n");
		return;
	}

	if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) {
		struct switchdev_notifier_fdb_info info;
		unsigned long notifier_type;

		info.addr = mac;
		info.vid = vid;
		notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
		call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev,
					 &info.info);
	}
}

static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	}
}

static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
			      msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}

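/* Delayed work that polls the SFN register for learned and aged-out MAC
 * notifications, processes each record and then reschedules itself.
 */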
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp *mlxsw_sp;
	char *sfn_pl;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);

	do {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			break;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);

	} while (num_rec);

	kfree(sfn_pl);
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
}

static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}
	INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;
}

static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}

static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 fid;

	for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
		mlxsw_sp_fid_destroy(mlxsw_sp, fid);
}

int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_fdb_init(mlxsw_sp);
}

void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	mlxsw_sp_fids_fini(mlxsw_sp);
}

int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Allow only untagged packets to ingress and tag them internally
	 * with VID 1.
	 */
	mlxsw_sp_port->pvid = 1;
	err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true);
	if (err) {
		netdev_err(dev, "Unable to init VLANs\n");
		return err;
	}

	/* Add implicit VLAN interface in the device, so that untagged
	 * packets will be classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to configure default vFID\n");

	return err;
}

void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
}

void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}