blob: 20e67835aae7f44d23d39c43df8ea078a0142c02 [file] [log] [blame]
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/types.h>
40#include <linux/netdevice.h>
41#include <linux/etherdevice.h>
42#include <linux/ethtool.h>
43#include <linux/slab.h>
44#include <linux/device.h>
45#include <linux/skbuff.h>
46#include <linux/if_vlan.h>
47#include <linux/if_bridge.h>
48#include <linux/workqueue.h>
49#include <linux/jiffies.h>
50#include <linux/bitops.h>
Ido Schimmel7f71eb42015-12-15 16:03:37 +010051#include <linux/list.h>
Jiri Pirkoc4745502016-02-26 17:32:26 +010052#include <net/devlink.h>
Jiri Pirko56ade8f2015-10-16 14:01:37 +020053#include <net/switchdev.h>
54#include <generated/utsrelease.h>
55
56#include "spectrum.h"
57#include "core.h"
58#include "reg.h"
59#include "port.h"
60#include "trap.h"
61#include "txheader.h"
62
/* Driver identification strings, reported e.g. via ethtool get_drvinfo. */
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* The MLXSW_ITEM32() invocations below generate the
 * mlxsw_tx_hdr_<field>_set() accessors used by mlxsw_sp_txhdr_construct()
 * to fill the Tx header that is prepended to every transmitted packet.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
133
/* Push a Tx header onto the skb and fill it as an Ethernet control packet
 * directed at tx_info->local_port. Caller must guarantee at least
 * MLXSW_TXHDR_LEN bytes of headroom (mlxsw_sp_port_xmit reallocates
 * headroom before calling here).
 */
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	/* Zero the header first; fields not set below must remain 0. */
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
149
/* Query the switch base MAC address (SPAD register) and cache it in
 * mlxsw_sp->base_mac. Per-port addresses are later derived from it in
 * mlxsw_sp_port_dev_addr_init(). Returns 0 or a negative errno.
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
161
162static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
163 bool is_up)
164{
165 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
166 char paos_pl[MLXSW_REG_PAOS_LEN];
167
168 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
169 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
170 MLXSW_PORT_ADMIN_STATUS_DOWN);
171 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
172}
173
174static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
175 bool *p_is_up)
176{
177 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
178 char paos_pl[MLXSW_REG_PAOS_LEN];
179 u8 oper_status;
180 int err;
181
182 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
183 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
184 if (err)
185 return err;
186 oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
187 *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
188 return 0;
189}
190
/* Program a port's hardware MAC address via the PPAD register.
 * Returns 0 or a negative errno from the register write.
 */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
201
/* Derive the port's MAC address from the switch base MAC by adding the
 * local port number to the last byte, then program it into both the
 * netdev and the hardware.
 * NOTE(review): the += on the last byte can wrap for large local_port
 * values, silently aliasing addresses — presumably local_port stays small
 * enough in practice; confirm against the port-count limits.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
211
/* Set the per-VID spanning tree state of a port (SPMS register).
 * The payload is heap-allocated because MLXSW_REG_SPMS_LEN is too large
 * for a comfortable on-stack buffer. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
228
/* Program the port MTU (PMTU register). The Tx header and Ethernet header
 * lengths are added on top of the L3 MTU because the device counts them.
 * The maximum supported MTU is queried first (packing MTU 0 performs a
 * query) and -EINVAL is returned if the requested value exceeds it.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
249
/* Assign a port to a switch partition (PSPA register).
 * Returns 0 or a negative errno from the register write.
 */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
258
/* Enable or disable Virtual Port (VP) mode on a port (SVPE register).
 * In VP mode forwarding uses explicit {Port, VID} to FID mappings; see
 * mlxsw_sp_port_vp_mode_trans()/mlxsw_sp_port_vlan_mode_trans() for the
 * transitions. Returns 0 or a negative errno.
 */
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
268
/* Set or clear ('valid') a VID to FID mapping of the given type 'mt' for
 * a port (SVFA register). Exported to the rest of the driver (non-static).
 * Returns 0 or a negative errno.
 */
int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}
280
/* Enable or disable MAC learning for a single VID on a port (SPVMLR
 * register; the VID is passed as both begin and end of the range).
 * Payload is heap-allocated due to its size. Returns 0 or a negative
 * errno.
 */
static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
297
/* Create the local-port to system-port mapping (SSPR register).
 * Returns 0 or a negative errno from the register write.
 */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
307
308static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
309 bool *p_usable)
310{
311 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
312 char pmlp_pl[MLXSW_REG_PMLP_LEN];
313 int err;
314
315 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
316 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
317 if (err)
318 return err;
319 *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
320 return 0;
321}
322
/* ndo_open: bring the port administratively up, then start the Tx queue.
 * Returns 0 or the errno from the admin-status register write.
 */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}
334
/* ndo_stop: stop the Tx queue first, then bring the port
 * administratively down.
 */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}
342
/* ndo_start_xmit: transmit an skb through the switch CPU interface.
 * Ensures MLXSW_TXHDR_LEN headroom (reallocating if needed), pads the
 * frame to the Ethernet minimum, prepends the Tx header and hands the
 * skb to the core. Per-CPU stats are updated under u64_stats syncp.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		/* Not enough headroom for the Tx header; reallocate. On
		 * failure the original skb is dropped, not requeued.
		 */
		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	/* eth_skb_pad() frees the skb itself on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	/* Cache the length now; the skb is not ours after transmit. */
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
394
/* ndo_set_rx_mode: intentionally empty. Providing the callback keeps the
 * stack happy; no device-side Rx filtering is configured here.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
398
/* ndo_set_mac_address: validate the new address, program it into the
 * hardware first, and only update the netdev copy on success.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
414
/* ndo_change_mtu: program the hardware MTU first; update dev->mtu only
 * on success so software and hardware state stay consistent.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}
426
/* ndo_get_stats64: aggregate the per-CPU software counters into 'stats'.
 * The u64 counters are read under the u64_stats seqcount retry loop so a
 * consistent snapshot is obtained even on 32-bit hosts.
 */
static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return stats;
}
458
/* Add or remove a port from the VLAN range [vid_begin, vid_end] (SPVM
 * register), optionally egress-untagged. Payload is heap-allocated due
 * to its size. Returns 0 or a negative errno.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}
476
/* Transition a port to Virtual (VP) mode: install an explicit {Port, VID}
 * to FID mapping for every active VLAN, then enable VP mode. On failure,
 * mappings installed so far are rolled back; last_visited_vid marks the
 * rollback upper bound (VLAN_N_VID rolls back all of them when the final
 * mode switch itself fails).
 */
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}
506
/* Transition a port back to VLAN mode: disable VP mode, then remove the
 * explicit {Port, VID} to FID mapping of every active VLAN. Note there is
 * no rollback here — a mid-loop failure returns with some mappings still
 * removed (matches the original behavior).
 */
static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}
526
Ido Schimmel7f71eb42015-12-15 16:03:37 +0100527static struct mlxsw_sp_vfid *
528mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
529{
530 struct mlxsw_sp_vfid *vfid;
531
532 list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
533 if (vfid->vid == vid)
534 return vfid;
535 }
536
537 return NULL;
538}
539
/* Return the first free vFID index, or MLXSW_SP_VFID_PORT_MAX if the
 * bitmap is full (caller must check for that sentinel).
 */
static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}
545
/* Create the FID backing vFID 'vfid' in the device (SFMR register).
 * Returns 0 or a negative errno.
 */
static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
554
/* Destroy the FID backing vFID 'vfid' in the device (SFMR register).
 * The register write's return value is deliberately ignored — this runs
 * on teardown/error paths where there is nothing left to do on failure.
 */
static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
563
564static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
565 u16 vid)
566{
567 struct device *dev = mlxsw_sp->bus_info->dev;
568 struct mlxsw_sp_vfid *vfid;
569 u16 n_vfid;
570 int err;
571
572 n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
573 if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
574 dev_err(dev, "No available vFIDs\n");
575 return ERR_PTR(-ERANGE);
576 }
577
578 err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
579 if (err) {
580 dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
581 return ERR_PTR(err);
582 }
583
584 vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
585 if (!vfid)
586 goto err_allocate_vfid;
587
588 vfid->vfid = n_vfid;
589 vfid->vid = vid;
590
591 list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
592 set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);
593
594 return vfid;
595
596err_allocate_vfid:
597 __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
598 return ERR_PTR(-ENOMEM);
599}
600
/* Unregister a vFID from the switch-wide bitmap/list, destroy its FID in
 * the device and free the entry. Caller must ensure no vPorts still use
 * it (see the nr_vports checks in add_vid/kill_vid).
 */
static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}
611
/* Create a vPort: a shadow mlxsw_sp_port representing {port, vfid->vid}.
 * Most fields are copied from the parent port; the vPort is linked into
 * the parent's vports_list. Returns the new vPort or NULL on allocation
 * failure.
 */
static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}
639
/* Unlink a vPort from its parent's vports_list and free it. */
static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}
645
/* ndo_vlan_rx_add_vid: create a vPort for 'vid' on the port.
 *
 * Sequence: find-or-create the vFID for this VID, create the vPort,
 * enable flooding on a brand-new vFID, switch the port to Virtual mode if
 * this is its first vPort, then program the {Port, VID}->FID mapping,
 * disable learning, add VLAN membership and set STP forwarding. Any
 * failure unwinds the already-completed steps in reverse via the goto
 * chain below. The vFID's nr_vports refcount is only bumped once
 * everything succeeded.
 */
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	/* First user of this vFID: set up flooding for it. */
	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

/* Unwind in reverse order of the setup steps above. */
err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}
762
/* ndo_vlan_rx_kill_vid: tear down the vPort for 'vid', reversing
 * mlxsw_sp_port_add_vid(): STP discarding, drop VLAN membership,
 * re-enable learning, invalidate the {Port, VID}->FID mapping, switch the
 * port back to VLAN mode if this was its last vPort, and finally destroy
 * the vPort and — when its refcount drops to zero — the vFID.
 *
 * NOTE(review): a mid-sequence register failure returns early without
 * undoing the steps already applied, leaving partial state; matches the
 * original behavior, kept as-is.
 */
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}
837
/* net_device callbacks for a Spectrum port. FDB and bridge operations are
 * delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
};
855
/* ethtool get_drvinfo: report driver name/version, firmware revision and
 * the underlying bus device name.
 */
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}
873
/* Descriptor for one hardware counter exposed via ethtool -S: the string
 * shown to the user and the PPCNT-payload getter that extracts it.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

/* IEEE 802.3 counter group of the PPCNT register. Order here defines the
 * order of both the ethtool string table and the stats values.
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
959
960static void mlxsw_sp_port_get_strings(struct net_device *dev,
961 u32 stringset, u8 *data)
962{
963 u8 *p = data;
964 int i;
965
966 switch (stringset) {
967 case ETH_SS_STATS:
968 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
969 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
970 ETH_GSTRING_LEN);
971 p += ETH_GSTRING_LEN;
972 }
973 break;
974 }
975}
976
Ido Schimmel3a66ee32015-11-27 13:45:55 +0100977static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
978 enum ethtool_phys_id_state state)
979{
980 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
981 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
982 char mlcr_pl[MLXSW_REG_MLCR_LEN];
983 bool active;
984
985 switch (state) {
986 case ETHTOOL_ID_ACTIVE:
987 active = true;
988 break;
989 case ETHTOOL_ID_INACTIVE:
990 active = false;
991 break;
992 default:
993 return -EOPNOTSUPP;
994 }
995
996 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
997 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
998}
999
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001000static void mlxsw_sp_port_get_stats(struct net_device *dev,
1001 struct ethtool_stats *stats, u64 *data)
1002{
1003 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1004 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1005 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1006 int i;
1007 int err;
1008
1009 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
1010 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
1011 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
1012 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
1013}
1014
1015static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
1016{
1017 switch (sset) {
1018 case ETH_SS_STATS:
1019 return MLXSW_SP_PORT_HW_STATS_LEN;
1020 default:
1021 return -EOPNOTSUPP;
1022 }
1023}
1024
/* Mapping between PTYS register protocol bits and ethtool link modes.
 * .supported/.advertised are left zero for protocols that have no
 * corresponding ethtool bit; .speed is in Mb/s.
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;	/* PTYS eth proto bit(s) */
	u32 supported;	/* SUPPORTED_* ethtool bit */
	u32 advertised;	/* ADVERTISED_* ethtool bit */
	u32 speed;	/* link speed in Mb/s */
};
1031
/* PTYS <-> ethtool link-mode translation table. Entries whose PTYS
 * protocols have no ethtool SUPPORTED_/ADVERTISED_ bit carry only a
 * mask and speed; for those, speed/duplex can still be reported even
 * though the mode cannot be advertised via ethtool.
 */
static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		/* No ethtool bits existed for 25G modes here. */
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		/* No ethtool bits existed for 50G modes here. */
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		/* No ethtool bits existed for 100G modes here. */
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};
1128
1129#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1130
1131static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1132{
1133 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1134 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1135 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1136 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1137 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1138 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1139 return SUPPORTED_FIBRE;
1140
1141 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1142 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1143 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1144 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1145 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1146 return SUPPORTED_Backplane;
1147 return 0;
1148}
1149
1150static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1151{
1152 u32 modes = 0;
1153 int i;
1154
1155 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1156 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1157 modes |= mlxsw_sp_port_link_mode[i].supported;
1158 }
1159 return modes;
1160}
1161
1162static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1163{
1164 u32 modes = 0;
1165 int i;
1166
1167 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1168 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1169 modes |= mlxsw_sp_port_link_mode[i].advertised;
1170 }
1171 return modes;
1172}
1173
1174static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1175 struct ethtool_cmd *cmd)
1176{
1177 u32 speed = SPEED_UNKNOWN;
1178 u8 duplex = DUPLEX_UNKNOWN;
1179 int i;
1180
1181 if (!carrier_ok)
1182 goto out;
1183
1184 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1185 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1186 speed = mlxsw_sp_port_link_mode[i].speed;
1187 duplex = DUPLEX_FULL;
1188 break;
1189 }
1190 }
1191out:
1192 ethtool_cmd_speed_set(cmd, speed);
1193 cmd->duplex = duplex;
1194}
1195
1196static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1197{
1198 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1199 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1200 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1201 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1202 return PORT_FIBRE;
1203
1204 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1205 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1206 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1207 return PORT_DA;
1208
1209 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1210 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1211 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1212 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1213 return PORT_NONE;
1214
1215 return PORT_OTHER;
1216}
1217
1218static int mlxsw_sp_port_get_settings(struct net_device *dev,
1219 struct ethtool_cmd *cmd)
1220{
1221 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1222 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1223 char ptys_pl[MLXSW_REG_PTYS_LEN];
1224 u32 eth_proto_cap;
1225 u32 eth_proto_admin;
1226 u32 eth_proto_oper;
1227 int err;
1228
1229 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1230 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1231 if (err) {
1232 netdev_err(dev, "Failed to get proto");
1233 return err;
1234 }
1235 mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
1236 &eth_proto_admin, &eth_proto_oper);
1237
1238 cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1239 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1240 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1241 cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1242 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1243 eth_proto_oper, cmd);
1244
1245 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1246 cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
1247 cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
1248
1249 cmd->transceiver = XCVR_INTERNAL;
1250 return 0;
1251}
1252
1253static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1254{
1255 u32 ptys_proto = 0;
1256 int i;
1257
1258 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1259 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1260 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1261 }
1262 return ptys_proto;
1263}
1264
1265static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1266{
1267 u32 ptys_proto = 0;
1268 int i;
1269
1270 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1271 if (speed == mlxsw_sp_port_link_mode[i].speed)
1272 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1273 }
1274 return ptys_proto;
1275}
1276
1277static int mlxsw_sp_port_set_settings(struct net_device *dev,
1278 struct ethtool_cmd *cmd)
1279{
1280 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1281 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1282 char ptys_pl[MLXSW_REG_PTYS_LEN];
1283 u32 speed;
1284 u32 eth_proto_new;
1285 u32 eth_proto_cap;
1286 u32 eth_proto_admin;
1287 bool is_up;
1288 int err;
1289
1290 speed = ethtool_cmd_speed(cmd);
1291
1292 eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
1293 mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1294 mlxsw_sp_to_ptys_speed(speed);
1295
1296 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1297 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1298 if (err) {
1299 netdev_err(dev, "Failed to get proto");
1300 return err;
1301 }
1302 mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
1303
1304 eth_proto_new = eth_proto_new & eth_proto_cap;
1305 if (!eth_proto_new) {
1306 netdev_err(dev, "Not supported proto admin requested");
1307 return -EINVAL;
1308 }
1309 if (eth_proto_new == eth_proto_admin)
1310 return 0;
1311
1312 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1313 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1314 if (err) {
1315 netdev_err(dev, "Failed to set proto admin");
1316 return err;
1317 }
1318
1319 err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
1320 if (err) {
1321 netdev_err(dev, "Failed to get oper status");
1322 return err;
1323 }
1324 if (!is_up)
1325 return 0;
1326
1327 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1328 if (err) {
1329 netdev_err(dev, "Failed to set admin status");
1330 return err;
1331 }
1332
1333 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1334 if (err) {
1335 netdev_err(dev, "Failed to set admin status");
1336 return err;
1337 }
1338
1339 return 0;
1340}
1341
/* ethtool callbacks wired into every Spectrum port netdev at creation
 * time (see the port-create path below, which assigns dev->ethtool_ops).
 */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1352
/* Allocate, configure and register the netdev and devlink port for one
 * local port. Returns 0 on success or a negative errno; all partially
 * acquired resources are released through the goto ladder at the
 * bottom. Note that an unusable module makes the function bail out
 * through the cleanup path with err == 0, i.e. the port is silently
 * skipped rather than treated as an error.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct devlink_port *devlink_port;
	struct net_device *dev;
	bool usable;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	/* Bitmaps sized to hold one bit per possible VLAN ID. */
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	/* Link state is reported asynchronously via PUDE events. */
	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_check;
	}

	if (!usable) {
		dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
			mlxsw_sp_port->local_port);
		/* err is 0 here; the port is skipped, not failed. */
		goto port_not_usable;
	}

	devlink_port = &mlxsw_sp_port->devlink_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
			mlxsw_sp_port->local_port);
		goto err_devlink_port_register;
	}

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	/* Port starts administratively down; ndo_open brings it up. */
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	devlink_port_type_eth_set(devlink_port, dev);

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_swid_set:
err_port_system_port_mapping_set:
	devlink_port_unregister(&mlxsw_sp_port->devlink_port);
err_devlink_port_register:
port_not_usable:
err_port_module_check:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->untagged_vlans);
err_port_untagged_vlans_alloc:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}
1502
/* Destroy any vPorts still hanging off the port. Called during port
 * removal, after the netdev has been unregistered, so only vPorts that
 * were not backed by VLAN upper devices should remain.
 */
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;

	/* _safe variant: mlxsw_sp_port_kill_vid() removes the vPort from
	 * the list we are walking.
	 */
	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
				 &mlxsw_sp_port->vports_list, vport.list) {
		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);

		/* vPorts created for VLAN devices should already be gone
		 * by now, since we unregistered the port netdev.
		 */
		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
		mlxsw_sp_port_kill_vid(dev, 0, vid);
	}
}
1519
/* Tear down one port created by mlxsw_sp_port_create(). Safe to call
 * for a local port that was never instantiated (NULL slot). Teardown
 * order mirrors creation in reverse: devlink type is cleared before
 * the netdev is unregistered, and vPorts are destroyed only after
 * unregistration (see mlxsw_sp_port_vports_fini()).
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct devlink_port *devlink_port;

	if (!mlxsw_sp_port)
		return;
	devlink_port = &mlxsw_sp_port->devlink_port;
	devlink_port_type_clear(devlink_port);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	devlink_port_unregister(devlink_port);
	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->untagged_vlans);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}
1538
1539static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1540{
1541 int i;
1542
1543 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1544 mlxsw_sp_port_remove(mlxsw_sp, i);
1545 kfree(mlxsw_sp->ports);
1546}
1547
1548static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1549{
1550 size_t alloc_size;
1551 int i;
1552 int err;
1553
1554 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1555 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1556 if (!mlxsw_sp->ports)
1557 return -ENOMEM;
1558
1559 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1560 err = mlxsw_sp_port_create(mlxsw_sp, i);
1561 if (err)
1562 goto err_port_create;
1563 }
1564 return 0;
1565
1566err_port_create:
1567 for (i--; i >= 1; i--)
1568 mlxsw_sp_port_remove(mlxsw_sp, i);
1569 kfree(mlxsw_sp->ports);
1570 return err;
1571}
1572
1573static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
1574 char *pude_pl, void *priv)
1575{
1576 struct mlxsw_sp *mlxsw_sp = priv;
1577 struct mlxsw_sp_port *mlxsw_sp_port;
1578 enum mlxsw_reg_pude_oper_status status;
1579 u8 local_port;
1580
1581 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1582 mlxsw_sp_port = mlxsw_sp->ports[local_port];
1583 if (!mlxsw_sp_port) {
1584 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1585 local_port);
1586 return;
1587 }
1588
1589 status = mlxsw_reg_pude_oper_status_get(pude_pl);
1590 if (status == MLXSW_PORT_OPER_STATUS_UP) {
1591 netdev_info(mlxsw_sp_port->dev, "link up\n");
1592 netif_carrier_on(mlxsw_sp_port->dev);
1593 } else {
1594 netdev_info(mlxsw_sp_port->dev, "link down\n");
1595 netif_carrier_off(mlxsw_sp_port->dev);
1596 }
1597}
1598
/* Event listener binding the PUDE trap to its handler above. */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
1603
1604static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
1605 enum mlxsw_event_trap_id trap_id)
1606{
1607 struct mlxsw_event_listener *el;
1608 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1609 int err;
1610
1611 switch (trap_id) {
1612 case MLXSW_TRAP_ID_PUDE:
1613 el = &mlxsw_sp_pude_event;
1614 break;
1615 }
1616 err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
1617 if (err)
1618 return err;
1619
1620 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
1621 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1622 if (err)
1623 goto err_event_trap_set;
1624
1625 return 0;
1626
1627err_event_trap_set:
1628 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1629 return err;
1630}
1631
1632static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
1633 enum mlxsw_event_trap_id trap_id)
1634{
1635 struct mlxsw_event_listener *el;
1636
1637 switch (trap_id) {
1638 case MLXSW_TRAP_ID_PUDE:
1639 el = &mlxsw_sp_pude_event;
1640 break;
1641 }
1642 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1643}
1644
/* RX trap handler: account the packet in the port's per-CPU stats and
 * inject it into the networking stack on the port's netdev. Packets
 * for local ports without a netdev are dropped with a rate-limited
 * warning.
 */
static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* u64_stats syncp guards 64-bit counter updates on 32-bit SMP. */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
1669
/* Trap IDs whose packets are delivered to the CPU through
 * mlxsw_sp_rx_listener_func(). All entries match any local port; the
 * traps themselves are armed in mlxsw_sp_traps_init() below.
 */
static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};
1743
/* Configure the RX/CTRL trap groups, then register every RX listener
 * and arm its trap to TRAP_TO_CPU. On failure, listeners and traps
 * registered so far are unwound: the err_rx_trap_set label handles the
 * half-registered entry at index i, then the loop reverts indices
 * i-1..0 (trap action back to FORWARD, listener unregistered).
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
1792
/* Reverse of mlxsw_sp_traps_init() for the fully-initialized case:
 * restore each trap action to FORWARD and unregister its listener.
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}
1808
1809static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
1810 enum mlxsw_reg_sfgc_type type,
1811 enum mlxsw_reg_sfgc_bridge_type bridge_type)
1812{
1813 enum mlxsw_flood_table_type table_type;
1814 enum mlxsw_sp_flood_table flood_table;
1815 char sfgc_pl[MLXSW_REG_SFGC_LEN];
1816
Ido Schimmel19ae6122015-12-15 16:03:39 +01001817 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001818 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
Ido Schimmel19ae6122015-12-15 16:03:39 +01001819 else
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001820 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
Ido Schimmel19ae6122015-12-15 16:03:39 +01001821
1822 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
1823 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
1824 else
1825 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001826
1827 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
1828 flood_table);
1829 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
1830}
1831
1832static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
1833{
1834 int type, err;
1835
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001836 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
1837 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
1838 continue;
1839
1840 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1841 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
1842 if (err)
1843 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001844
1845 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1846 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
1847 if (err)
1848 return err;
1849 }
1850
1851 return 0;
1852}
1853
/* Configure the global LAG hash via the SLCR register: hash on L2
 * (SMAC/DMAC/ethertype/VLAN), L3 (SIP/DIP/protocol) and L4 (source/
 * destination port) fields.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];

	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
1869
/* Driver-core init callback: initialize the whole Spectrum ASIC
 * instance — base MAC, ports, PUDE events, RX traps, flood tables,
 * shared buffers, LAG hashing and switchdev. Each step is unwound via
 * the goto ladder on failure. Note there are no fini counterparts for
 * flood/buffers/LAG here, hence the empty labels that fall through.
 */
static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;
	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		return err;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

err_switchdev_init:
err_lag_init:
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
	return err;
}
1943
/* Driver teardown callback: undoes mlxsw_sp_init() in reverse order.
 * Flood tables, buffers and LAG hashing have no corresponding fini
 * calls here (mirrors the fall-through error labels in
 * mlxsw_sp_init()).
 */
static void mlxsw_sp_fini(void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
}
1953
/* Static resource-allocation profile handed to the mlxsw core at probe
 * time.  Each "used_*" flag marks the paired "max_*" value as valid;
 * unset maxima are left to firmware defaults.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,	/* VEPA not used */
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid			= 1,
	.max_mid			= MLXSW_SP_MID_MAX,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	/* Two flood tables of each kind: one for the per-VLAN (FID offset)
	 * case and one for vFIDs.
	 */
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 2,
	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,	/* Ethernet only, no InfiniBand */
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
1989
/* mlxsw core driver registration: ties the Spectrum ASIC kind to its
 * init/fini callbacks, TX-header construction and resource profile.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind			= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner			= THIS_MODULE,
	.priv_size		= sizeof(struct mlxsw_sp),
	.init			= mlxsw_sp_init,
	.fini			= mlxsw_sp_fini,
	.txhdr_construct	= mlxsw_sp_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sp_config_profile,
};
2000
Ido Schimmel039c49a2016-01-27 15:20:18 +01002001static int
2002mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
2003{
2004 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2005 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2006
2007 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
2008 mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
2009
2010 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2011}
2012
2013static int
2014mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2015 u16 fid)
2016{
2017 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2018 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2019
2020 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2021 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2022 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2023 mlxsw_sp_port->local_port);
2024
2025 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2026}
2027
2028static int
2029mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
2030{
2031 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2032 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2033
2034 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
2035 mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2036
2037 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2038}
2039
2040static int
2041mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2042 u16 fid)
2043{
2044 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2045 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2046
2047 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2048 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2049 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2050
2051 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2052}
2053
2054static int
2055__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2056{
2057 int err, last_err = 0;
2058 u16 vid;
2059
2060 for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2061 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2062 if (err)
2063 last_err = err;
2064 }
2065
2066 return last_err;
2067}
2068
2069static int
2070__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2071{
2072 int err, last_err = 0;
2073 u16 vid;
2074
2075 for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2076 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2077 if (err)
2078 last_err = err;
2079 }
2080
2081 return last_err;
2082}
2083
2084static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2085{
2086 if (!list_empty(&mlxsw_sp_port->vports_list))
2087 if (mlxsw_sp_port->lagged)
2088 return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
2089 else
2090 return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
2091 else
2092 if (mlxsw_sp_port->lagged)
2093 return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
2094 else
2095 return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
2096}
2097
2098static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2099{
2100 u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2101 u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2102
2103 if (mlxsw_sp_vport->lagged)
2104 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2105 fid);
2106 else
2107 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2108}
2109
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002110static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2111{
2112 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2113}
2114
2115static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
2116{
2117 struct net_device *dev = mlxsw_sp_port->dev;
2118 int err;
2119
2120 /* When port is not bridged untagged packets are tagged with
2121 * PVID=VID=1, thereby creating an implicit VLAN interface in
2122 * the device. Remove it and let bridge code take care of its
2123 * own VLANs.
2124 */
2125 err = mlxsw_sp_port_kill_vid(dev, 0, 1);
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01002126 if (err)
2127 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002128
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01002129 mlxsw_sp_port->learning = 1;
2130 mlxsw_sp_port->learning_sync = 1;
2131 mlxsw_sp_port->uc_flood = 1;
2132 mlxsw_sp_port->bridged = 1;
2133
2134 return 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002135}
2136
Ido Schimmel039c49a2016-01-27 15:20:18 +01002137static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2138 bool flush_fdb)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002139{
2140 struct net_device *dev = mlxsw_sp_port->dev;
Ido Schimmel5a8f4522016-01-04 10:42:25 +01002141
Ido Schimmel039c49a2016-01-27 15:20:18 +01002142 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2143 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2144
Ido Schimmel28a01d22016-02-18 11:30:02 +01002145 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2146
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01002147 mlxsw_sp_port->learning = 0;
2148 mlxsw_sp_port->learning_sync = 0;
2149 mlxsw_sp_port->uc_flood = 0;
Ido Schimmel5a8f4522016-01-04 10:42:25 +01002150 mlxsw_sp_port->bridged = 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002151
2152 /* Add implicit VLAN interface in the device, so that untagged
2153 * packets will be classified to the default vFID.
2154 */
Ido Schimmel5a8f4522016-01-04 10:42:25 +01002155 return mlxsw_sp_port_add_vid(dev, 0, 1);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002156}
2157
2158static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2159 struct net_device *br_dev)
2160{
2161 return !mlxsw_sp->master_bridge.dev ||
2162 mlxsw_sp->master_bridge.dev == br_dev;
2163}
2164
/* Record @br_dev as the device's single master bridge and take a
 * reference (one per bridged port).
 */
static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}
2171
/* Drop one reference on the master bridge; forget it when the last
 * bridged port leaves.  @br_dev is unused (callers may pass NULL).
 */
static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}
2178
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002179static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002180{
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002181 char sldr_pl[MLXSW_REG_SLDR_LEN];
2182
2183 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
2184 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2185}
2186
2187static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2188{
2189 char sldr_pl[MLXSW_REG_SLDR_LEN];
2190
2191 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
2192 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2193}
2194
2195static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2196 u16 lag_id, u8 port_index)
2197{
2198 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2199 char slcor_pl[MLXSW_REG_SLCOR_LEN];
2200
2201 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
2202 lag_id, port_index);
2203 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2204}
2205
2206static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2207 u16 lag_id)
2208{
2209 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2210 char slcor_pl[MLXSW_REG_SLCOR_LEN];
2211
2212 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
2213 lag_id);
2214 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2215}
2216
2217static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
2218 u16 lag_id)
2219{
2220 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2221 char slcor_pl[MLXSW_REG_SLCOR_LEN];
2222
2223 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
2224 lag_id);
2225 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2226}
2227
2228static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
2229 u16 lag_id)
2230{
2231 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2232 char slcor_pl[MLXSW_REG_SLCOR_LEN];
2233
2234 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
2235 lag_id);
2236 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2237}
2238
2239static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2240 struct net_device *lag_dev,
2241 u16 *p_lag_id)
2242{
2243 struct mlxsw_sp_upper *lag;
2244 int free_lag_id = -1;
2245 int i;
2246
2247 for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2248 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2249 if (lag->ref_count) {
2250 if (lag->dev == lag_dev) {
2251 *p_lag_id = i;
2252 return 0;
2253 }
2254 } else if (free_lag_id < 0) {
2255 free_lag_id = i;
2256 }
2257 }
2258 if (free_lag_id < 0)
2259 return -EBUSY;
2260 *p_lag_id = free_lag_id;
2261 return 0;
2262}
2263
2264static bool
2265mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2266 struct net_device *lag_dev,
2267 struct netdev_lag_upper_info *lag_upper_info)
2268{
2269 u16 lag_id;
2270
2271 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2272 return false;
2273 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2274 return false;
2275 return true;
2276}
2277
2278static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2279 u16 lag_id, u8 *p_port_index)
2280{
2281 int i;
2282
2283 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2284 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2285 *p_port_index = i;
2286 return 0;
2287 }
2288 }
2289 return -EBUSY;
2290}
2291
2292static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2293 struct net_device *lag_dev)
2294{
2295 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2296 struct mlxsw_sp_upper *lag;
2297 u16 lag_id;
2298 u8 port_index;
2299 int err;
2300
2301 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
2302 if (err)
2303 return err;
2304 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2305 if (!lag->ref_count) {
2306 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
2307 if (err)
2308 return err;
2309 lag->dev = lag_dev;
2310 }
2311
2312 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
2313 if (err)
2314 return err;
2315 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
2316 if (err)
2317 goto err_col_port_add;
2318 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
2319 if (err)
2320 goto err_col_port_enable;
2321
2322 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
2323 mlxsw_sp_port->local_port);
2324 mlxsw_sp_port->lag_id = lag_id;
2325 mlxsw_sp_port->lagged = 1;
2326 lag->ref_count++;
2327 return 0;
2328
2329err_col_port_add:
2330 if (!lag->ref_count)
2331 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2332err_col_port_enable:
2333 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
2334 return err;
2335}
2336
Ido Schimmel4dc236c2016-01-27 15:20:16 +01002337static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
Ido Schimmel039c49a2016-01-27 15:20:18 +01002338 struct net_device *br_dev,
2339 bool flush_fdb);
Ido Schimmel4dc236c2016-01-27 15:20:16 +01002340
/* Remove the port from its LAG, tearing down any bridge state that was
 * built on top of the LAG (for this port and its vPorts), and destroy
 * the hardware LAG when the last member leaves.  Returns 0 or a
 * negative errno.
 */
static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	/* Stop collection before removing the port from the collector. */
	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	/* In case we leave a LAG device that has bridges built on top,
	 * then their teardown sequence is never issued and we need to
	 * invoke the necessary cleanup routines ourselves.
	 */
	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
			    vport.list) {
		struct net_device *br_dev;

		if (!mlxsw_sp_vport->bridged)
			continue;

		/* flush_fdb=false: the per-LAG flush below covers it. */
		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
	}

	if (mlxsw_sp_port->bridged) {
		mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);

		/* Last member: the master bridge reference held via the
		 * LAG goes away with it.
		 */
		if (lag->ref_count == 1)
			mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
	}

	if (lag->ref_count == 1) {
		/* Best-effort FDB flush for the whole LAG before destroy. */
		if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
			netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}
2399
Jiri Pirko74581202015-12-03 12:12:30 +01002400static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2401 u16 lag_id)
2402{
2403 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2404 char sldr_pl[MLXSW_REG_SLDR_LEN];
2405
2406 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
2407 mlxsw_sp_port->local_port);
2408 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2409}
2410
2411static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2412 u16 lag_id)
2413{
2414 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2415 char sldr_pl[MLXSW_REG_SLDR_LEN];
2416
2417 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
2418 mlxsw_sp_port->local_port);
2419 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2420}
2421
2422static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2423 bool lag_tx_enabled)
2424{
2425 if (lag_tx_enabled)
2426 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2427 mlxsw_sp_port->lag_id);
2428 else
2429 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2430 mlxsw_sp_port->lag_id);
2431}
2432
/* Handle a NETDEV_CHANGELOWERSTATE update for a LAG member: reflect the
 * bond's tx_enabled flag into the hardware distributor.
 */
static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}
2438
Ido Schimmel9589a7b52015-12-15 16:03:43 +01002439static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2440 struct net_device *vlan_dev)
2441{
2442 struct mlxsw_sp_port *mlxsw_sp_vport;
2443 u16 vid = vlan_dev_vlan_id(vlan_dev);
2444
2445 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2446 if (!mlxsw_sp_vport) {
2447 WARN_ON(!mlxsw_sp_vport);
2448 return -EINVAL;
2449 }
2450
2451 mlxsw_sp_vport->dev = vlan_dev;
2452
2453 return 0;
2454}
2455
2456static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2457 struct net_device *vlan_dev)
2458{
2459 struct mlxsw_sp_port *mlxsw_sp_vport;
2460 u16 vid = vlan_dev_vlan_id(vlan_dev);
2461
2462 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2463 if (!mlxsw_sp_vport) {
2464 WARN_ON(!mlxsw_sp_vport);
2465 return -EINVAL;
2466 }
2467
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002468 /* When removing a VLAN device while still bridged we should first
2469 * remove it from the bridge, as we receive the bridge's notification
2470 * when the vPort is already gone.
2471 */
2472 if (mlxsw_sp_vport->bridged) {
2473 struct net_device *br_dev;
2474
2475 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
Ido Schimmel039c49a2016-01-27 15:20:18 +01002476 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002477 }
2478
Ido Schimmel9589a7b52015-12-15 16:03:43 +01002479 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
2480
2481 return 0;
2482}
2483
/* Handle PRECHANGEUPPER/CHANGEUPPER notifications for a switch port.
 * PRECHANGEUPPER vetoes unsupported topologies (second bridge, or a
 * LAG the device cannot offload); CHANGEUPPER performs the actual
 * join/leave for VLAN, bridge and LAG uppers.
 * Returns NOTIFY_DONE on success, NOTIFY_BAD on veto or failure.
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only master-device links are vetted here. */
		if (!info->master || !info->linking)
			break;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (is_vlan_dev(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to link VLAN device\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
								upper_dev);
				if (err) {
					netdev_err(dev, "Failed to unlink VLAN device\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err) {
					netdev_err(dev, "Failed to join bridge\n");
					return NOTIFY_BAD;
				}
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
			} else {
				/* The master-bridge reference is dropped even
				 * when the leave itself fails.
				 */
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
								 true);
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave bridge\n");
					return NOTIFY_BAD;
				}
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}
2568
Jiri Pirko74581202015-12-03 12:12:30 +01002569static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
2570 unsigned long event, void *ptr)
2571{
2572 struct netdev_notifier_changelowerstate_info *info;
2573 struct mlxsw_sp_port *mlxsw_sp_port;
2574 int err;
2575
2576 mlxsw_sp_port = netdev_priv(dev);
2577 info = ptr;
2578
2579 switch (event) {
2580 case NETDEV_CHANGELOWERSTATE:
2581 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
2582 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
2583 info->lower_state_info);
2584 if (err)
2585 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
2586 }
2587 break;
2588 }
2589
2590 return NOTIFY_DONE;
2591}
2592
2593static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
2594 unsigned long event, void *ptr)
2595{
2596 switch (event) {
2597 case NETDEV_PRECHANGEUPPER:
2598 case NETDEV_CHANGEUPPER:
2599 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
2600 case NETDEV_CHANGELOWERSTATE:
2601 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
2602 }
2603
2604 return NOTIFY_DONE;
2605}
2606
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002607static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
2608 unsigned long event, void *ptr)
2609{
2610 struct net_device *dev;
2611 struct list_head *iter;
2612 int ret;
2613
2614 netdev_for_each_lower_dev(lag_dev, dev, iter) {
2615 if (mlxsw_sp_port_dev_check(dev)) {
2616 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
2617 if (ret == NOTIFY_BAD)
2618 return ret;
2619 }
2620 }
2621
2622 return NOTIFY_DONE;
2623}
2624
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002625static struct mlxsw_sp_vfid *
2626mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
2627 const struct net_device *br_dev)
2628{
2629 struct mlxsw_sp_vfid *vfid;
2630
2631 list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
2632 if (vfid->br_dev == br_dev)
2633 return vfid;
2634 }
2635
2636 return NULL;
2637}
2638
/* Bridge vFIDs are numbered after the per-port vFIDs; convert a global
 * vFID to its bridge-vFID bitmap index.
 */
static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
{
	return vfid - MLXSW_SP_VFID_PORT_MAX;
}
2643
/* Inverse of mlxsw_sp_vfid_to_br_vfid(): bridge-vFID index to the
 * global vFID number.
 */
static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
{
	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
}
2648
/* First free bridge-vFID index; returns MLXSW_SP_VFID_BR_MAX when the
 * bitmap is full (find_first_zero_bit() semantics).
 */
static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
				   MLXSW_SP_VFID_BR_MAX);
}
2654
/* Allocate a vFID for bridge device @br_dev: reserve the first free
 * bridge-vFID slot, create the FID in hardware, and track it on the
 * br_vfids list.  Returns the new entry or an ERR_PTR.
 */
static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *br_dev)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	/* When the bitmap is full, avail returns MLXSW_SP_VFID_BR_MAX,
	 * which maps exactly to MLXSW_SP_VFID_MAX here.
	 */
	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
	if (n_vfid == MLXSW_SP_VFID_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->br_dev = br_dev;

	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);

	return vfid;

err_allocate_vfid:
	/* Undo the hardware FID creation on allocation failure. */
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}
2691
2692static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
2693 struct mlxsw_sp_vfid *vfid)
2694{
2695 u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
2696
2697 clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
2698 list_del(&vfid->list);
2699
2700 __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
2701
2702 kfree(vfid);
2703}
2704
2705static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
Ido Schimmel039c49a2016-01-27 15:20:18 +01002706 struct net_device *br_dev,
2707 bool flush_fdb)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002708{
2709 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2710 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
2711 struct net_device *dev = mlxsw_sp_vport->dev;
2712 struct mlxsw_sp_vfid *vfid, *new_vfid;
2713 int err;
2714
2715 vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
2716 if (!vfid) {
2717 WARN_ON(!vfid);
2718 return -EINVAL;
2719 }
2720
2721 /* We need a vFID to go back to after leaving the bridge's vFID. */
2722 new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
2723 if (!new_vfid) {
2724 new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
2725 if (IS_ERR(new_vfid)) {
2726 netdev_err(dev, "Failed to create vFID for VID=%d\n",
2727 vid);
2728 return PTR_ERR(new_vfid);
2729 }
2730 }
2731
2732 /* Invalidate existing {Port, VID} to vFID mapping and create a new
2733 * one for the new vFID.
2734 */
2735 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
2736 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
2737 false,
2738 mlxsw_sp_vfid_to_fid(vfid->vfid),
2739 vid);
2740 if (err) {
2741 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
2742 vfid->vfid);
2743 goto err_port_vid_to_fid_invalidate;
2744 }
2745
2746 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
2747 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
2748 true,
2749 mlxsw_sp_vfid_to_fid(new_vfid->vfid),
2750 vid);
2751 if (err) {
2752 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
2753 new_vfid->vfid);
2754 goto err_port_vid_to_fid_validate;
2755 }
2756
2757 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
2758 if (err) {
2759 netdev_err(dev, "Failed to disable learning\n");
2760 goto err_port_vid_learning_set;
2761 }
2762
2763 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
2764 false);
2765 if (err) {
2766 netdev_err(dev, "Failed clear to clear flooding\n");
2767 goto err_vport_flood_set;
2768 }
2769
Ido Schimmel6a9863a2016-02-15 13:19:54 +01002770 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
2771 MLXSW_REG_SPMS_STATE_FORWARDING);
2772 if (err) {
2773 netdev_err(dev, "Failed to set STP state\n");
2774 goto err_port_stp_state_set;
2775 }
2776
Ido Schimmel039c49a2016-01-27 15:20:18 +01002777 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
2778 netdev_err(dev, "Failed to flush FDB\n");
2779
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002780 /* Switch between the vFIDs and destroy the old one if needed. */
2781 new_vfid->nr_vports++;
2782 mlxsw_sp_vport->vport.vfid = new_vfid;
2783 vfid->nr_vports--;
2784 if (!vfid->nr_vports)
2785 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
2786
2787 mlxsw_sp_vport->learning = 0;
2788 mlxsw_sp_vport->learning_sync = 0;
2789 mlxsw_sp_vport->uc_flood = 0;
2790 mlxsw_sp_vport->bridged = 0;
2791
2792 return 0;
2793
Ido Schimmel6a9863a2016-02-15 13:19:54 +01002794err_port_stp_state_set:
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002795err_vport_flood_set:
2796err_port_vid_learning_set:
2797err_port_vid_to_fid_validate:
2798err_port_vid_to_fid_invalidate:
2799 /* Rollback vFID only if new. */
2800 if (!new_vfid->nr_vports)
2801 mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
2802 return err;
2803}
2804
2805static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
2806 struct net_device *br_dev)
2807{
2808 struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
2809 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2810 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
2811 struct net_device *dev = mlxsw_sp_vport->dev;
2812 struct mlxsw_sp_vfid *vfid;
2813 int err;
2814
2815 vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
2816 if (!vfid) {
2817 vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
2818 if (IS_ERR(vfid)) {
2819 netdev_err(dev, "Failed to create bridge vFID\n");
2820 return PTR_ERR(vfid);
2821 }
2822 }
2823
2824 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
2825 if (err) {
2826 netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
2827 vfid->vfid);
2828 goto err_port_flood_set;
2829 }
2830
2831 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
2832 if (err) {
2833 netdev_err(dev, "Failed to enable learning\n");
2834 goto err_port_vid_learning_set;
2835 }
2836
2837 /* We need to invalidate existing {Port, VID} to vFID mapping and
2838 * create a new one for the bridge's vFID.
2839 */
2840 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
2841 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
2842 false,
2843 mlxsw_sp_vfid_to_fid(old_vfid->vfid),
2844 vid);
2845 if (err) {
2846 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
2847 old_vfid->vfid);
2848 goto err_port_vid_to_fid_invalidate;
2849 }
2850
2851 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
2852 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
2853 true,
2854 mlxsw_sp_vfid_to_fid(vfid->vfid),
2855 vid);
2856 if (err) {
2857 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
2858 vfid->vfid);
2859 goto err_port_vid_to_fid_validate;
2860 }
2861
2862 /* Switch between the vFIDs and destroy the old one if needed. */
2863 vfid->nr_vports++;
2864 mlxsw_sp_vport->vport.vfid = vfid;
2865 old_vfid->nr_vports--;
2866 if (!old_vfid->nr_vports)
2867 mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
2868
2869 mlxsw_sp_vport->learning = 1;
2870 mlxsw_sp_vport->learning_sync = 1;
2871 mlxsw_sp_vport->uc_flood = 1;
2872 mlxsw_sp_vport->bridged = 1;
2873
2874 return 0;
2875
2876err_port_vid_to_fid_validate:
2877 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
2878 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
2879 mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
2880err_port_vid_to_fid_invalidate:
2881 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
2882err_port_vid_learning_set:
2883 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
2884err_port_flood_set:
2885 if (!vfid->nr_vports)
2886 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
2887 return err;
2888}
2889
2890static bool
2891mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
2892 const struct net_device *br_dev)
2893{
2894 struct mlxsw_sp_port *mlxsw_sp_vport;
2895
2896 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
2897 vport.list) {
2898 if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
2899 return false;
2900 }
2901
2902 return true;
2903}
2904
2905static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
2906 unsigned long event, void *ptr,
2907 u16 vid)
2908{
2909 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2910 struct netdev_notifier_changeupper_info *info = ptr;
2911 struct mlxsw_sp_port *mlxsw_sp_vport;
2912 struct net_device *upper_dev;
2913 int err;
2914
2915 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2916
2917 switch (event) {
2918 case NETDEV_PRECHANGEUPPER:
2919 upper_dev = info->upper_dev;
2920 if (!info->master || !info->linking)
2921 break;
2922 if (!netif_is_bridge_master(upper_dev))
2923 return NOTIFY_BAD;
2924 /* We can't have multiple VLAN interfaces configured on
2925 * the same port and being members in the same bridge.
2926 */
2927 if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
2928 upper_dev))
2929 return NOTIFY_BAD;
2930 break;
2931 case NETDEV_CHANGEUPPER:
2932 upper_dev = info->upper_dev;
2933 if (!info->master)
2934 break;
2935 if (info->linking) {
2936 if (!mlxsw_sp_vport) {
2937 WARN_ON(!mlxsw_sp_vport);
2938 return NOTIFY_BAD;
2939 }
2940 err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
2941 upper_dev);
2942 if (err) {
2943 netdev_err(dev, "Failed to join bridge\n");
2944 return NOTIFY_BAD;
2945 }
2946 } else {
2947 /* We ignore bridge's unlinking notifications if vPort
2948 * is gone, since we already left the bridge when the
2949 * VLAN device was unlinked from the real device.
2950 */
2951 if (!mlxsw_sp_vport)
2952 return NOTIFY_DONE;
2953 err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
Ido Schimmel039c49a2016-01-27 15:20:18 +01002954 upper_dev, true);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002955 if (err) {
2956 netdev_err(dev, "Failed to leave bridge\n");
2957 return NOTIFY_BAD;
2958 }
2959 }
2960 }
2961
2962 return NOTIFY_DONE;
2963}
2964
Ido Schimmel272c4472015-12-15 16:03:47 +01002965static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
2966 unsigned long event, void *ptr,
2967 u16 vid)
2968{
2969 struct net_device *dev;
2970 struct list_head *iter;
2971 int ret;
2972
2973 netdev_for_each_lower_dev(lag_dev, dev, iter) {
2974 if (mlxsw_sp_port_dev_check(dev)) {
2975 ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
2976 vid);
2977 if (ret == NOTIFY_BAD)
2978 return ret;
2979 }
2980 }
2981
2982 return NOTIFY_DONE;
2983}
2984
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002985static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
2986 unsigned long event, void *ptr)
2987{
2988 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
2989 u16 vid = vlan_dev_vlan_id(vlan_dev);
2990
Ido Schimmel272c4472015-12-15 16:03:47 +01002991 if (mlxsw_sp_port_dev_check(real_dev))
2992 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
2993 vid);
2994 else if (netif_is_lag_master(real_dev))
2995 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
2996 vid);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002997
Ido Schimmel272c4472015-12-15 16:03:47 +01002998 return NOTIFY_DONE;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002999}
3000
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003001static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3002 unsigned long event, void *ptr)
3003{
3004 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3005
3006 if (mlxsw_sp_port_dev_check(dev))
3007 return mlxsw_sp_netdevice_port_event(dev, event, ptr);
3008
3009 if (netif_is_lag_master(dev))
3010 return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
3011
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01003012 if (is_vlan_dev(dev))
3013 return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
3014
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003015 return NOTIFY_DONE;
3016}
3017
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003018static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
3019 .notifier_call = mlxsw_sp_netdevice_event,
3020};
3021
3022static int __init mlxsw_sp_module_init(void)
3023{
3024 int err;
3025
3026 register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3027 err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3028 if (err)
3029 goto err_core_driver_register;
3030 return 0;
3031
3032err_core_driver_register:
3033 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3034 return err;
3035}
3036
3037static void __exit mlxsw_sp_module_exit(void)
3038{
3039 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
3040 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3041}
3042
3043module_init(mlxsw_sp_module_init);
3044module_exit(mlxsw_sp_module_exit);
3045
3046MODULE_LICENSE("Dual BSD/GPL");
3047MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
3048MODULE_DESCRIPTION("Mellanox Spectrum driver");
3049MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);