/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <net/devlink.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

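/* Illustrative note (not part of the register spec): each
 * MLXSW_ITEM32(tx, hdr, <field>, <offset>, <shift>, <bits>) above is
 * expected to expand, via the helpers in item.h, into
 * mlxsw_tx_hdr_<field>_set()/_get() accessors operating on <bits> bits
 * starting at bit <shift> of the big-endian 32-bit word at byte
 * <offset>. For example, assuming that expansion:
 *
 *	char txhdr[MLXSW_TXHDR_LEN] = {0};
 *
 *	mlxsw_tx_hdr_version_set(txhdr, 1);	// bits 31:28 of dword 0
 *	// txhdr[0] is now 0x10
 */
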
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool *p_is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
	return 0;
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

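/* Illustrative note: the port MAC is derived by adding the local port
 * number to the last byte of the switch base MAC, so with a base MAC of
 * e4:1d:2d:00:00:00, local port 5 would get e4:1d:2d:00:00:05. This
 * sketch assumes the base MAC leaves enough room in its last byte for
 * all local ports; a carry out of addr[ETH_ALEN - 1] is not handled.
 */
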
static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

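/* Illustrative note: the MTU programmed to the device includes the Tx
 * header and the Ethernet header on top of the L3 payload. Assuming
 * MLXSW_TXHDR_LEN is 0x10 (16 bytes), the default ETH_DATA_LEN of 1500
 * would be programmed as 1500 + 16 + 14 = 1530 bytes.
 */
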
static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					   u8 local_port, u8 *p_module,
					   u8 *p_width, u8 *p_lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	*p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}

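/* Illustrative note: the PMLP register describes how a front-panel
 * module and its lanes map to a local port. A width of 0 means the
 * local port is not mapped to any module; mlxsw_sp_ports_create()
 * below relies on this to skip non-existent ports.
 */
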
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
					 u8 local_port, u8 *p_module,
					 u8 *p_width)
{
	u8 lane;

	return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module,
					       p_width, &lane);
}

static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 module, u8 width, u8 lane)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, width);
	for (i = 0; i < width; i++) {
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i);  /* Rx & Tx */
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}

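/* Illustrative note: the u64_stats_fetch_begin_irq()/_retry_irq() pair
 * re-reads each per-CPU snapshot until it observes no concurrent
 * writer, which keeps the 64-bit counters consistent on 32-bit
 * machines without taking a lock in the hot Tx path.
 */
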
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

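/* Illustrative note: the two helpers above switch a port between the
 * device's VLAN mode, where the FID is derived globally from the VID,
 * and Virtual mode, where each {Port, VID} pair is mapped to a FID
 * explicitly. Virtual mode is needed once a port carries VLAN upper
 * devices (vPorts) whose VIDs must not be bridged with the rest of
 * the switch.
 */
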
static struct mlxsw_sp_vfid *
mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
{
	struct mlxsw_sp_vfid *vfid;

	list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
		if (vfid->vid == vid)
			return vfid;
	}

	return NULL;
}

static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
				   MLXSW_SP_VFID_PORT_MAX);
}

static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

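/* Illustrative note: vFIDs occupy the FID space above the 4K VLAN
 * FIDs, so mlxsw_sp_vfid_to_fid() is expected to resolve to roughly
 * MLXSW_SP_VFID_BASE + vfid (with MLXSW_SP_VFID_BASE equal to
 * VLAN_N_VID in this era of the driver); e.g. vFID 0 would use
 * FID 4096.
 */
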
static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
						  u16 vid)
{
	struct device *dev = mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_vfid *vfid;
	u16 n_vfid;
	int err;

	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
		dev_err(dev, "No available vFIDs\n");
		return ERR_PTR(-ERANGE);
	}

	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
	if (err) {
		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
		return ERR_PTR(err);
	}

	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
	if (!vfid)
		goto err_allocate_vfid;

	vfid->vfid = n_vfid;
	vfid->vid = vid;

	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);

	return vfid;

err_allocate_vfid:
	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
	return ERR_PTR(-ENOMEM);
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vfid *vfid)
{
	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
	list_del(&vfid->list);

	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);

	kfree(vfid);
}

static struct mlxsw_sp_port *
mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_vfid *vfid)
{
	struct mlxsw_sp_port *mlxsw_sp_vport;

	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
	if (!mlxsw_sp_vport)
		return NULL;

	/* dev will be set correctly after the VLAN device is linked
	 * with the real device. In case of bridge SELF invocation, dev
	 * will remain as is.
	 */
	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
	mlxsw_sp_vport->vport.vfid = vfid;
	mlxsw_sp_vport->vport.vid = vfid->vid;

	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);

	return mlxsw_sp_vport;
}

static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
{
	list_del(&mlxsw_sp_vport->vport.list);
	kfree(mlxsw_sp_vport);
}

int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
	if (!vfid) {
		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (IS_ERR(vfid)) {
			netdev_err(dev, "Failed to create vFID for VID=%d\n",
				   vid);
			return PTR_ERR(vfid);
		}
	}

	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
	if (!mlxsw_sp_vport) {
		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
		err = -ENOMEM;
		goto err_port_vport_create;
	}

	if (!vfid->nr_vports) {
		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
					       true, false);
		if (err) {
			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
				   vfid->vfid);
			goto err_vport_flood_set;
		}
	}

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			goto err_port_vp_mode_trans;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, vfid->vfid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	vfid->nr_vports++;

	return 0;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
err_port_vid_to_fid_set:
	if (list_is_singular(&mlxsw_sp_port->vports_list))
		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
	if (!vfid->nr_vports)
		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
					 false);
err_vport_flood_set:
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
err_port_vport_create:
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
	return err;
}

int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port *mlxsw_sp_vport;
	struct mlxsw_sp_vfid *vfid;
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
	if (!mlxsw_sp_vport) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	vfid = mlxsw_sp_vport->vport.vfid;

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false,
					   mlxsw_sp_vfid_to_fid(vfid->vfid),
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, vfid->vfid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	vfid->nr_vports--;
	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);

	/* Destroy the vFID if no vPorts are assigned to it anymore. */
	if (!vfid->nr_vports)
		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);

	return 0;
}

static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
					    size_t len)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	u8 module, width, lane;
	int err;

	err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp,
					      mlxsw_sp_port->local_port,
					      &module, &width, &lane);
	if (err) {
		netdev_err(dev, "Failed to retrieve module information\n");
		return err;
	}

	if (!mlxsw_sp_port->split)
		err = snprintf(name, len, "p%d", module + 1);
	else
		err = snprintf(name, len, "p%ds%d", module + 1,
			       lane / width);

	if (err >= len)
		return -EINVAL;

	return 0;
}

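/* Illustrative note: an unsplit port on module 2 would be named "p3"
 * (modules are 0-based, names 1-based). A port split in two on the
 * same module, mapped at lane 2 with width 2, would be named "p3s1"
 * since lane / width = 2 / 2 = 1.
 */
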
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_get_phys_port_name	= mlxsw_sp_port_get_phys_port_name,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported	= SUPPORTED_100baseT_Full,
		.advertised	= ADVERTISED_100baseT_Full,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed		= 100,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_SGMII |
				  MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported	= SUPPORTED_1000baseKX_Full,
		.advertised	= ADVERTISED_1000baseKX_Full,
		.speed		= 1000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported	= SUPPORTED_10000baseT_Full,
		.advertised	= ADVERTISED_10000baseT_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported	= SUPPORTED_10000baseKX4_Full,
		.advertised	= ADVERTISED_10000baseKX4_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
				  MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported	= SUPPORTED_10000baseKR_Full,
		.advertised	= ADVERTISED_10000baseKR_Full,
		.speed		= 10000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported	= SUPPORTED_20000baseKR2_Full,
		.advertised	= ADVERTISED_20000baseKR2_Full,
		.speed		= 20000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported	= SUPPORTED_40000baseCR4_Full,
		.advertised	= ADVERTISED_40000baseCR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported	= SUPPORTED_40000baseKR4_Full,
		.advertised	= ADVERTISED_40000baseKR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported	= SUPPORTED_40000baseSR4_Full,
		.advertised	= ADVERTISED_40000baseSR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported	= SUPPORTED_40000baseLR4_Full,
		.advertised	= ADVERTISED_40000baseLR4_Full,
		.speed		= 40000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
				  MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed		= 25000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
				  MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed		= 50000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported	= SUPPORTED_56000baseKR4_Full,
		.advertised	= ADVERTISED_56000baseKR4_Full,
		.speed		= 56000,
	},
	{
		.mask		= MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
				  MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed		= 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto\n");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto\n");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Unsupported proto admin requested\n");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin\n");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status\n");
		return err;
	}
	if (!is_up)
		return 0;

	/* The port must be toggled for the new protocol to take effect. */
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status\n");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status\n");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};

static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_admin;

	eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port,
			    eth_proto_admin);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}

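/* Illustrative note: assuming MLXSW_SP_PORT_BASE_SPEED is 25000 (25G
 * per lane), a 4-lane port gets upper_speed = 100000, so every link
 * mode up to and including 100G is advertised; a single-lane split
 * port would advertise only the modes up to 25G.
 */
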
static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  bool split, u8 module, u8 width)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct devlink_port *devlink_port;
	struct net_device *dev;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->split = split;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}
	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->untagged_vlans) {
		err = -ENOMEM;
		goto err_port_untagged_vlans_alloc;
	}
	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
1500 dev->hard_header_len += MLXSW_TXHDR_LEN;
1501
Jiri Pirkoc4745502016-02-26 17:32:26 +01001502 devlink_port = &mlxsw_sp_port->devlink_port;
Ido Schimmel18f1e702016-02-26 17:32:31 +01001503 if (mlxsw_sp_port->split)
1504 devlink_port_split_set(devlink_port, module);
Jiri Pirkoc4745502016-02-26 17:32:26 +01001505 err = devlink_port_register(devlink, devlink_port, local_port);
1506 if (err) {
1507 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
1508 mlxsw_sp_port->local_port);
1509 goto err_devlink_port_register;
1510 }
1511
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001512 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
1513 if (err) {
1514 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
1515 mlxsw_sp_port->local_port);
1516 goto err_port_system_port_mapping_set;
1517 }
1518
1519 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
1520 if (err) {
1521 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
1522 mlxsw_sp_port->local_port);
1523 goto err_port_swid_set;
1524 }
1525
Ido Schimmel18f1e702016-02-26 17:32:31 +01001526 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
1527 if (err) {
1528 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
1529 mlxsw_sp_port->local_port);
1530 goto err_port_speed_by_width_set;
1531 }
1532
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001533 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
1534 if (err) {
1535 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
1536 mlxsw_sp_port->local_port);
1537 goto err_port_mtu_set;
1538 }
1539
1540 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1541 if (err)
1542 goto err_port_admin_status_set;
1543
1544 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
1545 if (err) {
1546 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
1547 mlxsw_sp_port->local_port);
1548 goto err_port_buffers_init;
1549 }
1550
1551 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
1552 err = register_netdev(dev);
1553 if (err) {
1554 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
1555 mlxsw_sp_port->local_port);
1556 goto err_register_netdev;
1557 }
1558
Jiri Pirkoc4745502016-02-26 17:32:26 +01001559 devlink_port_type_eth_set(devlink_port, dev);
1560
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001561 err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
1562 if (err)
1563 goto err_port_vlan_init;
1564
1565 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
1566 return 0;
1567
1568err_port_vlan_init:
1569 unregister_netdev(dev);
1570err_register_netdev:
1571err_port_buffers_init:
1572err_port_admin_status_set:
1573err_port_mtu_set:
Ido Schimmel18f1e702016-02-26 17:32:31 +01001574err_port_speed_by_width_set:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001575err_port_swid_set:
1576err_port_system_port_mapping_set:
Jiri Pirkoc4745502016-02-26 17:32:26 +01001577 devlink_port_unregister(&mlxsw_sp_port->devlink_port);
1578err_devlink_port_register:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001579err_dev_addr_init:
1580 free_percpu(mlxsw_sp_port->pcpu_stats);
1581err_alloc_stats:
Elad Razfc1273a2016-01-06 13:01:11 +01001582 kfree(mlxsw_sp_port->untagged_vlans);
1583err_port_untagged_vlans_alloc:
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01001584 kfree(mlxsw_sp_port->active_vlans);
1585err_port_active_vlans_alloc:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001586 free_netdev(dev);
1587 return err;
1588}
1589
Ido Schimmel18f1e702016-02-26 17:32:31 +01001590static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1591 bool split, u8 module, u8 width, u8 lane)
1592{
1593 int err;
1594
1595 err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
1596 lane);
1597 if (err)
1598 return err;
1599
1600 err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
1601 width);
1602 if (err)
1603 goto err_port_create;
1604
1605 return 0;
1606
1607err_port_create:
1608 mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
1609 return err;
1610}
1611
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001612static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001613{
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001614 struct net_device *dev = mlxsw_sp_port->dev;
1615 struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001616
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001617 list_for_each_entry_safe(mlxsw_sp_vport, tmp,
1618 &mlxsw_sp_port->vports_list, vport.list) {
1619 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
1620
1621 /* vPorts created for VLAN devices should already be gone
1622 * by now, since we unregistered the port netdev.
1623 */
1624 WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
1625 mlxsw_sp_port_kill_vid(dev, 0, vid);
1626 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001627}
1628
1629static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
1630{
1631 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
Jiri Pirkoc4745502016-02-26 17:32:26 +01001632 struct devlink_port *devlink_port;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001633
1634 if (!mlxsw_sp_port)
1635 return;
Ido Schimmela1333182016-02-26 17:32:30 +01001636 mlxsw_sp->ports[local_port] = NULL;
Jiri Pirkoc4745502016-02-26 17:32:26 +01001637 devlink_port = &mlxsw_sp_port->devlink_port;
1638 devlink_port_type_clear(devlink_port);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001639 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
Jiri Pirkoc4745502016-02-26 17:32:26 +01001640 devlink_port_unregister(devlink_port);
Ido Schimmel7f71eb42015-12-15 16:03:37 +01001641 mlxsw_sp_port_vports_fini(mlxsw_sp_port);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001642 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
Ido Schimmel3e9b27b2016-02-26 17:32:28 +01001643 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
1644 mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001645 free_percpu(mlxsw_sp_port->pcpu_stats);
Elad Razfc1273a2016-01-06 13:01:11 +01001646 kfree(mlxsw_sp_port->untagged_vlans);
Ido Schimmelbd40e9d2015-12-15 16:03:36 +01001647 kfree(mlxsw_sp_port->active_vlans);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001648 free_netdev(mlxsw_sp_port->dev);
1649}
1650
1651static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1652{
1653 int i;
1654
1655 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1656 mlxsw_sp_port_remove(mlxsw_sp, i);
1657 kfree(mlxsw_sp->ports);
1658}
1659
1660static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1661{
1662 size_t alloc_size;
Ido Schimmel558c2d52016-02-26 17:32:29 +01001663 u8 module, width;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001664 int i;
1665 int err;
1666
1667 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1668 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1669 if (!mlxsw_sp->ports)
1670 return -ENOMEM;
1671
1672 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
Ido Schimmel558c2d52016-02-26 17:32:29 +01001673 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
1674 &width);
1675 if (err)
1676 goto err_port_module_info_get;
1677 if (!width)
1678 continue;
1679 mlxsw_sp->port_to_module[i] = module;
Ido Schimmel18f1e702016-02-26 17:32:31 +01001680 err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001681 if (err)
1682 goto err_port_create;
1683 }
1684 return 0;
1685
1686err_port_create:
Ido Schimmel558c2d52016-02-26 17:32:29 +01001687err_port_module_info_get:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001688 for (i--; i >= 1; i--)
1689 mlxsw_sp_port_remove(mlxsw_sp, i);
1690 kfree(mlxsw_sp->ports);
1691 return err;
1692}
1693
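/* Return the first local port of the split cluster that local_port
 * belongs to. Ports come in groups of MLXSW_SP_PORTS_PER_CLUSTER_MAX
 * consecutive local ports; with a cluster size of 4, for example,
 * local ports 5..8 all map to base port 5.
 */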
Ido Schimmel18f1e702016-02-26 17:32:31 +01001694static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
1695{
1696 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
1697
1698 return local_port - offset;
1699}
1700
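/* Split handler: a full-width port may be split into 2 or 4 ports.
 * Validate the request, make sure the sibling slots in the cluster
 * are free, remove the existing port and recreate count split ports,
 * each mapped to MLXSW_PORT_MODULE_MAX_WIDTH / count lanes.
 */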
1701static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
1702{
1703 struct mlxsw_sp *mlxsw_sp = priv;
1704 struct mlxsw_sp_port *mlxsw_sp_port;
1705 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
1706 u8 module, cur_width, base_port;
1707 int i;
1708 int err;
1709
1710 mlxsw_sp_port = mlxsw_sp->ports[local_port];
1711 if (!mlxsw_sp_port) {
1712 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
1713 local_port);
1714 return -EINVAL;
1715 }
1716
1717 if (count != 2 && count != 4) {
1718 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
1719 return -EINVAL;
1720 }
1721
1722 err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
1723 &cur_width);
1724 if (err) {
1725 netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
1726 return err;
1727 }
1728
1729 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
1730 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
1731 return -EINVAL;
1732 }
1733
 1734	/* Make sure the other ports in the cluster needed for the split
 	 * are free.
 	 */
1735 if (count == 2) {
1736 base_port = local_port;
1737 if (mlxsw_sp->ports[base_port + 1]) {
1738 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
1739 return -EINVAL;
1740 }
1741 } else {
1742 base_port = mlxsw_sp_cluster_base_port_get(local_port);
1743 if (mlxsw_sp->ports[base_port + 1] ||
1744 mlxsw_sp->ports[base_port + 3]) {
1745 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
1746 return -EINVAL;
1747 }
1748 }
1749
1750 for (i = 0; i < count; i++)
1751 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
1752
1753 for (i = 0; i < count; i++) {
1754 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
1755 module, width, i * width);
1756 if (err) {
1757 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
1758 goto err_port_create;
1759 }
1760 }
1761
1762 return 0;
1763
1764err_port_create:
1765 for (i--; i >= 0; i--)
1766 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
1767 for (i = 0; i < count / 2; i++) {
1768 module = mlxsw_sp->port_to_module[base_port + i * 2];
1769 mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
1770 module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
1771 }
1772 return err;
1773}
1774
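/* Unsplit handler: infer how many ports the module was split into
 * from the current width (width 1 means a 4-way split, otherwise
 * 2-way), remove the split ports and recreate the original
 * full-width port(s) starting from the cluster base.
 */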
1775static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
1776{
1777 struct mlxsw_sp *mlxsw_sp = priv;
1778 struct mlxsw_sp_port *mlxsw_sp_port;
1779 u8 module, cur_width, base_port;
1780 unsigned int count;
1781 int i;
1782 int err;
1783
1784 mlxsw_sp_port = mlxsw_sp->ports[local_port];
1785 if (!mlxsw_sp_port) {
1786 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
1787 local_port);
1788 return -EINVAL;
1789 }
1790
1791 if (!mlxsw_sp_port->split) {
1792 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
1793 return -EINVAL;
1794 }
1795
1796 err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
1797 &cur_width);
1798 if (err) {
1799 netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
1800 return err;
1801 }
1802 count = cur_width == 1 ? 4 : 2;
1803
1804 base_port = mlxsw_sp_cluster_base_port_get(local_port);
1805
1806 /* Determine which ports to remove. */
1807 if (count == 2 && local_port >= base_port + 2)
1808 base_port = base_port + 2;
1809
1810 for (i = 0; i < count; i++)
1811 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
1812
1813 for (i = 0; i < count / 2; i++) {
1814 module = mlxsw_sp->port_to_module[base_port + i * 2];
1815 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
1816 module, MLXSW_PORT_MODULE_MAX_WIDTH,
1817 0);
1818 if (err)
1819 dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
1820 }
1821
1822 return 0;
1823}
1824
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001825static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
1826 char *pude_pl, void *priv)
1827{
1828 struct mlxsw_sp *mlxsw_sp = priv;
1829 struct mlxsw_sp_port *mlxsw_sp_port;
1830 enum mlxsw_reg_pude_oper_status status;
1831 u8 local_port;
1832
1833 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1834 mlxsw_sp_port = mlxsw_sp->ports[local_port];
1835 if (!mlxsw_sp_port) {
1836 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1837 local_port);
1838 return;
1839 }
1840
1841 status = mlxsw_reg_pude_oper_status_get(pude_pl);
1842 if (status == MLXSW_PORT_OPER_STATUS_UP) {
1843 netdev_info(mlxsw_sp_port->dev, "link up\n");
1844 netif_carrier_on(mlxsw_sp_port->dev);
1845 } else {
1846 netdev_info(mlxsw_sp_port->dev, "link down\n");
1847 netif_carrier_off(mlxsw_sp_port->dev);
1848 }
1849}
1850
1851static struct mlxsw_event_listener mlxsw_sp_pude_event = {
1852 .func = mlxsw_sp_pude_event_func,
1853 .trap_id = MLXSW_TRAP_ID_PUDE,
1854};
1855
1856static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
1857 enum mlxsw_event_trap_id trap_id)
1858{
1859 struct mlxsw_event_listener *el;
1860 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1861 int err;
1862
1863 switch (trap_id) {
1864 case MLXSW_TRAP_ID_PUDE:
1865 el = &mlxsw_sp_pude_event;
1866 break;
1867 }
1868 err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
1869 if (err)
1870 return err;
1871
1872 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
1873 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1874 if (err)
1875 goto err_event_trap_set;
1876
1877 return 0;
1878
1879err_event_trap_set:
1880 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1881 return err;
1882}
1883
1884static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
1885 enum mlxsw_event_trap_id trap_id)
1886{
1887 struct mlxsw_event_listener *el;
1888
1889 switch (trap_id) {
1890 case MLXSW_TRAP_ID_PUDE:
1891 el = &mlxsw_sp_pude_event;
1892 break;
1893 }
1894 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1895}
1896
1897static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
1898 void *priv)
1899{
1900 struct mlxsw_sp *mlxsw_sp = priv;
1901 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
1902 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
1903
1904 if (unlikely(!mlxsw_sp_port)) {
1905 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
1906 local_port);
1907 return;
1908 }
1909
1910 skb->dev = mlxsw_sp_port->dev;
1911
1912 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
1913 u64_stats_update_begin(&pcpu_stats->syncp);
1914 pcpu_stats->rx_packets++;
1915 pcpu_stats->rx_bytes += skb->len;
1916 u64_stats_update_end(&pcpu_stats->syncp);
1917
1918 skb->protocol = eth_type_trans(skb, skb->dev);
1919 netif_receive_skb(skb);
1920}
1921
1922static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
1923 {
1924 .func = mlxsw_sp_rx_listener_func,
1925 .local_port = MLXSW_PORT_DONT_CARE,
1926 .trap_id = MLXSW_TRAP_ID_FDB_MC,
1927 },
1928 /* Traps for specific L2 packet types, not trapped as FDB MC */
1929 {
1930 .func = mlxsw_sp_rx_listener_func,
1931 .local_port = MLXSW_PORT_DONT_CARE,
1932 .trap_id = MLXSW_TRAP_ID_STP,
1933 },
1934 {
1935 .func = mlxsw_sp_rx_listener_func,
1936 .local_port = MLXSW_PORT_DONT_CARE,
1937 .trap_id = MLXSW_TRAP_ID_LACP,
1938 },
1939 {
1940 .func = mlxsw_sp_rx_listener_func,
1941 .local_port = MLXSW_PORT_DONT_CARE,
1942 .trap_id = MLXSW_TRAP_ID_EAPOL,
1943 },
1944 {
1945 .func = mlxsw_sp_rx_listener_func,
1946 .local_port = MLXSW_PORT_DONT_CARE,
1947 .trap_id = MLXSW_TRAP_ID_LLDP,
1948 },
1949 {
1950 .func = mlxsw_sp_rx_listener_func,
1951 .local_port = MLXSW_PORT_DONT_CARE,
1952 .trap_id = MLXSW_TRAP_ID_MMRP,
1953 },
1954 {
1955 .func = mlxsw_sp_rx_listener_func,
1956 .local_port = MLXSW_PORT_DONT_CARE,
1957 .trap_id = MLXSW_TRAP_ID_MVRP,
1958 },
1959 {
1960 .func = mlxsw_sp_rx_listener_func,
1961 .local_port = MLXSW_PORT_DONT_CARE,
1962 .trap_id = MLXSW_TRAP_ID_RPVST,
1963 },
1964 {
1965 .func = mlxsw_sp_rx_listener_func,
1966 .local_port = MLXSW_PORT_DONT_CARE,
1967 .trap_id = MLXSW_TRAP_ID_DHCP,
1968 },
1969 {
1970 .func = mlxsw_sp_rx_listener_func,
1971 .local_port = MLXSW_PORT_DONT_CARE,
1972 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
1973 },
1974 {
1975 .func = mlxsw_sp_rx_listener_func,
1976 .local_port = MLXSW_PORT_DONT_CARE,
1977 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
1978 },
1979 {
1980 .func = mlxsw_sp_rx_listener_func,
1981 .local_port = MLXSW_PORT_DONT_CARE,
1982 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
1983 },
1984 {
1985 .func = mlxsw_sp_rx_listener_func,
1986 .local_port = MLXSW_PORT_DONT_CARE,
1987 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
1988 },
1989 {
1990 .func = mlxsw_sp_rx_listener_func,
1991 .local_port = MLXSW_PORT_DONT_CARE,
1992 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
1993 },
1994};
1995
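/* Configure the RX and control trap groups, then, for each trapped
 * packet type, register an RX listener and program HPKT to deliver
 * matching packets to the CPU. On failure, traps already installed
 * are reverted to forward and their listeners unregistered.
 */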
1996static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
1997{
1998 char htgt_pl[MLXSW_REG_HTGT_LEN];
1999 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2000 int i;
2001 int err;
2002
2003 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
2004 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2005 if (err)
2006 return err;
2007
2008 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
2009 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
2010 if (err)
2011 return err;
2012
2013 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2014 err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
2015 &mlxsw_sp_rx_listener[i],
2016 mlxsw_sp);
2017 if (err)
2018 goto err_rx_listener_register;
2019
2020 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
2021 mlxsw_sp_rx_listener[i].trap_id);
2022 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2023 if (err)
2024 goto err_rx_trap_set;
2025 }
2026 return 0;
2027
2028err_rx_trap_set:
2029 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2030 &mlxsw_sp_rx_listener[i],
2031 mlxsw_sp);
2032err_rx_listener_register:
2033 for (i--; i >= 0; i--) {
2034 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2035 mlxsw_sp_rx_listener[i].trap_id);
2036 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2037
2038 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2039 &mlxsw_sp_rx_listener[i],
2040 mlxsw_sp);
2041 }
2042 return err;
2043}
2044
2045static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2046{
2047 char hpkt_pl[MLXSW_REG_HPKT_LEN];
2048 int i;
2049
2050 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
2051 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
2052 mlxsw_sp_rx_listener[i].trap_id);
2053 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
2054
2055 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
2056 &mlxsw_sp_rx_listener[i],
2057 mlxsw_sp);
2058 }
2059}
2060
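/* Bind one flood-group configuration to a flood table: vFID-based
 * bridges use the FID table type, 802.1Q bridges the FID-offset type;
 * unknown unicast floods via the UC flood table, all other types via
 * the BM flood table.
 */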
2061static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
2062 enum mlxsw_reg_sfgc_type type,
2063 enum mlxsw_reg_sfgc_bridge_type bridge_type)
2064{
2065 enum mlxsw_flood_table_type table_type;
2066 enum mlxsw_sp_flood_table flood_table;
2067 char sfgc_pl[MLXSW_REG_SFGC_LEN];
2068
Ido Schimmel19ae6122015-12-15 16:03:39 +01002069 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002070 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
Ido Schimmel19ae6122015-12-15 16:03:39 +01002071 else
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002072 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
Ido Schimmel19ae6122015-12-15 16:03:39 +01002073
2074 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
2075 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
2076 else
2077 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002078
2079 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
2080 flood_table);
2081 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
2082}
2083
2084static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
2085{
2086 int type, err;
2087
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002088 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
2089 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
2090 continue;
2091
2092 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2093 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
2094 if (err)
2095 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002096
2097 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
2098 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
2099 if (err)
2100 return err;
2101 }
2102
2103 return 0;
2104}
2105
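/* Enable LAG hashing on all supported header fields (source and
 * destination MAC, EtherType, VLAN ID, IP addresses, L4 ports and
 * IP protocol) via the SLCR register.
 */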
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002106static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2107{
2108 char slcr_pl[MLXSW_REG_SLCR_LEN];
2109
2110 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2111 MLXSW_REG_SLCR_LAG_HASH_DMAC |
2112 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2113 MLXSW_REG_SLCR_LAG_HASH_VLANID |
2114 MLXSW_REG_SLCR_LAG_HASH_SIP |
2115 MLXSW_REG_SLCR_LAG_HASH_DIP |
2116 MLXSW_REG_SLCR_LAG_HASH_SPORT |
2117 MLXSW_REG_SLCR_LAG_HASH_DPORT |
2118 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
2119 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2120}
2121
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002122static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
2123 const struct mlxsw_bus_info *mlxsw_bus_info)
2124{
2125 struct mlxsw_sp *mlxsw_sp = priv;
2126 int err;
2127
2128 mlxsw_sp->core = mlxsw_core;
2129 mlxsw_sp->bus_info = mlxsw_bus_info;
Ido Schimmel7f71eb42015-12-15 16:03:37 +01002130 INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002131 INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
Elad Raz3a49b4f2016-01-10 21:06:28 +01002132 INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002133
2134 err = mlxsw_sp_base_mac_get(mlxsw_sp);
2135 if (err) {
2136 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
2137 return err;
2138 }
2139
2140 err = mlxsw_sp_ports_create(mlxsw_sp);
2141 if (err) {
2142 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
Ido Schimmel7f71eb42015-12-15 16:03:37 +01002143 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002144 }
2145
2146 err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2147 if (err) {
2148 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
2149 goto err_event_register;
2150 }
2151
2152 err = mlxsw_sp_traps_init(mlxsw_sp);
2153 if (err) {
2154 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
2155 goto err_rx_listener_register;
2156 }
2157
2158 err = mlxsw_sp_flood_init(mlxsw_sp);
2159 if (err) {
2160 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
2161 goto err_flood_init;
2162 }
2163
2164 err = mlxsw_sp_buffers_init(mlxsw_sp);
2165 if (err) {
2166 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
2167 goto err_buffers_init;
2168 }
2169
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002170 err = mlxsw_sp_lag_init(mlxsw_sp);
2171 if (err) {
2172 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
2173 goto err_lag_init;
2174 }
2175
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002176 err = mlxsw_sp_switchdev_init(mlxsw_sp);
2177 if (err) {
2178 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
2179 goto err_switchdev_init;
2180 }
2181
2182 return 0;
2183
2184err_switchdev_init:
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002185err_lag_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002186err_buffers_init:
2187err_flood_init:
2188 mlxsw_sp_traps_fini(mlxsw_sp);
2189err_rx_listener_register:
2190 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2191err_event_register:
2192 mlxsw_sp_ports_remove(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002193 return err;
2194}
2195
2196static void mlxsw_sp_fini(void *priv)
2197{
2198 struct mlxsw_sp *mlxsw_sp = priv;
2199
2200 mlxsw_sp_switchdev_fini(mlxsw_sp);
2201 mlxsw_sp_traps_fini(mlxsw_sp);
2202 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
2203 mlxsw_sp_ports_remove(mlxsw_sp);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002204}
2205
2206static struct mlxsw_config_profile mlxsw_sp_config_profile = {
2207 .used_max_vepa_channels = 1,
2208 .max_vepa_channels = 0,
2209 .used_max_lag = 1,
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002210 .max_lag = MLXSW_SP_LAG_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002211 .used_max_port_per_lag = 1,
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002212 .max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002213 .used_max_mid = 1,
Elad Raz53ae6282016-01-10 21:06:26 +01002214 .max_mid = MLXSW_SP_MID_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002215 .used_max_pgt = 1,
2216 .max_pgt = 0,
2217 .used_max_system_port = 1,
2218 .max_system_port = 64,
2219 .used_max_vlan_groups = 1,
2220 .max_vlan_groups = 127,
2221 .used_max_regions = 1,
2222 .max_regions = 400,
2223 .used_flood_tables = 1,
2224 .used_flood_mode = 1,
2225 .flood_mode = 3,
2226 .max_fid_offset_flood_tables = 2,
2227 .fid_offset_flood_table_size = VLAN_N_VID - 1,
Ido Schimmel19ae6122015-12-15 16:03:39 +01002228 .max_fid_flood_tables = 2,
2229 .fid_flood_table_size = MLXSW_SP_VFID_MAX,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002230 .used_max_ib_mc = 1,
2231 .max_ib_mc = 0,
2232 .used_max_pkey = 1,
2233 .max_pkey = 0,
2234 .swid_config = {
2235 {
2236 .used_type = 1,
2237 .type = MLXSW_PORT_SWID_TYPE_ETH,
2238 }
2239 },
2240};
2241
2242static struct mlxsw_driver mlxsw_sp_driver = {
2243 .kind = MLXSW_DEVICE_KIND_SPECTRUM,
2244 .owner = THIS_MODULE,
2245 .priv_size = sizeof(struct mlxsw_sp),
2246 .init = mlxsw_sp_init,
2247 .fini = mlxsw_sp_fini,
Ido Schimmel18f1e702016-02-26 17:32:31 +01002248 .port_split = mlxsw_sp_port_split,
2249 .port_unsplit = mlxsw_sp_port_unsplit,
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002250 .txhdr_construct = mlxsw_sp_txhdr_construct,
2251 .txhdr_len = MLXSW_TXHDR_LEN,
2252 .profile = &mlxsw_sp_config_profile,
2253};
2254
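/* FDB flush helpers, one per SFDF flush scope: whole port,
 * {port, FID}, whole LAG and {LAG, FID}.
 */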
Ido Schimmel039c49a2016-01-27 15:20:18 +01002255static int
2256mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
2257{
2258 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2259 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2260
2261 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
2262 mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
2263
2264 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2265}
2266
2267static int
2268mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2269 u16 fid)
2270{
2271 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2272 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2273
2274 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
2275 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2276 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
2277 mlxsw_sp_port->local_port);
2278
2279 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2280}
2281
2282static int
2283mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
2284{
2285 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2286 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2287
2288 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
2289 mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2290
2291 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2292}
2293
2294static int
2295mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
2296 u16 fid)
2297{
2298 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2299 char sfdf_pl[MLXSW_REG_SFDF_LEN];
2300
2301 mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
2302 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
2303 mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
2304
2305 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
2306}
2307
2308static int
2309__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
2310{
2311 int err, last_err = 0;
2312 u16 vid;
2313
2314 for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2315 err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
2316 if (err)
2317 last_err = err;
2318 }
2319
2320 return last_err;
2321}
2322
2323static int
2324__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
2325{
2326 int err, last_err = 0;
2327 u16 vid;
2328
2329 for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
2330 err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
2331 if (err)
2332 last_err = err;
2333 }
2334
2335 return last_err;
2336}
2337
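/* Flush a port's FDB entries when it leaves the bridge. If the port
 * still has vPorts on top, a plain per-port (or per-LAG) flush would
 * drop their entries as well, so flush each possible VLAN FID
 * individually instead.
 */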
2338static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
2339{
 2340	if (!list_empty(&mlxsw_sp_port->vports_list)) {
 2341		if (mlxsw_sp_port->lagged)
 2342			return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
 2343		else
 2344			return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
 2345	} else if (mlxsw_sp_port->lagged) {
 2346		return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
 2347	} else {
 2348		return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
 2349	}
2350}
2351
2352static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
2353{
2354 u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
2355 u16 fid = mlxsw_sp_vfid_to_fid(vfid);
2356
2357 if (mlxsw_sp_vport->lagged)
2358 return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
2359 fid);
2360 else
2361 return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
2362}
2363
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002364static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
2365{
2366 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
2367}
2368
2369static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
2370{
2371 struct net_device *dev = mlxsw_sp_port->dev;
2372 int err;
2373
2374 /* When port is not bridged untagged packets are tagged with
2375 * PVID=VID=1, thereby creating an implicit VLAN interface in
2376 * the device. Remove it and let bridge code take care of its
2377 * own VLANs.
2378 */
2379 err = mlxsw_sp_port_kill_vid(dev, 0, 1);
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01002380 if (err)
2381 return err;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002382
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01002383 mlxsw_sp_port->learning = 1;
2384 mlxsw_sp_port->learning_sync = 1;
2385 mlxsw_sp_port->uc_flood = 1;
2386 mlxsw_sp_port->bridged = 1;
2387
2388 return 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002389}
2390
Ido Schimmel039c49a2016-01-27 15:20:18 +01002391static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2392 bool flush_fdb)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002393{
2394 struct net_device *dev = mlxsw_sp_port->dev;
Ido Schimmel5a8f4522016-01-04 10:42:25 +01002395
Ido Schimmel039c49a2016-01-27 15:20:18 +01002396 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2397 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2398
Ido Schimmel28a01d22016-02-18 11:30:02 +01002399 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2400
Ido Schimmel6c72a3d2016-01-04 10:42:26 +01002401 mlxsw_sp_port->learning = 0;
2402 mlxsw_sp_port->learning_sync = 0;
2403 mlxsw_sp_port->uc_flood = 0;
Ido Schimmel5a8f4522016-01-04 10:42:25 +01002404 mlxsw_sp_port->bridged = 0;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002405
 2406	/* Add an implicit VLAN interface to the device, so that untagged
 2407	 * packets are classified to the default vFID.
 2408	 */
Ido Schimmel5a8f4522016-01-04 10:42:25 +01002409 return mlxsw_sp_port_add_vid(dev, 0, 1);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002410}
2411
2412static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
2413 struct net_device *br_dev)
2414{
2415 return !mlxsw_sp->master_bridge.dev ||
2416 mlxsw_sp->master_bridge.dev == br_dev;
2417}
2418
2419static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
2420 struct net_device *br_dev)
2421{
2422 mlxsw_sp->master_bridge.dev = br_dev;
2423 mlxsw_sp->master_bridge.ref_count++;
2424}
2425
2426static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
2427 struct net_device *br_dev)
2428{
2429 if (--mlxsw_sp->master_bridge.ref_count == 0)
2430 mlxsw_sp->master_bridge.dev = NULL;
2431}
2432
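/* Wrappers around the SLDR and SLCOR registers: SLDR is used below to
 * create and destroy LAGs and to add and remove distributor ports,
 * SLCOR to add, remove, enable and disable collector ports.
 */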
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002433static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002434{
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002435 char sldr_pl[MLXSW_REG_SLDR_LEN];
2436
2437 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
2438 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2439}
2440
2441static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
2442{
2443 char sldr_pl[MLXSW_REG_SLDR_LEN];
2444
2445 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
2446 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2447}
2448
2449static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2450 u16 lag_id, u8 port_index)
2451{
2452 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2453 char slcor_pl[MLXSW_REG_SLCOR_LEN];
2454
2455 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
2456 lag_id, port_index);
2457 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2458}
2459
2460static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2461 u16 lag_id)
2462{
2463 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2464 char slcor_pl[MLXSW_REG_SLCOR_LEN];
2465
2466 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
2467 lag_id);
2468 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2469}
2470
2471static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
2472 u16 lag_id)
2473{
2474 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2475 char slcor_pl[MLXSW_REG_SLCOR_LEN];
2476
2477 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
2478 lag_id);
2479 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2480}
2481
2482static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
2483 u16 lag_id)
2484{
2485 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2486 char slcor_pl[MLXSW_REG_SLCOR_LEN];
2487
2488 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
2489 lag_id);
2490 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
2491}
2492
2493static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2494 struct net_device *lag_dev,
2495 u16 *p_lag_id)
2496{
2497 struct mlxsw_sp_upper *lag;
2498 int free_lag_id = -1;
2499 int i;
2500
2501 for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
2502 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
2503 if (lag->ref_count) {
2504 if (lag->dev == lag_dev) {
2505 *p_lag_id = i;
2506 return 0;
2507 }
2508 } else if (free_lag_id < 0) {
2509 free_lag_id = i;
2510 }
2511 }
2512 if (free_lag_id < 0)
2513 return -EBUSY;
2514 *p_lag_id = free_lag_id;
2515 return 0;
2516}
2517
2518static bool
2519mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
2520 struct net_device *lag_dev,
2521 struct netdev_lag_upper_info *lag_upper_info)
2522{
2523 u16 lag_id;
2524
2525 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
2526 return false;
2527 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
2528 return false;
2529 return true;
2530}
2531
2532static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
2533 u16 lag_id, u8 *p_port_index)
2534{
2535 int i;
2536
2537 for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
2538 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
2539 *p_port_index = i;
2540 return 0;
2541 }
2542 }
2543 return -EBUSY;
2544}
2545
2546static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
2547 struct net_device *lag_dev)
2548{
2549 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2550 struct mlxsw_sp_upper *lag;
2551 u16 lag_id;
2552 u8 port_index;
2553 int err;
2554
2555 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
2556 if (err)
2557 return err;
2558 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2559 if (!lag->ref_count) {
2560 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
2561 if (err)
2562 return err;
2563 lag->dev = lag_dev;
2564 }
2565
2566 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
2567 if (err)
2568 return err;
2569 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
2570 if (err)
2571 goto err_col_port_add;
2572 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
2573 if (err)
2574 goto err_col_port_enable;
2575
2576 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
2577 mlxsw_sp_port->local_port);
2578 mlxsw_sp_port->lag_id = lag_id;
2579 mlxsw_sp_port->lagged = 1;
2580 lag->ref_count++;
2581 return 0;
2582
 2583err_col_port_enable:
 2584	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
 2585err_col_port_add:
 2586	if (!lag->ref_count)
 2587		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
 2588	return err;
2589}
2590
Ido Schimmel4dc236c2016-01-27 15:20:16 +01002591static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
Ido Schimmel039c49a2016-01-27 15:20:18 +01002592 struct net_device *br_dev,
2593 bool flush_fdb);
Ido Schimmel4dc236c2016-01-27 15:20:16 +01002594
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002595static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2596 struct net_device *lag_dev)
2597{
2598 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
Ido Schimmel4dc236c2016-01-27 15:20:16 +01002599 struct mlxsw_sp_port *mlxsw_sp_vport;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002600 struct mlxsw_sp_upper *lag;
2601 u16 lag_id = mlxsw_sp_port->lag_id;
2602 int err;
2603
2604 if (!mlxsw_sp_port->lagged)
2605 return 0;
2606 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
2607 WARN_ON(lag->ref_count == 0);
2608
2609 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
2610 if (err)
2611 return err;
Dan Carpenter82a06422015-12-09 13:33:51 +03002612 err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002613 if (err)
2614 return err;
2615
Ido Schimmel4dc236c2016-01-27 15:20:16 +01002616	/* When we leave a LAG device that has bridges built on top,
 2617	 * their teardown sequence is never issued, so we need to
 2618	 * invoke the necessary cleanup routines ourselves.
 2619	 */
2620 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
2621 vport.list) {
2622 struct net_device *br_dev;
2623
2624 if (!mlxsw_sp_vport->bridged)
2625 continue;
2626
2627 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
Ido Schimmel039c49a2016-01-27 15:20:18 +01002628 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
Ido Schimmel4dc236c2016-01-27 15:20:16 +01002629 }
2630
2631 if (mlxsw_sp_port->bridged) {
2632 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
Ido Schimmel039c49a2016-01-27 15:20:18 +01002633 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
Ido Schimmel912b1c82016-03-07 15:15:29 +01002634 mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
Ido Schimmel4dc236c2016-01-27 15:20:16 +01002635 }
2636
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002637 if (lag->ref_count == 1) {
Ido Schimmel039c49a2016-01-27 15:20:18 +01002638 if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
2639 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002640 err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
2641 if (err)
2642 return err;
2643 }
2644
2645 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
2646 mlxsw_sp_port->local_port);
2647 mlxsw_sp_port->lagged = 0;
2648 lag->ref_count--;
2649 return 0;
2650}
2651
Jiri Pirko74581202015-12-03 12:12:30 +01002652static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
2653 u16 lag_id)
2654{
2655 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2656 char sldr_pl[MLXSW_REG_SLDR_LEN];
2657
2658 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
2659 mlxsw_sp_port->local_port);
2660 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2661}
2662
2663static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
2664 u16 lag_id)
2665{
2666 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2667 char sldr_pl[MLXSW_REG_SLDR_LEN];
2668
2669 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
2670 mlxsw_sp_port->local_port);
2671 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
2672}
2673
2674static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
2675 bool lag_tx_enabled)
2676{
2677 if (lag_tx_enabled)
2678 return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
2679 mlxsw_sp_port->lag_id);
2680 else
2681 return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
2682 mlxsw_sp_port->lag_id);
2683}
2684
2685static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
2686 struct netdev_lag_lower_state_info *info)
2687{
2688 return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
2689}
2690
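/* A VLAN device on top of a switch port is backed by the vPort
 * created for the same VID; linking records the VLAN device in the
 * vPort, and unlinking points the vPort back at the underlying port
 * device.
 */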
Ido Schimmel9589a7b52015-12-15 16:03:43 +01002691static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
2692 struct net_device *vlan_dev)
2693{
2694 struct mlxsw_sp_port *mlxsw_sp_vport;
2695 u16 vid = vlan_dev_vlan_id(vlan_dev);
2696
2697 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2698 if (!mlxsw_sp_vport) {
2699 WARN_ON(!mlxsw_sp_vport);
2700 return -EINVAL;
2701 }
2702
2703 mlxsw_sp_vport->dev = vlan_dev;
2704
2705 return 0;
2706}
2707
2708static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
2709 struct net_device *vlan_dev)
2710{
2711 struct mlxsw_sp_port *mlxsw_sp_vport;
2712 u16 vid = vlan_dev_vlan_id(vlan_dev);
2713
2714 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
2715 if (!mlxsw_sp_vport) {
2716 WARN_ON(!mlxsw_sp_vport);
2717 return -EINVAL;
2718 }
2719
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002720 /* When removing a VLAN device while still bridged we should first
2721 * remove it from the bridge, as we receive the bridge's notification
2722 * when the vPort is already gone.
2723 */
2724 if (mlxsw_sp_vport->bridged) {
2725 struct net_device *br_dev;
2726
2727 br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
Ido Schimmel039c49a2016-01-27 15:20:18 +01002728 mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002729 }
2730
Ido Schimmel9589a7b52015-12-15 16:03:43 +01002731 mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
2732
2733 return 0;
2734}
2735
Jiri Pirko74581202015-12-03 12:12:30 +01002736static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
2737 unsigned long event, void *ptr)
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002738{
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002739 struct netdev_notifier_changeupper_info *info;
2740 struct mlxsw_sp_port *mlxsw_sp_port;
2741 struct net_device *upper_dev;
2742 struct mlxsw_sp *mlxsw_sp;
2743 int err;
2744
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002745 mlxsw_sp_port = netdev_priv(dev);
2746 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2747 info = ptr;
2748
2749 switch (event) {
2750 case NETDEV_PRECHANGEUPPER:
2751 upper_dev = info->upper_dev;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002752 if (!info->master || !info->linking)
2753 break;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002754 /* HW limitation forbids to put ports to multiple bridges. */
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002755 if (netif_is_bridge_master(upper_dev) &&
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002756 !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
2757 return NOTIFY_BAD;
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002758 if (netif_is_lag_master(upper_dev) &&
2759 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
2760 info->upper_info))
2761 return NOTIFY_BAD;
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002762 break;
2763 case NETDEV_CHANGEUPPER:
2764 upper_dev = info->upper_dev;
Ido Schimmel9589a7b52015-12-15 16:03:43 +01002765 if (is_vlan_dev(upper_dev)) {
2766 if (info->linking) {
2767 err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
2768 upper_dev);
2769 if (err) {
2770 netdev_err(dev, "Failed to link VLAN device\n");
2771 return NOTIFY_BAD;
2772 }
2773 } else {
2774 err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
2775 upper_dev);
2776 if (err) {
2777 netdev_err(dev, "Failed to unlink VLAN device\n");
2778 return NOTIFY_BAD;
2779 }
2780 }
2781 } else if (netif_is_bridge_master(upper_dev)) {
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002782 if (info->linking) {
2783 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
Ido Schimmel78124072016-01-04 10:42:24 +01002784 if (err) {
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002785 netdev_err(dev, "Failed to join bridge\n");
Ido Schimmel78124072016-01-04 10:42:24 +01002786 return NOTIFY_BAD;
2787 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002788 mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002789 } else {
Ido Schimmel039c49a2016-01-27 15:20:18 +01002790 err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
2791 true);
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002792 mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
Ido Schimmel78124072016-01-04 10:42:24 +01002793 if (err) {
2794 netdev_err(dev, "Failed to leave bridge\n");
2795 return NOTIFY_BAD;
2796 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002797 }
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002798 } else if (netif_is_lag_master(upper_dev)) {
2799 if (info->linking) {
2800 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
2801 upper_dev);
2802 if (err) {
2803 netdev_err(dev, "Failed to join link aggregation\n");
2804 return NOTIFY_BAD;
2805 }
2806 } else {
2807 err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
2808 upper_dev);
2809 if (err) {
2810 netdev_err(dev, "Failed to leave link aggregation\n");
2811 return NOTIFY_BAD;
2812 }
2813 }
Jiri Pirko56ade8f2015-10-16 14:01:37 +02002814 }
2815 break;
2816 }
2817
2818 return NOTIFY_DONE;
2819}
2820
Jiri Pirko74581202015-12-03 12:12:30 +01002821static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
2822 unsigned long event, void *ptr)
2823{
2824 struct netdev_notifier_changelowerstate_info *info;
2825 struct mlxsw_sp_port *mlxsw_sp_port;
2826 int err;
2827
2828 mlxsw_sp_port = netdev_priv(dev);
2829 info = ptr;
2830
2831 switch (event) {
2832 case NETDEV_CHANGELOWERSTATE:
2833 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
2834 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
2835 info->lower_state_info);
2836 if (err)
2837 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
2838 }
2839 break;
2840 }
2841
2842 return NOTIFY_DONE;
2843}
2844
2845static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
2846 unsigned long event, void *ptr)
2847{
2848 switch (event) {
2849 case NETDEV_PRECHANGEUPPER:
2850 case NETDEV_CHANGEUPPER:
2851 return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
2852 case NETDEV_CHANGELOWERSTATE:
2853 return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
2854 }
2855
2856 return NOTIFY_DONE;
2857}
2858
Jiri Pirko0d65fc12015-12-03 12:12:28 +01002859static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
2860 unsigned long event, void *ptr)
2861{
2862 struct net_device *dev;
2863 struct list_head *iter;
2864 int ret;
2865
2866 netdev_for_each_lower_dev(lag_dev, dev, iter) {
2867 if (mlxsw_sp_port_dev_check(dev)) {
2868 ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
2869 if (ret == NOTIFY_BAD)
2870 return ret;
2871 }
2872 }
2873
2874 return NOTIFY_DONE;
2875}
2876
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002877static struct mlxsw_sp_vfid *
2878mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
2879 const struct net_device *br_dev)
2880{
2881 struct mlxsw_sp_vfid *vfid;
2882
2883 list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
2884 if (vfid->br_dev == br_dev)
2885 return vfid;
2886 }
2887
2888 return NULL;
2889}
2890
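/* The vFID space is partitioned: the first MLXSW_SP_VFID_PORT_MAX
 * entries are used for port vFIDs, the following MLXSW_SP_VFID_BR_MAX
 * entries for bridge vFIDs, so a bridge vFID is simply its index
 * offset by MLXSW_SP_VFID_PORT_MAX.
 */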
2891static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
2892{
2893 return vfid - MLXSW_SP_VFID_PORT_MAX;
2894}
2895
2896static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
2897{
2898 return MLXSW_SP_VFID_PORT_MAX + br_vfid;
2899}
2900
2901static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
2902{
2903 return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
2904 MLXSW_SP_VFID_BR_MAX);
2905}
2906
2907static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
2908 struct net_device *br_dev)
2909{
2910 struct device *dev = mlxsw_sp->bus_info->dev;
2911 struct mlxsw_sp_vfid *vfid;
2912 u16 n_vfid;
2913 int err;
2914
2915 n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
2916 if (n_vfid == MLXSW_SP_VFID_MAX) {
2917 dev_err(dev, "No available vFIDs\n");
2918 return ERR_PTR(-ERANGE);
2919 }
2920
2921 err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
2922 if (err) {
2923 dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
2924 return ERR_PTR(err);
2925 }
2926
2927 vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
2928 if (!vfid)
2929 goto err_allocate_vfid;
2930
2931 vfid->vfid = n_vfid;
2932 vfid->br_dev = br_dev;
2933
2934 list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
2935 set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
2936
2937 return vfid;
2938
2939err_allocate_vfid:
2940 __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
2941 return ERR_PTR(-ENOMEM);
2942}
2943
2944static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
2945 struct mlxsw_sp_vfid *vfid)
2946{
2947 u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
2948
2949 clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
2950 list_del(&vfid->list);
2951
2952 __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
2953
2954 kfree(vfid);
2955}
2956
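/* Take a vPort out of its bridge's vFID: move it back to a per-VID
 * vFID (creating one if needed), remap the {port, VID} pair, disable
 * learning and flooding, return the VLAN to forwarding state and
 * optionally flush the FDB entries learned while bridged.
 */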
2957static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
Ido Schimmel039c49a2016-01-27 15:20:18 +01002958 struct net_device *br_dev,
2959 bool flush_fdb)
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01002960{
2961 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
2962 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
2963 struct net_device *dev = mlxsw_sp_vport->dev;
2964 struct mlxsw_sp_vfid *vfid, *new_vfid;
2965 int err;
2966
2967 vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
2968 if (!vfid) {
2969 WARN_ON(!vfid);
2970 return -EINVAL;
2971 }
2972
2973 /* We need a vFID to go back to after leaving the bridge's vFID. */
2974 new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
2975 if (!new_vfid) {
2976 new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
2977 if (IS_ERR(new_vfid)) {
2978 netdev_err(dev, "Failed to create vFID for VID=%d\n",
2979 vid);
2980 return PTR_ERR(new_vfid);
2981 }
2982 }
2983
2984 /* Invalidate existing {Port, VID} to vFID mapping and create a new
2985 * one for the new vFID.
2986 */
2987 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
2988 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
2989 false,
2990 mlxsw_sp_vfid_to_fid(vfid->vfid),
2991 vid);
2992 if (err) {
2993 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
2994 vfid->vfid);
2995 goto err_port_vid_to_fid_invalidate;
2996 }
2997
2998 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
2999 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3000 true,
3001 mlxsw_sp_vfid_to_fid(new_vfid->vfid),
3002 vid);
3003 if (err) {
3004 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3005 new_vfid->vfid);
3006 goto err_port_vid_to_fid_validate;
3007 }
3008
3009 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3010 if (err) {
3011 netdev_err(dev, "Failed to disable learning\n");
3012 goto err_port_vid_learning_set;
3013 }
3014
3015 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
3016 false);
3017 if (err) {
3018 netdev_err(dev, "Failed clear to clear flooding\n");
3019 goto err_vport_flood_set;
3020 }
3021
Ido Schimmel6a9863a2016-02-15 13:19:54 +01003022 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
3023 MLXSW_REG_SPMS_STATE_FORWARDING);
3024 if (err) {
3025 netdev_err(dev, "Failed to set STP state\n");
3026 goto err_port_stp_state_set;
3027 }
3028
Ido Schimmel039c49a2016-01-27 15:20:18 +01003029 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
3030 netdev_err(dev, "Failed to flush FDB\n");
3031
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01003032 /* Switch between the vFIDs and destroy the old one if needed. */
3033 new_vfid->nr_vports++;
3034 mlxsw_sp_vport->vport.vfid = new_vfid;
3035 vfid->nr_vports--;
3036 if (!vfid->nr_vports)
3037 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3038
3039 mlxsw_sp_vport->learning = 0;
3040 mlxsw_sp_vport->learning_sync = 0;
3041 mlxsw_sp_vport->uc_flood = 0;
3042 mlxsw_sp_vport->bridged = 0;
3043
3044 return 0;
3045
Ido Schimmel6a9863a2016-02-15 13:19:54 +01003046err_port_stp_state_set:
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01003047err_vport_flood_set:
3048err_port_vid_learning_set:
3049err_port_vid_to_fid_validate:
3050err_port_vid_to_fid_invalidate:
3051 /* Rollback vFID only if new. */
3052 if (!new_vfid->nr_vports)
3053 mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
3054 return err;
3055}
3056
3057static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
3058 struct net_device *br_dev)
3059{
3060 struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
3061 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
3062 u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
3063 struct net_device *dev = mlxsw_sp_vport->dev;
3064 struct mlxsw_sp_vfid *vfid;
3065 int err;
3066
3067 vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
3068 if (!vfid) {
3069 vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
3070 if (IS_ERR(vfid)) {
3071 netdev_err(dev, "Failed to create bridge vFID\n");
3072 return PTR_ERR(vfid);
3073 }
3074 }
3075
3076 err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
3077 if (err) {
3078 netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
3079 vfid->vfid);
3080 goto err_port_flood_set;
3081 }
3082
3083 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
3084 if (err) {
3085 netdev_err(dev, "Failed to enable learning\n");
3086 goto err_port_vid_learning_set;
3087 }
3088
3089 /* We need to invalidate existing {Port, VID} to vFID mapping and
3090 * create a new one for the bridge's vFID.
3091 */
3092 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3093 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3094 false,
3095 mlxsw_sp_vfid_to_fid(old_vfid->vfid),
3096 vid);
3097 if (err) {
3098 netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
3099 old_vfid->vfid);
3100 goto err_port_vid_to_fid_invalidate;
3101 }
3102
3103 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3104 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
3105 true,
3106 mlxsw_sp_vfid_to_fid(vfid->vfid),
3107 vid);
3108 if (err) {
3109 netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
3110 vfid->vfid);
3111 goto err_port_vid_to_fid_validate;
3112 }
3113
3114 /* Switch between the vFIDs and destroy the old one if needed. */
3115 vfid->nr_vports++;
3116 mlxsw_sp_vport->vport.vfid = vfid;
3117 old_vfid->nr_vports--;
3118 if (!old_vfid->nr_vports)
3119 mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
3120
3121 mlxsw_sp_vport->learning = 1;
3122 mlxsw_sp_vport->learning_sync = 1;
3123 mlxsw_sp_vport->uc_flood = 1;
3124 mlxsw_sp_vport->bridged = 1;
3125
3126 return 0;
3127
3128err_port_vid_to_fid_validate:
3129 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
3130 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
3131 mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
3132err_port_vid_to_fid_invalidate:
3133 mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
3134err_port_vid_learning_set:
3135 mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
3136err_port_flood_set:
3137 if (!vfid->nr_vports)
3138 mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
3139 return err;
3140}
3141
3142static bool
3143mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
3144 const struct net_device *br_dev)
3145{
3146 struct mlxsw_sp_port *mlxsw_sp_vport;
3147
3148 list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
3149 vport.list) {
3150 if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
3151 return false;
3152 }
3153
3154 return true;
3155}
3156
3157static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
3158 unsigned long event, void *ptr,
3159 u16 vid)
3160{
3161 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
3162 struct netdev_notifier_changeupper_info *info = ptr;
3163 struct mlxsw_sp_port *mlxsw_sp_vport;
3164 struct net_device *upper_dev;
3165 int err;
3166
3167 mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
3168
3169 switch (event) {
3170 case NETDEV_PRECHANGEUPPER:
3171 upper_dev = info->upper_dev;
3172 if (!info->master || !info->linking)
3173 break;
3174 if (!netif_is_bridge_master(upper_dev))
3175 return NOTIFY_BAD;
 3176		/* We can't have multiple VLAN interfaces configured on
 3177		 * the same port that are members of the same bridge.
 3178		 */
3179 if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
3180 upper_dev))
3181 return NOTIFY_BAD;
3182 break;
3183 case NETDEV_CHANGEUPPER:
3184 upper_dev = info->upper_dev;
3185 if (!info->master)
3186 break;
3187 if (info->linking) {
3188 if (!mlxsw_sp_vport) {
3189 WARN_ON(!mlxsw_sp_vport);
3190 return NOTIFY_BAD;
3191 }
3192 err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
3193 upper_dev);
3194 if (err) {
3195 netdev_err(dev, "Failed to join bridge\n");
3196 return NOTIFY_BAD;
3197 }
3198 } else {
 3199			/* We ignore the bridge's unlinking notification if the
 3200			 * vPort is gone, since we already left the bridge when
 3201			 * the VLAN device was unlinked from the real device.
 3202			 */
3203 if (!mlxsw_sp_vport)
3204 return NOTIFY_DONE;
3205 err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
Ido Schimmel039c49a2016-01-27 15:20:18 +01003206 upper_dev, true);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01003207 if (err) {
3208 netdev_err(dev, "Failed to leave bridge\n");
3209 return NOTIFY_BAD;
3210 }
3211 }
3212 }
3213
3214 return NOTIFY_DONE;
3215}
3216
Ido Schimmel272c4472015-12-15 16:03:47 +01003217static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
3218 unsigned long event, void *ptr,
3219 u16 vid)
3220{
3221 struct net_device *dev;
3222 struct list_head *iter;
3223 int ret;
3224
3225 netdev_for_each_lower_dev(lag_dev, dev, iter) {
3226 if (mlxsw_sp_port_dev_check(dev)) {
3227 ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
3228 vid);
3229 if (ret == NOTIFY_BAD)
3230 return ret;
3231 }
3232 }
3233
3234 return NOTIFY_DONE;
3235}
3236
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01003237static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
3238 unsigned long event, void *ptr)
3239{
3240 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
3241 u16 vid = vlan_dev_vlan_id(vlan_dev);
3242
Ido Schimmel272c4472015-12-15 16:03:47 +01003243 if (mlxsw_sp_port_dev_check(real_dev))
3244 return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
3245 vid);
3246 else if (netif_is_lag_master(real_dev))
3247 return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
3248 vid);
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01003249
Ido Schimmel272c4472015-12-15 16:03:47 +01003250 return NOTIFY_DONE;
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01003251}
3252
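/* Top-level netdev notifier: handle events on switch ports directly,
 * walk the lower devices of LAG masters and handle VLAN devices
 * through the vPort that backs them.
 */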
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003253static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
3254 unsigned long event, void *ptr)
3255{
3256 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3257
3258 if (mlxsw_sp_port_dev_check(dev))
3259 return mlxsw_sp_netdevice_port_event(dev, event, ptr);
3260
3261 if (netif_is_lag_master(dev))
3262 return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
3263
Ido Schimmel26f0e7f2015-12-15 16:03:44 +01003264 if (is_vlan_dev(dev))
3265 return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
3266
Jiri Pirko0d65fc12015-12-03 12:12:28 +01003267 return NOTIFY_DONE;
3268}
3269
Jiri Pirko56ade8f2015-10-16 14:01:37 +02003270static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
3271 .notifier_call = mlxsw_sp_netdevice_event,
3272};
3273
3274static int __init mlxsw_sp_module_init(void)
3275{
3276 int err;
3277
3278 register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3279 err = mlxsw_core_driver_register(&mlxsw_sp_driver);
3280 if (err)
3281 goto err_core_driver_register;
3282 return 0;
3283
3284err_core_driver_register:
3285 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3286 return err;
3287}
3288
3289static void __exit mlxsw_sp_module_exit(void)
3290{
3291 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
3292 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
3293}
3294
3295module_init(mlxsw_sp_module_init);
3296module_exit(mlxsw_sp_module_exit);
3297
3298MODULE_LICENSE("Dual BSD/GPL");
3299MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
3300MODULE_DESCRIPTION("Mellanox Spectrum driver");
3301MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);