/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "port.h"
#include "trap.h"
#include "txheader.h"

static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);

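/* Build the Tx header for packets injected by the driver. Everything sent
 * through this helper is stamped as an Ethernet control packet and directed
 * to a specific egress port (tx_info->local_port), bypassing the forwarding
 * pipeline.
 */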
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
				     const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}

static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}

static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
			    MLXSW_PORT_ADMIN_STATUS_DOWN);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}

static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
					 bool *p_is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];
	u8 oper_status;
	int err;

	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
	if (err)
		return err;
	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
	return 0;
}

static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];
	int err;

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
			    MLXSW_SP_VFID_BASE + vfid, 0);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);

	if (err)
		return err;

	set_bit(vfid, mlxsw_sp->active_vfids);
	return 0;
}

static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	clear_bit(vfid, mlxsw_sp->active_vfids);

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
			    MLXSW_SP_VFID_BASE + vfid, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}

static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}

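/* Derive the port's MAC address from the switch base MAC: the last byte is
 * offset by the local port number, giving each port a unique address.
 */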
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;

	ether_addr_copy(addr, mlxsw_sp->base_mac);
	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}

static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       u16 vid, enum mlxsw_reg_spms_state state)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}

static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int max_mtu;
	int err;

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;
	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);

	if (mtu > max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}

static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}

static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}

int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svfa_pl[MLXSW_REG_SVFA_LEN];

	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
			    fid, vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
}

static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  u16 vid, bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}

static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}

static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool *p_usable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	*p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
	return 0;
}

static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	netif_start_queue(dev);
	return 0;
}

static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	netif_stop_queue(dev);
	return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
}

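/* Prepend the Tx header and hand the packet to the mlxsw core for
 * transmission. On the drop paths the skb has been consumed, so the per-CPU
 * tx_dropped counter is bumped and NETDEV_TX_OK is still returned.
 */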
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
		struct sk_buff *skb_orig = skb;

		skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
		if (!skb) {
			this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
			dev_kfree_skb_any(skb_orig);
			return NETDEV_TX_OK;
		}
	}

	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	mlxsw_sp_txhdr_construct(skb, &tx_info);
	len = skb->len;
	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}

static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}

static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		return err;
	dev->mtu = mtu;
	return 0;
}

static struct rtnl_link_stats64 *
mlxsw_sp_port_get_stats64(struct net_device *dev,
			  struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}

int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}

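/* Switch all active 802.1Q VLANs on the port to explicit {Port, VID} to FID
 * mappings and then move the port to Virtual mode. On failure, undo the
 * mappings installed so far.
 */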
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid, last_visited_vid;
	int err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
						   vid);
		if (err) {
			last_visited_vid = vid;
			goto err_port_vid_to_fid_set;
		}
	}

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err) {
		last_visited_vid = VLAN_N_VID;
		goto err_port_vid_to_fid_set;
	}

	return 0;

err_port_vid_to_fid_set:
	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
		mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
					     vid);
	return err;
}

static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
{
	enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
	u16 vid;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	if (err)
		return err;

	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
		err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
						   vid, vid);
		if (err)
			return err;
	}

	return 0;
}

int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
			  u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *sftr_pl;
	int err;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
		netdev_warn(dev, "VID=%d already configured\n", vid);
		return 0;
	}

	if (!test_bit(vid, mlxsw_sp->active_vfids)) {
		err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
		if (err) {
			netdev_err(dev, "Failed to create vFID=%d\n",
				   MLXSW_SP_VFID_BASE + vid);
			return err;
		}

		sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
		if (!sftr_pl) {
			err = -ENOMEM;
			goto err_flood_table_alloc;
		}
		mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
				    MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
				    MLXSW_PORT_CPU_PORT, true);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
		kfree(sftr_pl);
		if (err) {
			netdev_err(dev, "Failed to configure flood table\n");
			goto err_flood_table_config;
		}
	}

	/* In case we fail in the following steps, we intentionally do not
	 * destroy the associated vFID.
	 */

	/* When adding the first VLAN interface on a bridged port we need to
	 * transition all the active 802.1Q bridge VLANs to use explicit
	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
	 */
	if (!mlxsw_sp_port->nr_vfids) {
		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to Virtual mode\n");
			return err;
		}
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   true, MLXSW_SP_VFID_BASE + vid, vid);
	if (err) {
		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
			   vid, MLXSW_SP_VFID_BASE + vid);
		goto err_port_vid_to_fid_set;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err) {
		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
		goto err_port_vid_learning_set;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		goto err_port_add_vid;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
					  MLXSW_REG_SPMS_STATE_FORWARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		goto err_port_stp_state_set;
	}

	mlxsw_sp_port->nr_vfids++;
	set_bit(vid, mlxsw_sp_port->active_vfids);

	return 0;

err_flood_table_config:
err_flood_table_alloc:
	mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
	return err;

err_port_stp_state_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_add_vid:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
				     MLXSW_SP_VFID_BASE + vid, vid);
err_port_vid_to_fid_set:
	mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
	return err;
}

int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
		netdev_warn(dev, "VID=%d does not exist\n", vid);
		return 0;
	}

	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
					  MLXSW_REG_SPMS_STATE_DISCARDING);
	if (err) {
		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	if (err) {
		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
			   vid);
		return err;
	}

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	if (err) {
		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
		return err;
	}

	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
					   false, MLXSW_SP_VFID_BASE + vid,
					   vid);
	if (err) {
		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
			   vid, MLXSW_SP_VFID_BASE + vid);
		return err;
	}

	/* When removing the last VLAN interface on a bridged port we need to
	 * transition all active 802.1Q bridge VLANs to use VID to FID
	 * mappings and set port's mode to VLAN mode.
	 */
	if (mlxsw_sp_port->nr_vfids == 1) {
		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
		if (err) {
			netdev_err(dev, "Failed to set to VLAN mode\n");
			return err;
		}
	}

	mlxsw_sp_port->nr_vfids--;
	clear_bit(vid, mlxsw_sp_port->active_vfids);

	return 0;
}

static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open = mlxsw_sp_port_open,
	.ndo_stop = mlxsw_sp_port_stop,
	.ndo_start_xmit = mlxsw_sp_port_xmit,
	.ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu = mlxsw_sp_port_change_mtu,
	.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
	.ndo_fdb_add = switchdev_port_fdb_add,
	.ndo_fdb_del = switchdev_port_fdb_del,
	.ndo_fdb_dump = switchdev_port_fdb_dump,
	.ndo_bridge_setlink = switchdev_port_bridge_setlink,
	.ndo_bridge_getlink = switchdev_port_bridge_getlink,
	.ndo_bridge_dellink = switchdev_port_bridge_dellink,
};

static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, mlxsw_sp_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 mlxsw_sp->bus_info->fw_rev.major,
		 mlxsw_sp->bus_info->fw_rev.minor,
		 mlxsw_sp->bus_info->fw_rev.subminor);
	strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
		sizeof(drvinfo->bus_info));
}

struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)

static void mlxsw_sp_port_get_strings(struct net_device *dev,
				      u32 stringset, u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
			memcpy(p, mlxsw_sp_port_hw_stats[i].str,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
				     enum ethtool_phys_id_state state)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char mlcr_pl[MLXSW_REG_MLCR_LEN];
	bool active;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		active = true;
		break;
	case ETHTOOL_ID_INACTIVE:
		active = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}

static void mlxsw_sp_port_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int i;
	int err;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
	for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
		data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
}

static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return MLXSW_SP_PORT_HW_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported = SUPPORTED_100baseT_Full,
		.advertised = ADVERTISED_100baseT_Full,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported = SUPPORTED_1000baseKX_Full,
		.advertised = ADVERTISED_1000baseKX_Full,
		.speed = 1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported = SUPPORTED_10000baseT_Full,
		.advertised = ADVERTISED_10000baseT_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported = SUPPORTED_10000baseKX4_Full,
		.advertised = ADVERTISED_10000baseKX4_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported = SUPPORTED_10000baseKR_Full,
		.advertised = ADVERTISED_10000baseKR_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported = SUPPORTED_20000baseKR2_Full,
		.advertised = ADVERTISED_20000baseKR2_Full,
		.speed = 20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported = SUPPORTED_40000baseCR4_Full,
		.advertised = ADVERTISED_40000baseCR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported = SUPPORTED_40000baseKR4_Full,
		.advertised = ADVERTISED_40000baseKR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported = SUPPORTED_40000baseSR4_Full,
		.advertised = ADVERTISED_40000baseSR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported = SUPPORTED_40000baseLR4_Full,
		.advertised = ADVERTISED_40000baseLR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed = 25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed = 50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported = SUPPORTED_56000baseKR4_Full,
		.advertised = ADVERTISED_56000baseKR4_Full,
		.speed = 56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed = 100000,
	},
};

#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)

static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return SUPPORTED_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
		return SUPPORTED_Backplane;
	return 0;
}

static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].supported;
	}
	return modes;
}

static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
{
	u32 modes = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
			modes |= mlxsw_sp_port_link_mode[i].advertised;
	}
	return modes;
}

static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
					    struct ethtool_cmd *cmd)
{
	u32 speed = SPEED_UNKNOWN;
	u8 duplex = DUPLEX_UNKNOWN;
	int i;

	if (!carrier_ok)
		goto out;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
			speed = mlxsw_sp_port_link_mode[i].speed;
			duplex = DUPLEX_FULL;
			break;
		}
	}
out:
	ethtool_cmd_speed_set(cmd, speed);
	cmd->duplex = duplex;
}

static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
{
	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_SGMII))
		return PORT_FIBRE;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
		return PORT_DA;

	if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			      MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
			      MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
			      MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
		return PORT_NONE;

	return PORT_OTHER;
}

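/* Translate the port's PTYS register (capability, admin and operational
 * protocol masks) into the ethtool_cmd fields reported to user space.
 */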
static int mlxsw_sp_port_get_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	u32 eth_proto_oper;
	int err;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
			      &eth_proto_admin, &eth_proto_oper);

	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
					eth_proto_oper, cmd);

	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);

	cmd->transceiver = XCVR_INTERNAL;
	return 0;
}

static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static u32 mlxsw_sp_to_ptys_speed(u32 speed)
{
	u32 ptys_proto = 0;
	int i;

	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
		if (speed == mlxsw_sp_port_link_mode[i].speed)
			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
	}
	return ptys_proto;
}

static int mlxsw_sp_port_set_settings(struct net_device *dev,
				      struct ethtool_cmd *cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 speed;
	u32 eth_proto_new;
	u32 eth_proto_cap;
	u32 eth_proto_admin;
	bool is_up;
	int err;

	speed = ethtool_cmd_speed(cmd);

	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
		mlxsw_sp_to_ptys_speed(speed);

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to get proto");
		return err;
	}
	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);

	eth_proto_new = eth_proto_new & eth_proto_cap;
	if (!eth_proto_new) {
		netdev_err(dev, "Not supported proto admin requested");
		return -EINVAL;
	}
	if (eth_proto_new == eth_proto_admin)
		return 0;

	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err) {
		netdev_err(dev, "Failed to set proto admin");
		return err;
	}

	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err) {
		netdev_err(dev, "Failed to get oper status");
		return err;
	}
	if (!is_up)
		return 0;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err) {
		netdev_err(dev, "Failed to set admin status");
		return err;
	}

	return 0;
}

static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo = mlxsw_sp_port_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlxsw_sp_port_get_strings,
	.set_phys_id = mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats = mlxsw_sp_port_get_stats,
	.get_sset_count = mlxsw_sp_port_get_sset_count,
	.get_settings = mlxsw_sp_port_get_settings,
	.set_settings = mlxsw_sp_port_set_settings,
};

static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	bool usable;
	size_t bytes;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->pvid = 1;
	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
	if (!mlxsw_sp_port->active_vlans) {
		err = -ENOMEM;
		goto err_port_active_vlans_alloc;
	}

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top of all
	 * other headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_check;
	}

	if (!usable) {
		dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
			mlxsw_sp_port->local_port);
		goto port_not_usable;
	}

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
err_port_module_check:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	kfree(mlxsw_sp_port->active_vlans);
err_port_active_vlans_alloc:
	free_netdev(dev);
	return err;
}

static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
{
	u16 vfid;

	for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
}

static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	kfree(mlxsw_sp_port->active_vlans);
	free_netdev(mlxsw_sp_port->dev);
}

static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
}

static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
		err = mlxsw_sp_port_create(mlxsw_sp, i);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_remove(mlxsw_sp, i);
	kfree(mlxsw_sp->ports);
	return err;
}

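/* Handle a Port Up/Down Event (PUDE): propagate the operational state
 * reported by the device to the netdev's carrier state.
 */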
static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
				     char *pude_pl, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pude_oper_status status;
	u8 local_port;

	local_port = mlxsw_reg_pude_local_port_get(pude_pl);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
			 local_port);
		return;
	}

	status = mlxsw_reg_pude_oper_status_get(pude_pl);
	if (status == MLXSW_PORT_OPER_STATUS_UP) {
		netdev_info(mlxsw_sp_port->dev, "link up\n");
		netif_carrier_on(mlxsw_sp_port->dev);
	} else {
		netdev_info(mlxsw_sp_port->dev, "link down\n");
		netif_carrier_off(mlxsw_sp_port->dev);
	}
}

static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};

static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
				   enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_event_trap_set;

	return 0;

err_event_trap_set:
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
	return err;
}

static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
				      enum mlxsw_event_trap_id trap_id)
{
	struct mlxsw_event_listener *el;

	switch (trap_id) {
	case MLXSW_TRAP_ID_PUDE:
		el = &mlxsw_sp_pude_event;
		break;
	}
	mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
}

static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}

static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_FDB_MC,
	},
	/* Traps for specific L2 packet types, not trapped as FDB MC */
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_STP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LACP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_EAPOL,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_LLDP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MMRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_MVRP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_RPVST,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_DHCP,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
	},
	{
		.func = mlxsw_sp_rx_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
	},
};

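/* Set up the RX trap groups and register a listener for each trapped packet
 * type, so the matching control packets are delivered to the CPU. On failure,
 * previously installed traps and listeners are rolled back.
 */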
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}

static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
}

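/* vFIDs use a single per-FID flood table, whereas 802.1Q FIDs use FID-offset
 * tables: one for unknown unicast traffic and another for broadcast and
 * unregistered multicast.
 */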
1667static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
1668 enum mlxsw_reg_sfgc_type type,
1669 enum mlxsw_reg_sfgc_bridge_type bridge_type)
1670{
1671 enum mlxsw_flood_table_type table_type;
1672 enum mlxsw_sp_flood_table flood_table;
1673 char sfgc_pl[MLXSW_REG_SFGC_LEN];
1674
1675 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
1676 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
1677 flood_table = 0;
1678 } else {
1679 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
1680 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
1681 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
1682 else
1683 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
1684 }
1685
1686 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
1687 flood_table);
1688 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
1689}
1690
1691static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
1692{
1693 int type, err;
1694
1695 /* For non-offloaded netdevs, flood all traffic types to CPU
1696 * port.
1697 */
1698 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
1699 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
1700 continue;
1701
1702 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1703 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
1704 if (err)
1705 return err;
1706 }
1707
1708 /* For bridged ports, use one flooding table for unknown unicast
1709 * traffic and a second table for unregistered multicast and
1710 * broadcast.
1711 */
1712 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
1713 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
1714 continue;
1715
1716 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1717 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
1718 if (err)
1719 return err;
1720 }
1721
1722 return 0;
1723}
1724
Jiri Pirko0d65fc12015-12-03 12:12:28 +01001725static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
1726{
1727 char slcr_pl[MLXSW_REG_SLCR_LEN];
1728
1729 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
1730 MLXSW_REG_SLCR_LAG_HASH_DMAC |
1731 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
1732 MLXSW_REG_SLCR_LAG_HASH_VLANID |
1733 MLXSW_REG_SLCR_LAG_HASH_SIP |
1734 MLXSW_REG_SLCR_LAG_HASH_DIP |
1735 MLXSW_REG_SLCR_LAG_HASH_SPORT |
1736 MLXSW_REG_SLCR_LAG_HASH_DPORT |
1737 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
1738 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
1739}
1740
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001741static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
1742 const struct mlxsw_bus_info *mlxsw_bus_info)
1743{
1744 struct mlxsw_sp *mlxsw_sp = priv;
1745 int err;
1746
1747 mlxsw_sp->core = mlxsw_core;
1748 mlxsw_sp->bus_info = mlxsw_bus_info;
1749
1750 err = mlxsw_sp_base_mac_get(mlxsw_sp);
1751 if (err) {
1752 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
1753 return err;
1754 }
1755
1756 err = mlxsw_sp_ports_create(mlxsw_sp);
1757 if (err) {
1758 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
1759 goto err_ports_create;
1760 }
1761
1762 err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
1763 if (err) {
1764 dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
1765 goto err_event_register;
1766 }
1767
1768 err = mlxsw_sp_traps_init(mlxsw_sp);
1769 if (err) {
1770 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
1771 goto err_rx_listener_register;
1772 }
1773
1774 err = mlxsw_sp_flood_init(mlxsw_sp);
1775 if (err) {
1776 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
1777 goto err_flood_init;
1778 }
1779
1780 err = mlxsw_sp_buffers_init(mlxsw_sp);
1781 if (err) {
1782 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
1783 goto err_buffers_init;
1784 }
1785
Jiri Pirko0d65fc12015-12-03 12:12:28 +01001786 err = mlxsw_sp_lag_init(mlxsw_sp);
1787 if (err) {
1788 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
1789 goto err_lag_init;
1790 }
1791
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001792 err = mlxsw_sp_switchdev_init(mlxsw_sp);
1793 if (err) {
1794 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
1795 goto err_switchdev_init;
1796 }
1797
1798 return 0;
1799
1800err_switchdev_init:
Jiri Pirko0d65fc12015-12-03 12:12:28 +01001801err_lag_init:
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001802err_buffers_init:
1803err_flood_init:
1804 mlxsw_sp_traps_fini(mlxsw_sp);
1805err_rx_listener_register:
1806 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
1807err_event_register:
1808 mlxsw_sp_ports_remove(mlxsw_sp);
1809err_ports_create:
1810 mlxsw_sp_vfids_fini(mlxsw_sp);
1811 return err;
1812}
1813
1814static void mlxsw_sp_fini(void *priv)
1815{
1816 struct mlxsw_sp *mlxsw_sp = priv;
1817
1818 mlxsw_sp_switchdev_fini(mlxsw_sp);
1819 mlxsw_sp_traps_fini(mlxsw_sp);
1820 mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
1821 mlxsw_sp_ports_remove(mlxsw_sp);
1822 mlxsw_sp_vfids_fini(mlxsw_sp);
1823}
1824
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels = 1,
	.max_vepa_channels = 0,
	.used_max_lag = 1,
	.max_lag = MLXSW_SP_LAG_MAX,
	.used_max_port_per_lag = 1,
	.max_port_per_lag = MLXSW_SP_PORT_PER_LAG_MAX,
	.used_max_mid = 1,
	.max_mid = 7000,
	.used_max_pgt = 1,
	.max_pgt = 0,
	.used_max_system_port = 1,
	.max_system_port = 64,
	.used_max_vlan_groups = 1,
	.max_vlan_groups = 127,
	.used_max_regions = 1,
	.max_regions = 400,
	.used_flood_tables = 1,
	.used_flood_mode = 1,
	.flood_mode = 3,
	.max_fid_offset_flood_tables = 2,
	.fid_offset_flood_table_size = VLAN_N_VID - 1,
	.max_fid_flood_tables = 1,
	.fid_flood_table_size = VLAN_N_VID,
	.used_max_ib_mc = 1,
	.max_ib_mc = 0,
	.used_max_pkey = 1,
	.max_pkey = 0,
	.swid_config = {
		{
			.used_type = 1,
			.type = MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};

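/* Hook-up point between this driver and mlxsw_core. The core allocates
 * .priv_size bytes (our struct mlxsw_sp) per device, matches devices
 * to drivers by .kind, and uses .txhdr_len/.txhdr_construct to reserve
 * and fill the Tx header on every transmitted skb.
 */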
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind = MLXSW_DEVICE_KIND_SPECTRUM,
	.owner = THIS_MODULE,
	.priv_size = sizeof(struct mlxsw_sp),
	.init = mlxsw_sp_init,
	.fini = mlxsw_sp_fini,
	.txhdr_construct = mlxsw_sp_txhdr_construct,
	.txhdr_len = MLXSW_TXHDR_LEN,
	.profile = &mlxsw_sp_config_profile,
};

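/* Recognize our own ports by their ops vector. This is what lets the
 * netdevice notifier below cheaply ignore events on foreign devices.
 */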
static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}

static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* When a port is not bridged, untagged packets are tagged with
	 * PVID=VID=1, thereby creating an implicit VLAN interface in
	 * the device. Remove it and let the bridge code take care of
	 * its own VLANs.
	 */
	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to remove VID 1\n");

	return err;
}

static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* Add the implicit VLAN interface back to the device, so that
	 * untagged packets are again classified to the default vFID.
	 */
	err = mlxsw_sp_port_add_vid(dev, 0, 1);
	if (err)
		netdev_err(dev, "Failed to add VID 1\n");

	return err;
}

static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *br_dev)
{
	return !mlxsw_sp->master_bridge.dev ||
	       mlxsw_sp->master_bridge.dev == br_dev;
}

static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	mlxsw_sp->master_bridge.dev = br_dev;
	mlxsw_sp->master_bridge.ref_count++;
}

static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
				       struct net_device *br_dev)
{
	if (--mlxsw_sp->master_bridge.ref_count == 0)
		mlxsw_sp->master_bridge.dev = NULL;
}

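/* Low-level LAG plumbing. As the register names suggest, SLDR manages
 * the LAG descriptor and its distributor (egress) port list, while
 * SLCOR manages the collector (ingress) side of each member port.
 * Joining a port to a LAG is therefore a two-step add + enable on the
 * collector side, e.g. (illustrative sequence for a first port joining
 * LAG 5 at port index 0):
 *
 *	mlxsw_sp_lag_create(mlxsw_sp, 5);
 *	mlxsw_sp_lag_col_port_add(mlxsw_sp_port, 5, 0);
 *	mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, 5);
 */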
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}

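/* Map a kernel LAG device to a hardware LAG ID: reuse the slot already
 * bound to lag_dev if one exists, otherwise hand out the first free
 * slot. -EBUSY means all MLXSW_SP_LAG_MAX slots are in use.
 */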
static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *lag_dev,
				  u16 *p_lag_id)
{
	struct mlxsw_sp_upper *lag;
	int free_lag_id = -1;
	int i;

	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
		if (lag->ref_count) {
			if (lag->dev == lag_dev) {
				*p_lag_id = i;
				return 0;
			}
		} else if (free_lag_id < 0) {
			free_lag_id = i;
		}
	}
	if (free_lag_id < 0)
		return -EBUSY;
	*p_lag_id = free_lag_id;
	return 0;
}

static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
			  struct net_device *lag_dev,
			  struct netdev_lag_upper_info *lag_upper_info)
{
	u16 lag_id;

	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
		return false;
	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;
	return true;
}

static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	int i;

	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
			*p_port_index = i;
			return 0;
		}
	}
	return -EBUSY;
}

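/* Bind a port to a LAG. The first port to join also instantiates the
 * LAG in hardware (ref_count 0 -> 1); later ports only add their own
 * collector mapping. The (lag_id, port_index) -> local_port mapping is
 * registered with the core so received packets can be attributed to
 * the right member port.
 */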
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id;
	u8 port_index;
	int err;

	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
	if (err)
		return err;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	if (!lag->ref_count) {
		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
		if (err)
			return err;
		lag->dev = lag_dev;
	}

	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
	if (err)
		goto err_col_port_add;
	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
	if (err)
		goto err_col_port_enable;

	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
				   mlxsw_sp_port->local_port);
	mlxsw_sp_port->lag_id = lag_id;
	mlxsw_sp_port->lagged = 1;
	lag->ref_count++;
	return 0;

err_col_port_enable:
	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
	if (!lag->ref_count)
		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
	return err;
}

static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct net_device *lag_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_upper *lag;
	u16 lag_id = mlxsw_sp_port->lag_id;
	int err;

	if (!mlxsw_sp_port->lagged)
		return 0;
	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
	WARN_ON(lag->ref_count == 0);

	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
	if (err)
		return err;
	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
	if (err)
		return err;

	if (lag->ref_count == 1) {
		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
		if (err)
			return err;
	}

	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
				     mlxsw_sp_port->local_port);
	mlxsw_sp_port->lagged = 0;
	lag->ref_count--;
	return 0;
}

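/* Distributor (Tx) membership is toggled independently of collector
 * (Rx) membership: the LAG master reports per-slave tx_enabled state
 * via NETDEV_CHANGELOWERSTATE, which is reflected here, so a slave
 * that must not transmit (e.g. still negotiating LACP) keeps
 * receiving.
 */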
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				      u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
					 mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
					    mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}

static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       bool lag_tx_enabled)
{
	if (lag_tx_enabled)
		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
						  mlxsw_sp_port->lag_id);
	else
		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
						     mlxsw_sp_port->lag_id);
}

static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netdev_lag_lower_state_info *info)
{
	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
}

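/* PRECHANGEUPPER is the veto phase: returning NOTIFY_BAD makes the
 * kernel abort the linking before any state has changed, which is how
 * the single-bridge and hash-only-LAG restrictions are enforced.
 * CHANGEUPPER then commits the change. Illustrative flow for
 * 'ip link set dev swp1 master br0':
 *
 *	NETDEV_PRECHANGEUPPER -> mlxsw_sp_master_bridge_check()
 *	NETDEV_CHANGEUPPER    -> mlxsw_sp_port_bridge_join()
 */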
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master || !info->linking)
			break;
		/* HW limitation forbids putting ports into multiple
		 * bridges.
		 */
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!info->master)
			break;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err)
					netdev_err(dev, "Failed to join bridge\n");
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
				mlxsw_sp_port->bridged = 1;
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
				if (err)
					netdev_err(dev, "Failed to leave bridge\n");
				mlxsw_sp_port->bridged = 0;
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev);
				if (err) {
					netdev_err(dev, "Failed to join link aggregation\n");
					return NOTIFY_BAD;
				}
			} else {
				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							      upper_dev);
				if (err) {
					netdev_err(dev, "Failed to leave link aggregation\n");
					return NOTIFY_BAD;
				}
			}
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
					       unsigned long event, void *ptr)
{
	struct netdev_notifier_changelowerstate_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	mlxsw_sp_port = netdev_priv(dev);
	info = ptr;

	switch (event) {
	case NETDEV_CHANGELOWERSTATE:
		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
							info->lower_state_info);
			if (err)
				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
		}
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
					 unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_PRECHANGEUPPER:
	case NETDEV_CHANGEUPPER:
		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
	case NETDEV_CHANGELOWERSTATE:
		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
	}

	return NOTIFY_DONE;
}

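/* Events on the LAG device itself (e.g. the bond being enslaved to a
 * bridge) are replayed against every member port owned by this driver;
 * a veto (NOTIFY_BAD) from any member vetoes the whole operation.
 */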
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
					unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct list_head *iter;
	int ret;

	netdev_for_each_lower_dev(lag_dev, dev, iter) {
		if (mlxsw_sp_port_dev_check(dev)) {
			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
			if (ret == NOTIFY_BAD)
				return ret;
		}
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_netdevice_port_event(dev, event, ptr);

	if (netif_is_lag_master(dev))
		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);

	return NOTIFY_DONE;
}

static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};

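/* The notifier is registered before the driver, presumably so no
 * netdev event can slip through between port creation and notifier
 * registration; mlxsw_sp_netdevice_event() simply ignores devices that
 * are not ours. Module exit reverses the order.
 */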
static int __init mlxsw_sp_module_init(void)
{
	int err;

	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
	if (err)
		goto err_core_driver_register;
	return 0;

err_core_driver_register:
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
	return err;
}

static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}

module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);