blob: a397cc18693d3d3c75322c1415063f26b5176f7f [file] [log] [blame]
Jiri Pirko56ade8f2015-10-16 14:01:37 +02001/*
2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/types.h>
40#include <linux/netdevice.h>
41#include <linux/etherdevice.h>
42#include <linux/ethtool.h>
43#include <linux/slab.h>
44#include <linux/device.h>
45#include <linux/skbuff.h>
46#include <linux/if_vlan.h>
47#include <linux/if_bridge.h>
48#include <linux/workqueue.h>
49#include <linux/jiffies.h>
50#include <linux/bitops.h>
51#include <net/switchdev.h>
52#include <generated/utsrelease.h>
53
54#include "spectrum.h"
55#include "core.h"
56#include "reg.h"
57#include "port.h"
58#include "trap.h"
59#include "txheader.h"
60
/* Driver identity strings; reported to user space via ethtool -i
 * (see mlxsw_sp_port_get_drvinfo()).
 */
static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp_driver_version[] = "1.0";
63
/* Tx header field definitions. Each MLXSW_ITEM32() invocation generates
 * typed get/set accessors (mlxsw_tx_hdr_<field>_set() etc.) for one field
 * of the Tx header that mlxsw_sp_txhdr_construct() prepends to packets.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
131
132static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
133 const struct mlxsw_tx_info *tx_info)
134{
135 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
136
137 memset(txhdr, 0, MLXSW_TXHDR_LEN);
138
139 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
140 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
141 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
142 mlxsw_tx_hdr_swid_set(txhdr, 0);
143 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
144 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
145 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
146}
147
148static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
149{
150 char spad_pl[MLXSW_REG_SPAD_LEN];
151 int err;
152
153 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
154 if (err)
155 return err;
156 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
157 return 0;
158}
159
160static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
161 bool is_up)
162{
163 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
164 char paos_pl[MLXSW_REG_PAOS_LEN];
165
166 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
167 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
168 MLXSW_PORT_ADMIN_STATUS_DOWN);
169 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
170}
171
172static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
173 bool *p_is_up)
174{
175 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
176 char paos_pl[MLXSW_REG_PAOS_LEN];
177 u8 oper_status;
178 int err;
179
180 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
181 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
182 if (err)
183 return err;
184 oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
185 *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
186 return 0;
187}
188
189static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
190{
191 char sfmr_pl[MLXSW_REG_SFMR_LEN];
192 int err;
193
194 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
195 MLXSW_SP_VFID_BASE + vfid, 0);
196 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
197
198 if (err)
199 return err;
200
201 set_bit(vfid, mlxsw_sp->active_vfids);
202 return 0;
203}
204
205static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
206{
207 char sfmr_pl[MLXSW_REG_SFMR_LEN];
208
209 clear_bit(vfid, mlxsw_sp->active_vfids);
210
211 mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
212 MLXSW_SP_VFID_BASE + vfid, 0);
213 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
214}
215
216static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
217 unsigned char *addr)
218{
219 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
220 char ppad_pl[MLXSW_REG_PPAD_LEN];
221
222 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
223 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
224 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
225}
226
227static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
228{
229 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
230 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
231
232 ether_addr_copy(addr, mlxsw_sp->base_mac);
233 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
234 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
235}
236
237static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
238 u16 vid, enum mlxsw_reg_spms_state state)
239{
240 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
241 char *spms_pl;
242 int err;
243
244 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
245 if (!spms_pl)
246 return -ENOMEM;
247 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
248 mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
249 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
250 kfree(spms_pl);
251 return err;
252}
253
254static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
255{
256 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
257 char pmtu_pl[MLXSW_REG_PMTU_LEN];
258 int max_mtu;
259 int err;
260
261 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
262 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
263 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
264 if (err)
265 return err;
266 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
267
268 if (mtu > max_mtu)
269 return -EINVAL;
270
271 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
272 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
273}
274
275static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
276{
277 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
278 char pspa_pl[MLXSW_REG_PSPA_LEN];
279
280 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
281 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
282}
283
284static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
285 bool enable)
286{
287 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
288 char svpe_pl[MLXSW_REG_SVPE_LEN];
289
290 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
291 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
292}
293
294int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
295 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
296 u16 vid)
297{
298 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
299 char svfa_pl[MLXSW_REG_SVFA_LEN];
300
301 mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
302 fid, vid);
303 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
304}
305
306static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
307 u16 vid, bool learn_enable)
308{
309 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
310 char *spvmlr_pl;
311 int err;
312
313 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
314 if (!spvmlr_pl)
315 return -ENOMEM;
316 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
317 learn_enable);
318 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
319 kfree(spvmlr_pl);
320 return err;
321}
322
323static int
324mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
325{
326 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
327 char sspr_pl[MLXSW_REG_SSPR_LEN];
328
329 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
330 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
331}
332
333static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port,
334 bool *p_usable)
335{
336 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
337 char pmlp_pl[MLXSW_REG_PMLP_LEN];
338 int err;
339
340 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
341 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
342 if (err)
343 return err;
344 *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false;
345 return 0;
346}
347
348static int mlxsw_sp_port_open(struct net_device *dev)
349{
350 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
351 int err;
352
353 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
354 if (err)
355 return err;
356 netif_start_queue(dev);
357 return 0;
358}
359
360static int mlxsw_sp_port_stop(struct net_device *dev)
361{
362 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
363
364 netif_stop_queue(dev);
365 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
366}
367
368static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
369 struct net_device *dev)
370{
371 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
372 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
373 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
374 const struct mlxsw_tx_info tx_info = {
375 .local_port = mlxsw_sp_port->local_port,
376 .is_emad = false,
377 };
378 u64 len;
379 int err;
380
381 if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
382 return NETDEV_TX_BUSY;
383
384 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
385 struct sk_buff *skb_orig = skb;
386
387 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
388 if (!skb) {
389 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
390 dev_kfree_skb_any(skb_orig);
391 return NETDEV_TX_OK;
392 }
393 }
394
395 if (eth_skb_pad(skb)) {
396 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
397 return NETDEV_TX_OK;
398 }
399
400 mlxsw_sp_txhdr_construct(skb, &tx_info);
401 len = skb->len;
402 /* Due to a race we might fail here because of a full queue. In that
403 * unlikely case we simply drop the packet.
404 */
405 err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
406
407 if (!err) {
408 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
409 u64_stats_update_begin(&pcpu_stats->syncp);
410 pcpu_stats->tx_packets++;
411 pcpu_stats->tx_bytes += len;
412 u64_stats_update_end(&pcpu_stats->syncp);
413 } else {
414 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
415 dev_kfree_skb_any(skb);
416 }
417 return NETDEV_TX_OK;
418}
419
/* ndo_set_rx_mode stub: intentionally empty, nothing is programmed to HW.
 * NOTE(review): presumably provided so the stack's rx-mode updates succeed
 * on this device -- confirm before adding logic here.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
423
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200424static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
425{
426 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
427 struct sockaddr *addr = p;
428 int err;
429
430 if (!is_valid_ether_addr(addr->sa_data))
431 return -EADDRNOTAVAIL;
432
433 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
434 if (err)
435 return err;
436 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
437 return 0;
438}
439
440static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
441{
442 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
443 int err;
444
445 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
446 if (err)
447 return err;
448 dev->mtu = mtu;
449 return 0;
450}
451
452static struct rtnl_link_stats64 *
453mlxsw_sp_port_get_stats64(struct net_device *dev,
454 struct rtnl_link_stats64 *stats)
455{
456 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
457 struct mlxsw_sp_port_pcpu_stats *p;
458 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
459 u32 tx_dropped = 0;
460 unsigned int start;
461 int i;
462
463 for_each_possible_cpu(i) {
464 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
465 do {
466 start = u64_stats_fetch_begin_irq(&p->syncp);
467 rx_packets = p->rx_packets;
468 rx_bytes = p->rx_bytes;
469 tx_packets = p->tx_packets;
470 tx_bytes = p->tx_bytes;
471 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
472
473 stats->rx_packets += rx_packets;
474 stats->rx_bytes += rx_bytes;
475 stats->tx_packets += tx_packets;
476 stats->tx_bytes += tx_bytes;
477 /* tx_dropped is u32, updated without syncp protection. */
478 tx_dropped += p->tx_dropped;
479 }
480 stats->tx_dropped = tx_dropped;
481 return stats;
482}
483
484int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
485 u16 vid_end, bool is_member, bool untagged)
486{
487 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
488 char *spvm_pl;
489 int err;
490
491 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
492 if (!spvm_pl)
493 return -ENOMEM;
494
495 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
496 vid_end, is_member, untagged);
497 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
498 kfree(spvm_pl);
499 return err;
500}
501
502static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
503{
504 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
505 u16 vid, last_visited_vid;
506 int err;
507
508 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
509 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid,
510 vid);
511 if (err) {
512 last_visited_vid = vid;
513 goto err_port_vid_to_fid_set;
514 }
515 }
516
517 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
518 if (err) {
519 last_visited_vid = VLAN_N_VID;
520 goto err_port_vid_to_fid_set;
521 }
522
523 return 0;
524
525err_port_vid_to_fid_set:
526 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
527 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid,
528 vid);
529 return err;
530}
531
532static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
533{
534 enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
535 u16 vid;
536 int err;
537
538 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
539 if (err)
540 return err;
541
542 for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
543 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false,
544 vid, vid);
545 if (err)
546 return err;
547 }
548
549 return 0;
550}
551
552int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
553 u16 vid)
554{
555 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
556 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
557 char *sftr_pl;
558 int err;
559
560 /* VLAN 0 is added to HW filter when device goes up, but it is
561 * reserved in our case, so simply return.
562 */
563 if (!vid)
564 return 0;
565
566 if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
567 netdev_warn(dev, "VID=%d already configured\n", vid);
568 return 0;
569 }
570
571 if (!test_bit(vid, mlxsw_sp->active_vfids)) {
572 err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
573 if (err) {
574 netdev_err(dev, "Failed to create vFID=%d\n",
575 MLXSW_SP_VFID_BASE + vid);
576 return err;
577 }
578
579 sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
580 if (!sftr_pl) {
581 err = -ENOMEM;
582 goto err_flood_table_alloc;
583 }
584 mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
585 MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
586 MLXSW_PORT_CPU_PORT, true);
587 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
588 kfree(sftr_pl);
589 if (err) {
590 netdev_err(dev, "Failed to configure flood table\n");
591 goto err_flood_table_config;
592 }
593 }
594
595 /* In case we fail in the following steps, we intentionally do not
596 * destroy the associated vFID.
597 */
598
599 /* When adding the first VLAN interface on a bridged port we need to
600 * transition all the active 802.1Q bridge VLANs to use explicit
601 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
602 */
603 if (!mlxsw_sp_port->nr_vfids) {
604 err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
605 if (err) {
606 netdev_err(dev, "Failed to set to Virtual mode\n");
607 return err;
608 }
609 }
610
611 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
612 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
613 true, MLXSW_SP_VFID_BASE + vid, vid);
614 if (err) {
615 netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
616 vid, MLXSW_SP_VFID_BASE + vid);
617 goto err_port_vid_to_fid_set;
618 }
619
620 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
621 if (err) {
622 netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
623 goto err_port_vid_learning_set;
624 }
625
626 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
627 if (err) {
628 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
629 vid);
630 goto err_port_add_vid;
631 }
632
633 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
634 MLXSW_REG_SPMS_STATE_FORWARDING);
635 if (err) {
636 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
637 goto err_port_stp_state_set;
638 }
639
640 mlxsw_sp_port->nr_vfids++;
641 set_bit(vid, mlxsw_sp_port->active_vfids);
642
643 return 0;
644
645err_flood_table_config:
646err_flood_table_alloc:
647 mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
648 return err;
649
650err_port_stp_state_set:
651 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
652err_port_add_vid:
653 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
654err_port_vid_learning_set:
655 mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
656 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
657 MLXSW_SP_VFID_BASE + vid, vid);
658err_port_vid_to_fid_set:
659 mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
660 return err;
661}
662
663int mlxsw_sp_port_kill_vid(struct net_device *dev,
664 __be16 __always_unused proto, u16 vid)
665{
666 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
667 int err;
668
669 /* VLAN 0 is removed from HW filter when device goes down, but
670 * it is reserved in our case, so simply return.
671 */
672 if (!vid)
673 return 0;
674
675 if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
676 netdev_warn(dev, "VID=%d does not exist\n", vid);
677 return 0;
678 }
679
680 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
681 MLXSW_REG_SPMS_STATE_DISCARDING);
682 if (err) {
683 netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
684 return err;
685 }
686
687 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
688 if (err) {
689 netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
690 vid);
691 return err;
692 }
693
694 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
695 if (err) {
696 netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
697 return err;
698 }
699
700 err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
701 MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
702 false, MLXSW_SP_VFID_BASE + vid,
703 vid);
704 if (err) {
705 netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
706 vid, MLXSW_SP_VFID_BASE + vid);
707 return err;
708 }
709
710 /* When removing the last VLAN interface on a bridged port we need to
711 * transition all active 802.1Q bridge VLANs to use VID to FID
712 * mappings and set port's mode to VLAN mode.
713 */
714 if (mlxsw_sp_port->nr_vfids == 1) {
715 err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
716 if (err) {
717 netdev_err(dev, "Failed to set to VLAN mode\n");
718 return err;
719 }
720 }
721
722 mlxsw_sp_port->nr_vfids--;
723 clear_bit(vid, mlxsw_sp_port->active_vfids);
724
725 return 0;
726}
727
/* Netdev entry points for a Spectrum front-panel port. FDB and bridge
 * operations are delegated to the generic switchdev helpers.
 */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open = mlxsw_sp_port_open,
	.ndo_stop = mlxsw_sp_port_stop,
	.ndo_start_xmit = mlxsw_sp_port_xmit,
	.ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu = mlxsw_sp_port_change_mtu,
	.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
	.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
	.ndo_fdb_add = switchdev_port_fdb_add,
	.ndo_fdb_del = switchdev_port_fdb_del,
	.ndo_fdb_dump = switchdev_port_fdb_dump,
	.ndo_bridge_setlink = switchdev_port_bridge_setlink,
	.ndo_bridge_getlink = switchdev_port_bridge_getlink,
	.ndo_bridge_dellink = switchdev_port_bridge_dellink,
};
745
746static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
747 struct ethtool_drvinfo *drvinfo)
748{
749 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
750 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
751
752 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
753 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
754 sizeof(drvinfo->version));
755 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
756 "%d.%d.%d",
757 mlxsw_sp->bus_info->fw_rev.major,
758 mlxsw_sp->bus_info->fw_rev.minor,
759 mlxsw_sp->bus_info->fw_rev.subminor);
760 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
761 sizeof(drvinfo->bus_info));
762}
763
/* Descriptor of a single HW counter exposed via ethtool -S:
 * the counter's display name and a getter that extracts its value from a
 * PPCNT register payload.
 */
struct mlxsw_sp_port_hw_stats {
	char str[ETH_GSTRING_LEN];
	u64 (*getter)(char *payload);
};

/* Counters reported by the PPCNT register; order here defines the order
 * of both the ethtool string table and the values in get_stats().
 */
static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
	{
		.str = "a_frames_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
	},
	{
		.str = "a_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
	},
	{
		.str = "a_frame_check_sequence_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
	},
	{
		.str = "a_alignment_errors",
		.getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
	},
	{
		.str = "a_octets_transmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
	},
	{
		.str = "a_octets_received_ok",
		.getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
	},
	{
		.str = "a_multicast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
	},
	{
		.str = "a_broadcast_frames_xmitted_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
	},
	{
		.str = "a_multicast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
	},
	{
		.str = "a_broadcast_frames_received_ok",
		.getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
	},
	{
		.str = "a_in_range_length_errors",
		.getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
	},
	{
		.str = "a_out_of_range_length_field",
		.getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
	},
	{
		.str = "a_frame_too_long_errors",
		.getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
	},
	{
		.str = "a_symbol_error_during_carrier",
		.getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
	},
	{
		.str = "a_mac_control_frames_transmitted",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
	},
	{
		.str = "a_mac_control_frames_received",
		.getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
	},
	{
		.str = "a_unsupported_opcodes_received",
		.getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_received",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
	},
	{
		.str = "a_pause_mac_ctrl_frames_xmitted",
		.getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
	},
};

/* Number of HW counters exposed via ethtool -S. */
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
849
850static void mlxsw_sp_port_get_strings(struct net_device *dev,
851 u32 stringset, u8 *data)
852{
853 u8 *p = data;
854 int i;
855
856 switch (stringset) {
857 case ETH_SS_STATS:
858 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
859 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
860 ETH_GSTRING_LEN);
861 p += ETH_GSTRING_LEN;
862 }
863 break;
864 }
865}
866
Ido Schimmel3a66ee32015-11-27 13:45:55 +0100867static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
868 enum ethtool_phys_id_state state)
869{
870 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
871 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
872 char mlcr_pl[MLXSW_REG_MLCR_LEN];
873 bool active;
874
875 switch (state) {
876 case ETHTOOL_ID_ACTIVE:
877 active = true;
878 break;
879 case ETHTOOL_ID_INACTIVE:
880 active = false;
881 break;
882 default:
883 return -EOPNOTSUPP;
884 }
885
886 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
887 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
888}
889
Jiri Pirko56ade8f2015-10-16 14:01:37 +0200890static void mlxsw_sp_port_get_stats(struct net_device *dev,
891 struct ethtool_stats *stats, u64 *data)
892{
893 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
894 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
895 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
896 int i;
897 int err;
898
899 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
900 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
901 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
902 data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
903}
904
905static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
906{
907 switch (sset) {
908 case ETH_SS_STATS:
909 return MLXSW_SP_PORT_HW_STATS_LEN;
910 default:
911 return -EOPNOTSUPP;
912 }
913}
914
/* Mapping between a PTYS Ethernet protocol capability bit (or group of
 * bits) and the corresponding ethtool SUPPORTED_*/ /* ADVERTISED_* flags
 * and speed in Mb/s. Entries with zero supported/advertised have no
 * matching legacy ethtool link-mode flag.
 */
struct mlxsw_sp_port_link_mode {
	u32 mask;
	u32 supported;
	u32 advertised;
	u32 speed;
};

static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
		.supported = SUPPORTED_100baseT_Full,
		.advertised = ADVERTISED_100baseT_Full,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
		.speed = 100,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
			MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
		.supported = SUPPORTED_1000baseKX_Full,
		.advertised = ADVERTISED_1000baseKX_Full,
		.speed = 1000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
		.supported = SUPPORTED_10000baseT_Full,
		.advertised = ADVERTISED_10000baseT_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
		.supported = SUPPORTED_10000baseKX4_Full,
		.advertised = ADVERTISED_10000baseKX4_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
			MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
		.supported = SUPPORTED_10000baseKR_Full,
		.advertised = ADVERTISED_10000baseKR_Full,
		.speed = 10000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
		.supported = SUPPORTED_20000baseKR2_Full,
		.advertised = ADVERTISED_20000baseKR2_Full,
		.speed = 20000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
		.supported = SUPPORTED_40000baseCR4_Full,
		.advertised = ADVERTISED_40000baseCR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
		.supported = SUPPORTED_40000baseKR4_Full,
		.advertised = ADVERTISED_40000baseKR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
		.supported = SUPPORTED_40000baseSR4_Full,
		.advertised = ADVERTISED_40000baseSR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
		.supported = SUPPORTED_40000baseLR4_Full,
		.advertised = ADVERTISED_40000baseLR4_Full,
		.speed = 40000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR |
			MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
		.speed = 25000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 |
			MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
		.speed = 50000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
		.supported = SUPPORTED_56000baseKR4_Full,
		.advertised = ADVERTISED_56000baseKR4_Full,
		.speed = 56000,
	},
	{
		.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
			MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
		.speed = 100000,
	},
};

/* Number of entries in the PTYS <-> ethtool link-mode table. */
#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
1020
1021static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto)
1022{
1023 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1024 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1025 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1026 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1027 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1028 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1029 return SUPPORTED_FIBRE;
1030
1031 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1032 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1033 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1034 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
1035 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
1036 return SUPPORTED_Backplane;
1037 return 0;
1038}
1039
1040static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto)
1041{
1042 u32 modes = 0;
1043 int i;
1044
1045 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1046 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1047 modes |= mlxsw_sp_port_link_mode[i].supported;
1048 }
1049 return modes;
1050}
1051
1052static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto)
1053{
1054 u32 modes = 0;
1055 int i;
1056
1057 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1058 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
1059 modes |= mlxsw_sp_port_link_mode[i].advertised;
1060 }
1061 return modes;
1062}
1063
1064static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
1065 struct ethtool_cmd *cmd)
1066{
1067 u32 speed = SPEED_UNKNOWN;
1068 u8 duplex = DUPLEX_UNKNOWN;
1069 int i;
1070
1071 if (!carrier_ok)
1072 goto out;
1073
1074 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1075 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
1076 speed = mlxsw_sp_port_link_mode[i].speed;
1077 duplex = DUPLEX_FULL;
1078 break;
1079 }
1080 }
1081out:
1082 ethtool_cmd_speed_set(cmd, speed);
1083 cmd->duplex = duplex;
1084}
1085
1086static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
1087{
1088 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
1089 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
1090 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
1091 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
1092 return PORT_FIBRE;
1093
1094 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
1095 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
1096 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
1097 return PORT_DA;
1098
1099 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
1100 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
1101 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
1102 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
1103 return PORT_NONE;
1104
1105 return PORT_OTHER;
1106}
1107
1108static int mlxsw_sp_port_get_settings(struct net_device *dev,
1109 struct ethtool_cmd *cmd)
1110{
1111 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1112 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1113 char ptys_pl[MLXSW_REG_PTYS_LEN];
1114 u32 eth_proto_cap;
1115 u32 eth_proto_admin;
1116 u32 eth_proto_oper;
1117 int err;
1118
1119 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1120 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1121 if (err) {
1122 netdev_err(dev, "Failed to get proto");
1123 return err;
1124 }
1125 mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
1126 &eth_proto_admin, &eth_proto_oper);
1127
1128 cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
1129 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
1130 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
1131 cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
1132 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
1133 eth_proto_oper, cmd);
1134
1135 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1136 cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
1137 cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
1138
1139 cmd->transceiver = XCVR_INTERNAL;
1140 return 0;
1141}
1142
1143static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
1144{
1145 u32 ptys_proto = 0;
1146 int i;
1147
1148 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1149 if (advertising & mlxsw_sp_port_link_mode[i].advertised)
1150 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1151 }
1152 return ptys_proto;
1153}
1154
1155static u32 mlxsw_sp_to_ptys_speed(u32 speed)
1156{
1157 u32 ptys_proto = 0;
1158 int i;
1159
1160 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
1161 if (speed == mlxsw_sp_port_link_mode[i].speed)
1162 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
1163 }
1164 return ptys_proto;
1165}
1166
1167static int mlxsw_sp_port_set_settings(struct net_device *dev,
1168 struct ethtool_cmd *cmd)
1169{
1170 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1171 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1172 char ptys_pl[MLXSW_REG_PTYS_LEN];
1173 u32 speed;
1174 u32 eth_proto_new;
1175 u32 eth_proto_cap;
1176 u32 eth_proto_admin;
1177 bool is_up;
1178 int err;
1179
1180 speed = ethtool_cmd_speed(cmd);
1181
1182 eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
1183 mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
1184 mlxsw_sp_to_ptys_speed(speed);
1185
1186 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
1187 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1188 if (err) {
1189 netdev_err(dev, "Failed to get proto");
1190 return err;
1191 }
1192 mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
1193
1194 eth_proto_new = eth_proto_new & eth_proto_cap;
1195 if (!eth_proto_new) {
1196 netdev_err(dev, "Not supported proto admin requested");
1197 return -EINVAL;
1198 }
1199 if (eth_proto_new == eth_proto_admin)
1200 return 0;
1201
1202 mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
1203 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1204 if (err) {
1205 netdev_err(dev, "Failed to set proto admin");
1206 return err;
1207 }
1208
1209 err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
1210 if (err) {
1211 netdev_err(dev, "Failed to get oper status");
1212 return err;
1213 }
1214 if (!is_up)
1215 return 0;
1216
1217 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1218 if (err) {
1219 netdev_err(dev, "Failed to set admin status");
1220 return err;
1221 }
1222
1223 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1224 if (err) {
1225 netdev_err(dev, "Failed to set admin status");
1226 return err;
1227 }
1228
1229 return 0;
1230}
1231
/* ethtool operations exposed by every Spectrum port netdev. */
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= mlxsw_sp_port_get_strings,
	.set_phys_id		= mlxsw_sp_port_set_phys_id,
	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
	.get_sset_count		= mlxsw_sp_port_get_sset_count,
	.get_settings		= mlxsw_sp_port_get_settings,
	.set_settings		= mlxsw_sp_port_set_settings,
};
1242
/* Allocate and initialize a single Spectrum port: netdev allocation,
 * per-CPU stats, MAC address, hardware port mapping/SWID/MTU setup,
 * buffers, switchdev ops and netdev registration. On success the port
 * is stored in mlxsw_sp->ports[local_port]. Returns 0, or a negative
 * errno with everything unwound via the goto ladder below. The
 * initialization order is significant; the error labels unwind in
 * exact reverse.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *dev;
	bool usable;
	int err;

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev)
		return -ENOMEM;
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	/* Default bridge-offload behavior: learn addresses, sync them to
	 * the bridge, flood unknown unicast, and use VLAN 1 as PVID.
	 */
	mlxsw_sp_port->learning = 1;
	mlxsw_sp_port->learning_sync = 1;
	mlxsw_sp_port->uc_flood = 1;
	mlxsw_sp_port->pvid = 1;

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	/* No carrier until a PUDE link-up event arrives. */
	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->hard_header_len += MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
			mlxsw_sp_port->local_port);
		goto err_port_module_check;
	}

	/* An unusable module is not an error; the port is simply skipped
	 * and its resources released (the goto unwinds without err set,
	 * so the function returns the last err value, which is 0 here).
	 */
	if (!usable) {
		dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
			mlxsw_sp_port->local_port);
		goto port_not_usable;
	}

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			mlxsw_sp_port->local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
	if (err)
		goto err_port_vlan_init;

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
	return 0;

err_port_vlan_init:
	unregister_netdev(dev);
err_register_netdev:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_swid_set:
err_port_system_port_mapping_set:
port_not_usable:
err_port_module_check:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
	return err;
}
1365
1366static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
1367{
1368 u16 vfid;
1369
1370 for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
1371 mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
1372}
1373
/* Tear down a single port previously created by mlxsw_sp_port_create().
 * A NULL slot means the port was never initialized (e.g. module not
 * usable) and there is nothing to do. Teardown order mirrors creation
 * in reverse: drop VID 1, unregister the netdev, undo switchdev setup,
 * then free stats and the netdev itself.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];

	if (!mlxsw_sp_port)
		return;
	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	free_netdev(mlxsw_sp_port->dev);
}
1386
1387static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
1388{
1389 int i;
1390
1391 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++)
1392 mlxsw_sp_port_remove(mlxsw_sp, i);
1393 kfree(mlxsw_sp->ports);
1394}
1395
1396static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
1397{
1398 size_t alloc_size;
1399 int i;
1400 int err;
1401
1402 alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS;
1403 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
1404 if (!mlxsw_sp->ports)
1405 return -ENOMEM;
1406
1407 for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
1408 err = mlxsw_sp_port_create(mlxsw_sp, i);
1409 if (err)
1410 goto err_port_create;
1411 }
1412 return 0;
1413
1414err_port_create:
1415 for (i--; i >= 1; i--)
1416 mlxsw_sp_port_remove(mlxsw_sp, i);
1417 kfree(mlxsw_sp->ports);
1418 return err;
1419}
1420
1421static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
1422 char *pude_pl, void *priv)
1423{
1424 struct mlxsw_sp *mlxsw_sp = priv;
1425 struct mlxsw_sp_port *mlxsw_sp_port;
1426 enum mlxsw_reg_pude_oper_status status;
1427 u8 local_port;
1428
1429 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
1430 mlxsw_sp_port = mlxsw_sp->ports[local_port];
1431 if (!mlxsw_sp_port) {
1432 dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
1433 local_port);
1434 return;
1435 }
1436
1437 status = mlxsw_reg_pude_oper_status_get(pude_pl);
1438 if (status == MLXSW_PORT_OPER_STATUS_UP) {
1439 netdev_info(mlxsw_sp_port->dev, "link up\n");
1440 netif_carrier_on(mlxsw_sp_port->dev);
1441 } else {
1442 netdev_info(mlxsw_sp_port->dev, "link down\n");
1443 netif_carrier_off(mlxsw_sp_port->dev);
1444 }
1445}
1446
/* Event listener binding the PUDE trap to its handler. */
static struct mlxsw_event_listener mlxsw_sp_pude_event = {
	.func = mlxsw_sp_pude_event_func,
	.trap_id = MLXSW_TRAP_ID_PUDE,
};
1451
1452static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp,
1453 enum mlxsw_event_trap_id trap_id)
1454{
1455 struct mlxsw_event_listener *el;
1456 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1457 int err;
1458
1459 switch (trap_id) {
1460 case MLXSW_TRAP_ID_PUDE:
1461 el = &mlxsw_sp_pude_event;
1462 break;
1463 }
1464 err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp);
1465 if (err)
1466 return err;
1467
1468 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
1469 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1470 if (err)
1471 goto err_event_trap_set;
1472
1473 return 0;
1474
1475err_event_trap_set:
1476 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1477 return err;
1478}
1479
1480static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp,
1481 enum mlxsw_event_trap_id trap_id)
1482{
1483 struct mlxsw_event_listener *el;
1484
1485 switch (trap_id) {
1486 case MLXSW_TRAP_ID_PUDE:
1487 el = &mlxsw_sp_pude_event;
1488 break;
1489 }
1490 mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp);
1491}
1492
/* RX handler for trapped packets: attribute the skb to the receiving
 * port's netdev, bump per-CPU RX counters under the u64_stats seqlock,
 * and hand the packet to the network stack.
 */
static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port,
				      void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		/* Ratelimited: a misbehaving device could flood this path. */
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	/* Count bytes before eth_type_trans() pulls the Ethernet header. */
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
1517
1518static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
1519 {
1520 .func = mlxsw_sp_rx_listener_func,
1521 .local_port = MLXSW_PORT_DONT_CARE,
1522 .trap_id = MLXSW_TRAP_ID_FDB_MC,
1523 },
1524 /* Traps for specific L2 packet types, not trapped as FDB MC */
1525 {
1526 .func = mlxsw_sp_rx_listener_func,
1527 .local_port = MLXSW_PORT_DONT_CARE,
1528 .trap_id = MLXSW_TRAP_ID_STP,
1529 },
1530 {
1531 .func = mlxsw_sp_rx_listener_func,
1532 .local_port = MLXSW_PORT_DONT_CARE,
1533 .trap_id = MLXSW_TRAP_ID_LACP,
1534 },
1535 {
1536 .func = mlxsw_sp_rx_listener_func,
1537 .local_port = MLXSW_PORT_DONT_CARE,
1538 .trap_id = MLXSW_TRAP_ID_EAPOL,
1539 },
1540 {
1541 .func = mlxsw_sp_rx_listener_func,
1542 .local_port = MLXSW_PORT_DONT_CARE,
1543 .trap_id = MLXSW_TRAP_ID_LLDP,
1544 },
1545 {
1546 .func = mlxsw_sp_rx_listener_func,
1547 .local_port = MLXSW_PORT_DONT_CARE,
1548 .trap_id = MLXSW_TRAP_ID_MMRP,
1549 },
1550 {
1551 .func = mlxsw_sp_rx_listener_func,
1552 .local_port = MLXSW_PORT_DONT_CARE,
1553 .trap_id = MLXSW_TRAP_ID_MVRP,
1554 },
1555 {
1556 .func = mlxsw_sp_rx_listener_func,
1557 .local_port = MLXSW_PORT_DONT_CARE,
1558 .trap_id = MLXSW_TRAP_ID_RPVST,
1559 },
1560 {
1561 .func = mlxsw_sp_rx_listener_func,
1562 .local_port = MLXSW_PORT_DONT_CARE,
1563 .trap_id = MLXSW_TRAP_ID_DHCP,
1564 },
1565 {
1566 .func = mlxsw_sp_rx_listener_func,
1567 .local_port = MLXSW_PORT_DONT_CARE,
1568 .trap_id = MLXSW_TRAP_ID_IGMP_QUERY,
1569 },
1570 {
1571 .func = mlxsw_sp_rx_listener_func,
1572 .local_port = MLXSW_PORT_DONT_CARE,
1573 .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT,
1574 },
1575 {
1576 .func = mlxsw_sp_rx_listener_func,
1577 .local_port = MLXSW_PORT_DONT_CARE,
1578 .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT,
1579 },
1580 {
1581 .func = mlxsw_sp_rx_listener_func,
1582 .local_port = MLXSW_PORT_DONT_CARE,
1583 .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE,
1584 },
1585 {
1586 .func = mlxsw_sp_rx_listener_func,
1587 .local_port = MLXSW_PORT_DONT_CARE,
1588 .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
1589 },
1590};
1591
/* Configure the RX and control trap groups, then register each RX
 * listener and enable its trap-to-CPU action. Returns 0 or a negative
 * errno. The error path is two-stage: if the trap-set for entry i
 * fails, entry i's listener (already registered) is unregistered first,
 * then entries 0..i-1 are fully unwound (trap action reset to FORWARD
 * and listener unregistered).
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int i;
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
		err = mlxsw_core_rx_listener_register(mlxsw_sp->core,
						      &mlxsw_sp_rx_listener[i],
						      mlxsw_sp);
		if (err)
			goto err_rx_listener_register;

		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
				    mlxsw_sp_rx_listener[i].trap_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
		if (err)
			goto err_rx_trap_set;
	}
	return 0;

err_rx_trap_set:
	/* Entry i was registered but its trap action was not set. */
	mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
					  &mlxsw_sp_rx_listener[i],
					  mlxsw_sp);
err_rx_listener_register:
	for (i--; i >= 0; i--) {
		mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
				    mlxsw_sp_rx_listener[i].trap_id);
		mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);

		mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
						  &mlxsw_sp_rx_listener[i],
						  mlxsw_sp);
	}
	return err;
}
1640
1641static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
1642{
1643 char hpkt_pl[MLXSW_REG_HPKT_LEN];
1644 int i;
1645
1646 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
1647 mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
1648 mlxsw_sp_rx_listener[i].trap_id);
1649 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
1650
1651 mlxsw_core_rx_listener_unregister(mlxsw_sp->core,
1652 &mlxsw_sp_rx_listener[i],
1653 mlxsw_sp);
1654 }
1655}
1656
1657static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
1658 enum mlxsw_reg_sfgc_type type,
1659 enum mlxsw_reg_sfgc_bridge_type bridge_type)
1660{
1661 enum mlxsw_flood_table_type table_type;
1662 enum mlxsw_sp_flood_table flood_table;
1663 char sfgc_pl[MLXSW_REG_SFGC_LEN];
1664
1665 if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
1666 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
1667 flood_table = 0;
1668 } else {
1669 table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
1670 if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
1671 flood_table = MLXSW_SP_FLOOD_TABLE_UC;
1672 else
1673 flood_table = MLXSW_SP_FLOOD_TABLE_BM;
1674 }
1675
1676 mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
1677 flood_table);
1678 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl);
1679}
1680
1681static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
1682{
1683 int type, err;
1684
1685 /* For non-offloaded netdevs, flood all traffic types to CPU
1686 * port.
1687 */
1688 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
1689 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
1690 continue;
1691
1692 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1693 MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
1694 if (err)
1695 return err;
1696 }
1697
1698 /* For bridged ports, use one flooding table for unknown unicast
1699 * traffic and a second table for unregistered multicast and
1700 * broadcast.
1701 */
1702 for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
1703 if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
1704 continue;
1705
1706 err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
1707 MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
1708 if (err)
1709 return err;
1710 }
1711
1712 return 0;
1713}
1714
/* Driver init callback: bring up the whole Spectrum instance in order
 * (base MAC, ports, PUDE event, RX traps, flood tables, buffers,
 * switchdev). Returns 0 or a negative errno with earlier stages
 * unwound in reverse via the goto ladder.
 *
 * NOTE(review): the err_ports_create label falls through to
 * mlxsw_sp_vfids_fini() even though no vFIDs were created at that
 * point; presumably harmless since the bitmap is empty — confirm.
 */
static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
		goto err_event_register;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n");
		goto err_rx_listener_register;
	}

	err = mlxsw_sp_flood_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n");
		goto err_flood_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	return 0;

	/* Flood and buffer init have no dedicated teardown here; their
	 * labels fall through to trap cleanup.
	 */
err_switchdev_init:
err_buffers_init:
err_flood_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
err_event_register:
	mlxsw_sp_ports_remove(mlxsw_sp);
err_ports_create:
	mlxsw_sp_vfids_fini(mlxsw_sp);
	return err;
}
1780
/* Driver fini callback: tear down in exact reverse order of
 * mlxsw_sp_init() — switchdev, traps, PUDE event, ports, vFIDs.
 */
static void mlxsw_sp_fini(void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
	mlxsw_sp_ports_remove(mlxsw_sp);
	mlxsw_sp_vfids_fini(mlxsw_sp);
}
1791
/* Device configuration profile handed to the core at init. Each
 * used_* flag marks the paired max_* / size field as valid.
 * NOTE(review): the numeric limits (64 LAGs, 7000 MIDs, flood table
 * sizes tied to VLAN_N_VID, etc.) appear to be Spectrum hardware/
 * firmware constraints — confirm against the device documentation
 * before changing any of them.
 */
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
	.used_max_vepa_channels		= 1,
	.max_vepa_channels		= 0,
	.used_max_lag			= 1,
	.max_lag			= 64,
	.used_max_port_per_lag		= 1,
	.max_port_per_lag		= 16,
	.used_max_mid			= 1,
	.max_mid			= 7000,
	.used_max_pgt			= 1,
	.max_pgt			= 0,
	.used_max_system_port		= 1,
	.max_system_port		= 64,
	.used_max_vlan_groups		= 1,
	.max_vlan_groups		= 127,
	.used_max_regions		= 1,
	.max_regions			= 400,
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,
	.max_fid_offset_flood_tables	= 2,
	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
	.max_fid_flood_tables		= 1,
	.fid_flood_table_size		= VLAN_N_VID,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
1827
/* mlxsw core driver descriptor for the Spectrum ASIC: lifecycle
 * callbacks, private data size, TX header construction and the device
 * configuration profile above.
 */
static struct mlxsw_driver mlxsw_sp_driver = {
	.kind			= MLXSW_DEVICE_KIND_SPECTRUM,
	.owner			= THIS_MODULE,
	.priv_size		= sizeof(struct mlxsw_sp),
	.init			= mlxsw_sp_init,
	.fini			= mlxsw_sp_fini,
	.txhdr_construct	= mlxsw_sp_txhdr_construct,
	.txhdr_len		= MLXSW_TXHDR_LEN,
	.profile		= &mlxsw_sp_config_profile,
};
1838
1839static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
1840{
1841 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
1842}
1843
1844static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
1845{
1846 struct net_device *dev = mlxsw_sp_port->dev;
1847 int err;
1848
1849 /* When port is not bridged untagged packets are tagged with
1850 * PVID=VID=1, thereby creating an implicit VLAN interface in
1851 * the device. Remove it and let bridge code take care of its
1852 * own VLANs.
1853 */
1854 err = mlxsw_sp_port_kill_vid(dev, 0, 1);
1855 if (err)
1856 netdev_err(dev, "Failed to remove VID 1\n");
1857
1858 return err;
1859}
1860
1861static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
1862{
1863 struct net_device *dev = mlxsw_sp_port->dev;
1864 int err;
1865
1866 /* Add implicit VLAN interface in the device, so that untagged
1867 * packets will be classified to the default vFID.
1868 */
1869 err = mlxsw_sp_port_add_vid(dev, 0, 1);
1870 if (err)
1871 netdev_err(dev, "Failed to add VID 1\n");
1872
1873 return err;
1874}
1875
1876static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
1877 struct net_device *br_dev)
1878{
1879 return !mlxsw_sp->master_bridge.dev ||
1880 mlxsw_sp->master_bridge.dev == br_dev;
1881}
1882
1883static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
1884 struct net_device *br_dev)
1885{
1886 mlxsw_sp->master_bridge.dev = br_dev;
1887 mlxsw_sp->master_bridge.ref_count++;
1888}
1889
1890static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
1891 struct net_device *br_dev)
1892{
1893 if (--mlxsw_sp->master_bridge.ref_count == 0)
1894 mlxsw_sp->master_bridge.dev = NULL;
1895}
1896
/* Netdevice notifier: veto enslaving a Spectrum port to a second bridge
 * (PRECHANGEUPPER) and, on CHANGEUPPER, perform the VLAN juggling and
 * master-bridge refcounting when a port joins or leaves a bridge.
 *
 * NOTE(review): on a failed bridge join/leave the error is only logged
 * and the bridged flag / refcount are still updated — presumably
 * intentional best-effort behavior; confirm before changing.
 */
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err;

	if (!mlxsw_sp_port_dev_check(dev))
		return NOTIFY_DONE;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* HW limitation forbids to put ports to multiple bridges. */
		if (info->master && info->linking &&
		    netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->master &&
		    netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
				if (err)
					netdev_err(dev, "Failed to join bridge\n");
				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
				mlxsw_sp_port->bridged = 1;
			} else {
				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
				if (err)
					netdev_err(dev, "Failed to leave bridge\n");
				mlxsw_sp_port->bridged = 0;
				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
			}
		}
		break;
	}

	return NOTIFY_DONE;
}
1946
/* Notifier block hooking mlxsw_sp_netdevice_event() into netdev events. */
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
	.notifier_call = mlxsw_sp_netdevice_event,
};
1950
1951static int __init mlxsw_sp_module_init(void)
1952{
1953 int err;
1954
1955 register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
1956 err = mlxsw_core_driver_register(&mlxsw_sp_driver);
1957 if (err)
1958 goto err_core_driver_register;
1959 return 0;
1960
1961err_core_driver_register:
1962 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
1963 return err;
1964}
1965
/* Module exit: unregister in reverse order of mlxsw_sp_module_init(). */
static void __exit mlxsw_sp_module_exit(void)
{
	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
1971
1972module_init(mlxsw_sp_module_init);
1973module_exit(mlxsw_sp_module_exit);
1974
1975MODULE_LICENSE("Dual BSD/GPL");
1976MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1977MODULE_DESCRIPTION("Mellanox Spectrum driver");
1978MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);