blob: 430b53e7d941def09220a1c97a2e82d288304595 [file] [log] [blame]
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
13
14#include <linux/kernel.h>
15#include <linux/netdevice.h>
WANG Congc06ee962010-05-06 00:48:24 -070016#include <linux/netpoll.h>
Stephen Hemminger4505a3e2005-12-21 18:51:49 -080017#include <linux/etherdevice.h>
Stephen Hemmingeredb5e462005-12-21 19:00:58 -080018#include <linux/ethtool.h>
WANG Congc06ee962010-05-06 00:48:24 -070019#include <linux/list.h>
Bart De Schuymerea2d9b42010-04-15 12:14:51 +020020#include <linux/netfilter_bridge.h>
Stephen Hemminger4505a3e2005-12-21 18:51:49 -080021
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080022#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include "br_private.h"
24
Vlad Yasevich161f65b2013-05-22 07:49:34 +000025#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
26 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
27
Pablo Neira Ayuso1a4ba642015-03-10 10:27:18 +010028const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
29EXPORT_SYMBOL_GPL(nf_br_ops);
30
Nikolay Aleksandrovc6894de2016-01-15 19:03:54 +010031static struct lock_class_key bridge_netdev_addr_lock_key;
32
stephen hemmingereeaf61d2010-07-27 08:26:30 +000033/* net device transmit always called with BH disabled */
Stephen Hemminger6fef4c02009-08-31 19:50:41 +000034netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070035{
36 struct net_bridge *br = netdev_priv(dev);
37 const unsigned char *dest = skb->data;
38 struct net_bridge_fdb_entry *dst;
Herbert Xuc4fcb782010-02-27 19:41:48 +000039 struct net_bridge_mdb_entry *mdst;
Li RongQing8f849852014-01-04 13:57:59 +080040 struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
Pablo Neira Ayuso1a4ba642015-03-10 10:27:18 +010041 const struct nf_br_ops *nf_ops;
Vlad Yasevich78851982013-02-13 12:00:14 +000042 u16 vid = 0;
stephen hemminger14bb4782010-03-02 13:32:09 +000043
Stephen Hemmingerc03307e2012-08-14 08:19:33 -070044 rcu_read_lock();
Pablo Neira Ayuso1a4ba642015-03-10 10:27:18 +010045 nf_ops = rcu_dereference(nf_br_ops);
46 if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
Stephen Hemmingerc03307e2012-08-14 08:19:33 -070047 rcu_read_unlock();
Bart De Schuymerea2d9b42010-04-15 12:14:51 +020048 return NETDEV_TX_OK;
49 }
Bart De Schuymerea2d9b42010-04-15 12:14:51 +020050
Eric Dumazet406818f2010-06-23 13:00:48 -070051 u64_stats_update_begin(&brstats->syncp);
stephen hemminger14bb4782010-03-02 13:32:09 +000052 brstats->tx_packets++;
53 brstats->tx_bytes += skb->len;
Eric Dumazet406818f2010-06-23 13:00:48 -070054 u64_stats_update_end(&brstats->syncp);
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Herbert Xu6088a532010-02-27 19:41:42 +000056 BR_INPUT_SKB_CB(skb)->brdev = dev;
57
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -070058 skb_reset_mac_header(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070059 skb_pull(skb, ETH_HLEN);
60
Nikolay Aleksandrov907b1e62015-10-12 21:47:02 +020061 if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
Toshiaki Makita12464bb2014-03-27 21:46:55 +090062 goto out;
63
Nikolay Aleksandrov37b090e2016-07-14 06:10:02 +030064 if (is_broadcast_ether_addr(dest)) {
Nikolay Aleksandrov8addd5e2016-08-31 15:36:51 +020065 br_flood(br, skb, BR_PKT_BROADCAST, false, true);
Nikolay Aleksandrov37b090e2016-07-14 06:10:02 +030066 } else if (is_multicast_ether_addr(dest)) {
Herbert Xu91d2c342010-06-10 16:12:50 +000067 if (unlikely(netpoll_tx_running(dev))) {
Nikolay Aleksandrov8addd5e2016-08-31 15:36:51 +020068 br_flood(br, skb, BR_PKT_MULTICAST, false, true);
Herbert Xu91d2c342010-06-10 16:12:50 +000069 goto out;
70 }
Vlad Yasevich06499092013-10-28 15:45:07 -040071 if (br_multicast_rcv(br, NULL, skb, vid)) {
Herbert Xu6d1d1d32010-07-29 01:12:31 +000072 kfree_skb(skb);
Herbert Xuc4fcb782010-02-27 19:41:48 +000073 goto out;
Herbert Xu6d1d1d32010-07-29 01:12:31 +000074 }
Herbert Xuc4fcb782010-02-27 19:41:48 +000075
Cong Wangfbca58a2013-03-07 03:05:33 +000076 mdst = br_mdb_get(br, skb, vid);
Linus Lüssingb00589a2013-08-01 01:06:20 +020077 if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
Linus Lüssingcc0fdd82013-08-30 17:28:17 +020078 br_multicast_querier_exists(br, eth_hdr(skb)))
Nikolay Aleksandrov37b090e2016-07-14 06:10:02 +030079 br_multicast_flood(mdst, skb, false, true);
Herbert Xuc4fcb782010-02-27 19:41:48 +000080 else
Nikolay Aleksandrov8addd5e2016-08-31 15:36:51 +020081 br_flood(br, skb, BR_PKT_MULTICAST, false, true);
Nikolay Aleksandrovbfd0aea2017-02-13 14:59:09 +010082 } else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
Nikolay Aleksandrov37b090e2016-07-14 06:10:02 +030083 br_forward(dst->dst, skb, false, true);
84 } else {
Nikolay Aleksandrov8addd5e2016-08-31 15:36:51 +020085 br_flood(br, skb, BR_PKT_UNICAST, false, true);
Nikolay Aleksandrov37b090e2016-07-14 06:10:02 +030086 }
Herbert Xuc4fcb782010-02-27 19:41:48 +000087out:
stephen hemmingereeaf61d2010-07-27 08:26:30 +000088 rcu_read_unlock();
Patrick McHardy6ed10652009-06-23 06:03:08 +000089 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -070090}
91
Nikolay Aleksandrovc6894de2016-01-15 19:03:54 +010092static void br_set_lockdep_class(struct net_device *dev)
93{
94 lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
95}
96
stephen hemmingerbb900b22011-04-04 14:03:32 +000097static int br_dev_init(struct net_device *dev)
98{
99 struct net_bridge *br = netdev_priv(dev);
Vlad Yasevich5be5a2d2014-10-03 11:29:18 -0400100 int err;
stephen hemmingerbb900b22011-04-04 14:03:32 +0000101
WANG Cong1c213bd2014-02-13 11:46:28 -0800102 br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
stephen hemmingerbb900b22011-04-04 14:03:32 +0000103 if (!br->stats)
104 return -ENOMEM;
105
Vlad Yasevich5be5a2d2014-10-03 11:29:18 -0400106 err = br_vlan_init(br);
Nikolay Aleksandrov1080ab92016-06-28 16:57:06 +0200107 if (err) {
Vlad Yasevich5be5a2d2014-10-03 11:29:18 -0400108 free_percpu(br->stats);
Nikolay Aleksandrov1080ab92016-06-28 16:57:06 +0200109 return err;
110 }
111
112 err = br_multicast_init_stats(br);
113 if (err) {
114 free_percpu(br->stats);
115 br_vlan_flush(br);
116 }
Nikolay Aleksandrovc6894de2016-01-15 19:03:54 +0100117 br_set_lockdep_class(dev);
Vlad Yasevich5be5a2d2014-10-03 11:29:18 -0400118
119 return err;
stephen hemmingerbb900b22011-04-04 14:03:32 +0000120}
121
Ido Schimmelb6fe0442017-04-10 14:59:27 +0300122static void br_dev_uninit(struct net_device *dev)
123{
124 struct net_bridge *br = netdev_priv(dev);
125
Xin Longb1b9d362017-04-25 22:58:37 +0800126 br_multicast_dev_del(br);
Ido Schimmelb6fe0442017-04-10 14:59:27 +0300127 br_multicast_uninit_stats(br);
128 br_vlan_flush(br);
129 free_percpu(br->stats);
130}
131
/* ndo_open: bring the bridge up — refresh offload features, start the
 * queue, and enable STP and multicast snooping.
 */
static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	return 0;
}
143
/* ndo_set_rx_mode: intentionally empty — the bridge has no hardware
 * filter of its own to program.
 */
static void br_dev_set_multicast_list(struct net_device *dev)
{
}
147
Vlad Yasevich2796d0c2014-05-16 09:59:20 -0400148static void br_dev_change_rx_flags(struct net_device *dev, int change)
149{
150 if (change & IFF_PROMISC)
151 br_manage_promisc(netdev_priv(dev));
152}
153
/* ndo_stop: disable STP and multicast snooping, then quiesce the
 * transmit queue.
 */
static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	netif_stop_queue(dev);

	return 0;
}
165
stephen hemmingerbc1f4472017-01-06 19:12:52 -0800166static void br_get_stats64(struct net_device *dev,
167 struct rtnl_link_stats64 *stats)
stephen hemminger14bb4782010-03-02 13:32:09 +0000168{
169 struct net_bridge *br = netdev_priv(dev);
Li RongQing8f849852014-01-04 13:57:59 +0800170 struct pcpu_sw_netstats tmp, sum = { 0 };
stephen hemminger14bb4782010-03-02 13:32:09 +0000171 unsigned int cpu;
172
173 for_each_possible_cpu(cpu) {
Eric Dumazet406818f2010-06-23 13:00:48 -0700174 unsigned int start;
Li RongQing8f849852014-01-04 13:57:59 +0800175 const struct pcpu_sw_netstats *bstats
stephen hemminger14bb4782010-03-02 13:32:09 +0000176 = per_cpu_ptr(br->stats, cpu);
Eric Dumazet406818f2010-06-23 13:00:48 -0700177 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700178 start = u64_stats_fetch_begin_irq(&bstats->syncp);
Eric Dumazet406818f2010-06-23 13:00:48 -0700179 memcpy(&tmp, bstats, sizeof(tmp));
Eric W. Biederman57a77442014-03-13 21:26:42 -0700180 } while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
Eric Dumazet406818f2010-06-23 13:00:48 -0700181 sum.tx_bytes += tmp.tx_bytes;
182 sum.tx_packets += tmp.tx_packets;
183 sum.rx_bytes += tmp.rx_bytes;
184 sum.rx_packets += tmp.rx_packets;
stephen hemminger14bb4782010-03-02 13:32:09 +0000185 }
186
187 stats->tx_bytes = sum.tx_bytes;
188 stats->tx_packets = sum.tx_packets;
189 stats->rx_bytes = sum.rx_bytes;
190 stats->rx_packets = sum.rx_packets;
stephen hemminger14bb4782010-03-02 13:32:09 +0000191}
192
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193static int br_change_mtu(struct net_device *dev, int new_mtu)
194{
Simon Wunderlich4adf0af2008-07-30 16:27:55 -0700195 struct net_bridge *br = netdev_priv(dev);
Jarod Wilson91572082016-10-20 13:55:20 -0400196 if (new_mtu > br_min_mtu(br))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197 return -EINVAL;
198
199 dev->mtu = new_mtu;
Simon Wunderlich4adf0af2008-07-30 16:27:55 -0700200
Pablo Neira Ayuso34666d42014-09-18 11:29:03 +0200201#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
Simon Wunderlich4adf0af2008-07-30 16:27:55 -0700202 /* remember the MTU in the rtable for PMTU */
David S. Millerdefb3512010-12-08 21:16:57 -0800203 dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
Simon Wunderlich4adf0af2008-07-30 16:27:55 -0700204#endif
205
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 return 0;
207}
208
Stephen Hemmingerffe1d492007-04-09 11:49:58 -0700209/* Allow setting mac address to any valid ethernet address. */
Stephen Hemminger4505a3e2005-12-21 18:51:49 -0800210static int br_set_mac_address(struct net_device *dev, void *p)
211{
212 struct net_bridge *br = netdev_priv(dev);
213 struct sockaddr *addr = p;
Stephen Hemmingerffe1d492007-04-09 11:49:58 -0700214
215 if (!is_valid_ether_addr(addr->sa_data))
Danny Kukawka7ca1e112012-02-21 02:07:52 +0000216 return -EADDRNOTAVAIL;
Stephen Hemminger4505a3e2005-12-21 18:51:49 -0800217
218 spin_lock_bh(&br->lock);
Joe Perches9a7b6ef92012-05-08 18:56:49 +0000219 if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
Toshiaki Makitaa3ebb7e2014-02-07 16:48:20 +0900220 /* Mac address will be changed in br_stp_change_bridge_id(). */
stephen hemminger43598812011-12-08 07:17:49 +0000221 br_stp_change_bridge_id(br, addr->sa_data);
222 }
Stephen Hemminger4505a3e2005-12-21 18:51:49 -0800223 spin_unlock_bh(&br->lock);
224
Stephen Hemmingerffe1d492007-04-09 11:49:58 -0700225 return 0;
Stephen Hemminger4505a3e2005-12-21 18:51:49 -0800226}
227
Stephen Hemmingeredb5e462005-12-21 19:00:58 -0800228static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
229{
Jiri Pirko7826d432013-01-06 00:44:26 +0000230 strlcpy(info->driver, "bridge", sizeof(info->driver));
231 strlcpy(info->version, BR_VERSION, sizeof(info->version));
232 strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
233 strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
Stephen Hemmingeredb5e462005-12-21 19:00:58 -0800234}
235
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000236static netdev_features_t br_fix_features(struct net_device *dev,
237 netdev_features_t features)
Stephen Hemmingeredb5e462005-12-21 19:00:58 -0800238{
239 struct net_bridge *br = netdev_priv(dev);
240
Michał Mirosławc4d27ef2011-04-22 06:31:16 +0000241 return br_features_recompute(br, features);
Jesse Gross361ff8a2010-10-20 13:56:08 +0000242}
243
WANG Congc06ee962010-05-06 00:48:24 -0700244#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller: nothing to do — the ports poll themselves. */
static void br_poll_controller(struct net_device *br_dev)
{
}
248
Herbert Xu91d2c342010-06-10 16:12:50 +0000249static void br_netpoll_cleanup(struct net_device *dev)
WANG Congc06ee962010-05-06 00:48:24 -0700250{
stephen hemmingercfb478d2010-05-10 09:31:08 +0000251 struct net_bridge *br = netdev_priv(dev);
Amerigo Wang4e3828c2012-08-10 01:24:44 +0000252 struct net_bridge_port *p;
WANG Congc06ee962010-05-06 00:48:24 -0700253
Amerigo Wang4e3828c2012-08-10 01:24:44 +0000254 list_for_each_entry(p, &br->port_list, list)
Herbert Xu91d2c342010-06-10 16:12:50 +0000255 br_netpoll_disable(p);
WANG Congc06ee962010-05-06 00:48:24 -0700256}
257
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700258static int __br_netpoll_enable(struct net_bridge_port *p)
stephen hemmingercfb478d2010-05-10 09:31:08 +0000259{
Herbert Xu91d2c342010-06-10 16:12:50 +0000260 struct netpoll *np;
stephen hemminger93d8bf92013-07-24 11:51:41 -0700261 int err;
262
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700263 np = kzalloc(sizeof(*p->np), GFP_KERNEL);
Herbert Xu91d2c342010-06-10 16:12:50 +0000264 if (!np)
stephen hemminger93d8bf92013-07-24 11:51:41 -0700265 return -ENOMEM;
Herbert Xu91d2c342010-06-10 16:12:50 +0000266
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700267 err = __netpoll_setup(np, p->dev);
Herbert Xu91d2c342010-06-10 16:12:50 +0000268 if (err) {
269 kfree(np);
stephen hemminger93d8bf92013-07-24 11:51:41 -0700270 return err;
stephen hemmingercfb478d2010-05-10 09:31:08 +0000271 }
Herbert Xu91d2c342010-06-10 16:12:50 +0000272
273 p->np = np;
Herbert Xu91d2c342010-06-10 16:12:50 +0000274 return err;
275}
276
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700277int br_netpoll_enable(struct net_bridge_port *p)
Cong Wangdbe17302014-02-06 15:00:52 -0800278{
279 if (!p->br->dev->npinfo)
280 return 0;
281
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700282 return __br_netpoll_enable(p);
Cong Wangdbe17302014-02-06 15:00:52 -0800283}
284
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700285static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
Cong Wangdbe17302014-02-06 15:00:52 -0800286{
287 struct net_bridge *br = netdev_priv(dev);
288 struct net_bridge_port *p;
289 int err = 0;
290
291 list_for_each_entry(p, &br->port_list, list) {
292 if (!p->dev)
293 continue;
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700294 err = __br_netpoll_enable(p);
Cong Wangdbe17302014-02-06 15:00:52 -0800295 if (err)
296 goto fail;
297 }
298
299out:
300 return err;
301
302fail:
303 br_netpoll_cleanup(dev);
304 goto out;
305}
306
Herbert Xu91d2c342010-06-10 16:12:50 +0000307void br_netpoll_disable(struct net_bridge_port *p)
308{
309 struct netpoll *np = p->np;
310
311 if (!np)
312 return;
313
314 p->np = NULL;
315
Neil Horman2cde6ac2013-02-11 10:25:30 +0000316 __netpoll_free_async(np);
WANG Congc06ee962010-05-06 00:48:24 -0700317}
318
319#endif
320
/* ndo_add_slave: enslave @slave_dev as a new bridge port. */
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_add_if(br, slave_dev);
}
328
/* ndo_del_slave: remove @slave_dev from the bridge. */
static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return br_del_if(br, slave_dev);
}
335
Stephen Hemmingera2dbb882008-11-19 21:49:00 -0800336static const struct ethtool_ops br_ethtool_ops = {
Stephen Hemmingere4119a42008-08-15 19:51:07 -0700337 .get_drvinfo = br_getinfo,
338 .get_link = ethtool_op_get_link,
Stephen Hemmingeredb5e462005-12-21 19:00:58 -0800339};
340
Stephen Hemmingera2dbb882008-11-19 21:49:00 -0800341static const struct net_device_ops br_netdev_ops = {
342 .ndo_open = br_dev_open,
343 .ndo_stop = br_dev_stop,
stephen hemmingerbb900b22011-04-04 14:03:32 +0000344 .ndo_init = br_dev_init,
Ido Schimmelb6fe0442017-04-10 14:59:27 +0300345 .ndo_uninit = br_dev_uninit,
Stephen Hemminger00829822008-11-20 20:14:53 -0800346 .ndo_start_xmit = br_dev_xmit,
Eric Dumazet406818f2010-06-23 13:00:48 -0700347 .ndo_get_stats64 = br_get_stats64,
Stephen Hemminger00829822008-11-20 20:14:53 -0800348 .ndo_set_mac_address = br_set_mac_address,
Jiri Pirkoafc4b132011-08-16 06:29:01 +0000349 .ndo_set_rx_mode = br_dev_set_multicast_list,
Vlad Yasevich2796d0c2014-05-16 09:59:20 -0400350 .ndo_change_rx_flags = br_dev_change_rx_flags,
Stephen Hemminger00829822008-11-20 20:14:53 -0800351 .ndo_change_mtu = br_change_mtu,
352 .ndo_do_ioctl = br_dev_ioctl,
WANG Congc06ee962010-05-06 00:48:24 -0700353#ifdef CONFIG_NET_POLL_CONTROLLER
Herbert Xu91d2c342010-06-10 16:12:50 +0000354 .ndo_netpoll_setup = br_netpoll_setup,
WANG Congc06ee962010-05-06 00:48:24 -0700355 .ndo_netpoll_cleanup = br_netpoll_cleanup,
356 .ndo_poll_controller = br_poll_controller,
357#endif
Jiri Pirkoafc61512011-02-13 09:33:42 +0000358 .ndo_add_slave = br_add_slave,
359 .ndo_del_slave = br_del_slave,
Michał Mirosławc4d27ef2011-04-22 06:31:16 +0000360 .ndo_fix_features = br_fix_features,
John Fastabend77162022012-04-15 06:43:56 +0000361 .ndo_fdb_add = br_fdb_add,
362 .ndo_fdb_del = br_fdb_delete,
363 .ndo_fdb_dump = br_fdb_dump,
John Fastabende5a55a82012-10-24 08:12:57 +0000364 .ndo_bridge_getlink = br_getlink,
365 .ndo_bridge_setlink = br_setlink,
Vlad Yasevich407af322013-02-13 12:00:12 +0000366 .ndo_bridge_dellink = br_dellink,
Toshiaki Makita66780532015-07-31 15:03:26 +0900367 .ndo_features_check = passthru_features_check,
Stephen Hemmingera2dbb882008-11-19 21:49:00 -0800368};
369
stephen hemmingerbb900b22011-04-04 14:03:32 +0000370static struct device_type br_type = {
371 .name = "bridge",
372};
373
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374void br_dev_setup(struct net_device *dev)
375{
stephen hemmingerbb900b22011-04-04 14:03:32 +0000376 struct net_bridge *br = netdev_priv(dev);
377
Danny Kukawka7ce5d222012-02-15 06:45:40 +0000378 eth_hw_addr_random(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700379 ether_setup(dev);
380
Stephen Hemmingera2dbb882008-11-19 21:49:00 -0800381 dev->netdev_ops = &br_netdev_ops;
Ido Schimmelb6fe0442017-04-10 14:59:27 +0300382 dev->destructor = free_netdev;
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +0000383 dev->ethtool_ops = &br_ethtool_ops;
stephen hemmingerbb900b22011-04-04 14:03:32 +0000384 SET_NETDEV_DEVTYPE(dev, &br_type);
Phil Sutterccecb2a2015-08-18 10:30:37 +0200385 dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;
Stephen Hemmingeredb5e462005-12-21 19:00:58 -0800386
Vlad Yasevich161f65b2013-05-22 07:49:34 +0000387 dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
Toshiaki Makita1c5abb62014-06-10 20:59:22 +0900388 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
389 dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
390 NETIF_F_HW_VLAN_STAG_TX;
Vlad Yasevich161f65b2013-05-22 07:49:34 +0000391 dev->vlan_features = COMMON_FEATURES;
stephen hemmingerbb900b22011-04-04 14:03:32 +0000392
393 br->dev = dev;
394 spin_lock_init(&br->lock);
395 INIT_LIST_HEAD(&br->port_list);
396 spin_lock_init(&br->hash_lock);
397
398 br->bridge_id.prio[0] = 0x80;
399 br->bridge_id.prio[1] = 0x00;
400
Joe Perchese5a727f2014-02-23 00:05:25 -0800401 ether_addr_copy(br->group_addr, eth_reserved_addr_base);
stephen hemmingerbb900b22011-04-04 14:03:32 +0000402
stephen hemmingerbb900b22011-04-04 14:03:32 +0000403 br->stp_enabled = BR_NO_STP;
stephen hemminger515853c2011-10-03 18:14:46 +0000404 br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
Toshiaki Makitaf2808d22014-06-10 20:59:24 +0900405 br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
stephen hemminger515853c2011-10-03 18:14:46 +0000406
stephen hemmingerbb900b22011-04-04 14:03:32 +0000407 br->designated_root = br->bridge_id;
408 br->bridge_max_age = br->max_age = 20 * HZ;
409 br->bridge_hello_time = br->hello_time = 2 * HZ;
410 br->bridge_forward_delay = br->forward_delay = 15 * HZ;
Vivien Didelot34d8acd2016-12-10 13:44:29 -0500411 br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
Jarod Wilson91572082016-10-20 13:55:20 -0400412 dev->max_mtu = ETH_MAX_MTU;
stephen hemmingerbb900b22011-04-04 14:03:32 +0000413
414 br_netfilter_rtable_init(br);
415 br_stp_timer_init(br);
416 br_multicast_init(br);
Nikolay Aleksandrovf7cdee82017-02-04 18:05:07 +0100417 INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418}