blob: b063050b63e2926eb88845b56b45ab708cc9af3e [file] [log] [blame]
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
13
14#include <linux/kernel.h>
15#include <linux/netdevice.h>
WANG Congc06ee962010-05-06 00:48:24 -070016#include <linux/netpoll.h>
Stephen Hemminger4505a3e2005-12-21 18:51:49 -080017#include <linux/etherdevice.h>
Stephen Hemmingeredb5e462005-12-21 19:00:58 -080018#include <linux/ethtool.h>
WANG Congc06ee962010-05-06 00:48:24 -070019#include <linux/list.h>
Bart De Schuymerea2d9b42010-04-15 12:14:51 +020020#include <linux/netfilter_bridge.h>
Stephen Hemminger4505a3e2005-12-21 18:51:49 -080021
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <asm/uaccess.h>
23#include "br_private.h"
24
/* Feature flags shared by dev->features, dev->hw_features and
 * dev->vlan_features in br_dev_setup().
 */
#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)

/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	const unsigned char *dest = skb->data;
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
	u16 vid = 0;

	rcu_read_lock();
#ifdef CONFIG_BRIDGE_NETFILTER
	/* Packets that were DNATed by br_netfilter take the slow path and
	 * bypass normal bridge forwarding entirely.
	 */
	if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
		br_nf_pre_routing_finish_bridge_slow(skb);
		rcu_read_unlock();
		return NETDEV_TX_OK;
	}
#endif

	/* Per-cpu stats protected by a u64_stats seqcount on 32-bit. */
	u64_stats_update_begin(&brstats->syncp);
	brstats->tx_packets++;
	brstats->tx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	/* VLAN filtering: drop the frame if its VLAN is not configured on
	 * the bridge; also extracts the VID used for FDB/MDB lookups below.
	 */
	if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
		goto out;

	BR_INPUT_SKB_CB(skb)->brdev = dev;

	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	if (is_broadcast_ether_addr(dest))
		br_flood_deliver(br, skb, false);
	else if (is_multicast_ether_addr(dest)) {
		/* Under netpoll we cannot take the multicast path (it may
		 * block); just flood instead.
		 */
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood_deliver(br, skb, false);
			goto out;
		}
		/* br_multicast_rcv() consumes IGMP/MLD control traffic;
		 * a non-zero return means the frame must not be forwarded.
		 */
		if (br_multicast_rcv(br, NULL, skb, vid)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_get(br, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(br, eth_hdr(skb)))
			br_multicast_deliver(mdst, skb);
		else
			br_flood_deliver(br, skb, false);
	} else if ((dst = __br_fdb_get(br, dest, vid)) != NULL)
		/* Known unicast destination: deliver to that port only. */
		br_deliver(dst->dst, skb);
	else
		/* Unknown unicast: flood (with unicast flood flag set). */
		br_flood_deliver(br, skb, true);

out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
87
stephen hemmingerbb900b22011-04-04 14:03:32 +000088static int br_dev_init(struct net_device *dev)
89{
90 struct net_bridge *br = netdev_priv(dev);
91
WANG Cong1c213bd2014-02-13 11:46:28 -080092 br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
stephen hemmingerbb900b22011-04-04 14:03:32 +000093 if (!br->stats)
94 return -ENOMEM;
95
96 return 0;
97}
98
/* ndo_open: bring the bridge device up.
 * Order matters: recompute offload features first, start the queue,
 * then enable STP and multicast snooping.
 */
static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	return 0;
}
110
/* ndo_set_rx_mode stub: the bridge has no hardware filter to program,
 * so changes to the device's multicast list are intentionally ignored.
 */
static void br_dev_set_multicast_list(struct net_device *dev)
{
}
114
/* ndo_stop: mirror of br_dev_open() — disable STP and multicast
 * snooping before stopping the transmit queue.
 */
static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	netif_stop_queue(dev);

	return 0;
}
126
/* ndo_get_stats64: sum the per-cpu software counters into @stats.
 * Each per-cpu copy is read under its u64_stats seqcount so 64-bit
 * counters are consistent on 32-bit hosts.
 */
static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *stats)
{
	struct net_bridge *br = netdev_priv(dev);
	struct pcpu_sw_netstats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct pcpu_sw_netstats *bstats
			= per_cpu_ptr(br->stats, cpu);
		/* Retry the snapshot if a writer updated it concurrently. */
		do {
			start = u64_stats_fetch_begin_bh(&bstats->syncp);
			memcpy(&tmp, bstats, sizeof(tmp));
		} while (u64_stats_fetch_retry_bh(&bstats->syncp, start));
		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;

	return stats;
}
155
/* ndo_change_mtu: accept an MTU between the Ethernet minimum (68, the
 * historical IPv4 minimum MTU) and the smallest MTU of the enslaved ports.
 */
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);
	if (new_mtu < 68 || new_mtu > br_min_mtu(br))
		return -EINVAL;

	dev->mtu = new_mtu;

#ifdef CONFIG_BRIDGE_NETFILTER
	/* remember the MTU in the rtable for PMTU */
	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

	return 0;
}
171
/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* br->lock serializes against STP reconfiguration. */
	spin_lock_bh(&br->lock);
	if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
		/* Mac address will be changed in br_stp_change_bridge_id(). */
		br_stp_change_bridge_id(br, addr->sa_data);
	}
	spin_unlock_bh(&br->lock);

	return 0;
}
190
/* ethtool get_drvinfo: the bridge is pure software, so firmware
 * version and bus info are reported as "N/A".
 */
static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bridge", sizeof(info->driver));
	strlcpy(info->version, BR_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
198
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000199static netdev_features_t br_fix_features(struct net_device *dev,
200 netdev_features_t features)
Stephen Hemmingeredb5e462005-12-21 19:00:58 -0800201{
202 struct net_bridge *br = netdev_priv(dev);
203
Michał Mirosławc4d27ef2011-04-22 06:31:16 +0000204 return br_features_recompute(br, features);
Jesse Gross361ff8a2010-10-20 13:56:08 +0000205}
206
WANG Congc06ee962010-05-06 00:48:24 -0700207#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller stub: per-port netpoll instances do the real
 * polling, so there is nothing to do at the bridge level.
 */
static void br_poll_controller(struct net_device *br_dev)
{
}
211
/* ndo_netpoll_cleanup: tear down netpoll state on every bridge port. */
static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		br_netpoll_disable(p);
}
220
Cong Wangdbe17302014-02-06 15:00:52 -0800221static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
stephen hemmingercfb478d2010-05-10 09:31:08 +0000222{
Herbert Xu91d2c342010-06-10 16:12:50 +0000223 struct netpoll *np;
stephen hemminger93d8bf92013-07-24 11:51:41 -0700224 int err;
225
Amerigo Wang47be03a22012-08-10 01:24:37 +0000226 np = kzalloc(sizeof(*p->np), gfp);
Herbert Xu91d2c342010-06-10 16:12:50 +0000227 if (!np)
stephen hemminger93d8bf92013-07-24 11:51:41 -0700228 return -ENOMEM;
Herbert Xu91d2c342010-06-10 16:12:50 +0000229
Amerigo Wang47be03a22012-08-10 01:24:37 +0000230 err = __netpoll_setup(np, p->dev, gfp);
Herbert Xu91d2c342010-06-10 16:12:50 +0000231 if (err) {
232 kfree(np);
stephen hemminger93d8bf92013-07-24 11:51:41 -0700233 return err;
stephen hemmingercfb478d2010-05-10 09:31:08 +0000234 }
Herbert Xu91d2c342010-06-10 16:12:50 +0000235
236 p->np = np;
Herbert Xu91d2c342010-06-10 16:12:50 +0000237 return err;
238}
239
Cong Wangdbe17302014-02-06 15:00:52 -0800240int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
241{
242 if (!p->br->dev->npinfo)
243 return 0;
244
245 return __br_netpoll_enable(p, gfp);
246}
247
/* ndo_netpoll_setup: enable netpoll on all current ports.
 * On any failure, roll back every port already enabled via
 * br_netpoll_cleanup() and return the error.
 */
static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
			    gfp_t gfp)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	int err = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (!p->dev)
			continue;
		err = __br_netpoll_enable(p, gfp);
		if (err)
			goto fail;
	}

out:
	return err;

fail:
	br_netpoll_cleanup(dev);
	goto out;
}
270
/* Detach and free the port's netpoll instance, if any.
 * p->np is cleared before the async free so concurrent readers stop
 * using it; __netpoll_free_async() defers the actual free safely.
 */
void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	__netpoll_free_async(np);
}
282
283#endif
284
/* ndo_add_slave: enslave @slave_dev to the bridge. */
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)

{
	return br_add_if(netdev_priv(dev), slave_dev);
}
292
/* ndo_del_slave: release @slave_dev from the bridge. */
static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	return br_del_if(netdev_priv(dev), slave_dev);
}
299
/* Minimal ethtool ops: driver info plus generic link state. */
static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo    = br_getinfo,
	.get_link	= ethtool_op_get_link,
};
304
/* net_device callbacks for the bridge master device. */
static const struct net_device_ops br_netdev_ops = {
	.ndo_open		 = br_dev_open,
	.ndo_stop		 = br_dev_stop,
	.ndo_init		 = br_dev_init,
	.ndo_start_xmit		 = br_dev_xmit,
	.ndo_get_stats64	 = br_get_stats64,
	.ndo_set_mac_address	 = br_set_mac_address,
	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
	.ndo_change_mtu		 = br_change_mtu,
	.ndo_do_ioctl		 = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	 = br_netpoll_setup,
	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
	.ndo_poll_controller	 = br_poll_controller,
#endif
	.ndo_add_slave		 = br_add_slave,
	.ndo_del_slave		 = br_del_slave,
	.ndo_fix_features        = br_fix_features,
	.ndo_fdb_add		 = br_fdb_add,
	.ndo_fdb_del		 = br_fdb_delete,
	.ndo_fdb_dump		 = br_fdb_dump,
	.ndo_bridge_getlink	 = br_getlink,
	.ndo_bridge_setlink	 = br_setlink,
	.ndo_bridge_dellink	 = br_dellink,
};
330
/* dev->destructor: release the per-cpu stats allocated in br_dev_init()
 * before freeing the device itself.
 */
static void br_dev_free(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	free_percpu(br->stats);
	free_netdev(dev);
}
338
/* Device type exposed via sysfs (DEVTYPE=bridge in uevents). */
static struct device_type br_type = {
	.name	= "bridge",
};
342
/* Initialize a freshly allocated bridge net_device and its private
 * struct net_bridge: ops, features, locks, bridge id, group address,
 * and the default STP timer values (in jiffies).
 */
void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->destructor = br_dev_free;
	SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
	SET_NETDEV_DEVTYPE(dev, &br_type);
	/* No queueing on the bridge master; ports have their own queues. */
	dev->tx_queue_len = 0;
	dev->priv_flags = IFF_EBRIDGE;

	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
			NETIF_F_HW_VLAN_CTAG_TX;
	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;
	dev->vlan_features = COMMON_FEATURES;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	spin_lock_init(&br->hash_lock);

	/* Default bridge priority 0x8000 (upper byte first). */
	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;

	/* Listen on the standard 802.1D group address 01:80:C2:00:00:00. */
	ether_addr_copy(br->group_addr, eth_reserved_addr_base);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;

	/* Until elected otherwise, this bridge considers itself root. */
	br->designated_root = br->bridge_id;
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->ageing_time = 300 * HZ;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
}