blob: d6e5929458b1c8c21f426ebca5d0827e8712444c [file] [log] [blame]
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
13
14#include <linux/kernel.h>
15#include <linux/netdevice.h>
WANG Congc06ee962010-05-06 00:48:24 -070016#include <linux/netpoll.h>
Stephen Hemminger4505a3e2005-12-21 18:51:49 -080017#include <linux/etherdevice.h>
Stephen Hemmingeredb5e462005-12-21 19:00:58 -080018#include <linux/ethtool.h>
WANG Congc06ee962010-05-06 00:48:24 -070019#include <linux/list.h>
Bart De Schuymerea2d9b42010-04-15 12:14:51 +020020#include <linux/netfilter_bridge.h>
Stephen Hemminger4505a3e2005-12-21 18:51:49 -080021
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <asm/uaccess.h>
23#include "br_private.h"
24
/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	const unsigned char *dest = skb->data;	/* dst MAC: mac header not yet pulled */
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);

#ifdef CONFIG_BRIDGE_NETFILTER
	/* Packet that was DNAT'ed by bridge netfilter must finish its
	 * deferred PREROUTING pass instead of being transmitted here.
	 */
	if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
		br_nf_pre_routing_finish_bridge_slow(skb);
		return NETDEV_TX_OK;
	}
#endif

	/* Per-cpu 64-bit tx counters; syncp guards readers on 32-bit. */
	u64_stats_update_begin(&brstats->syncp);
	brstats->tx_packets++;
	brstats->tx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	BR_INPUT_SKB_CB(skb)->brdev = dev;

	/* Strip the ethernet header: forwarding code re-adds it per port. */
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	rcu_read_lock();
	if (is_broadcast_ether_addr(dest))
		br_flood_deliver(br, skb);
	else if (is_multicast_ether_addr(dest)) {
		/* Under netpoll we cannot run the multicast snooping code
		 * (it may sleep/allocate); just flood to every port.
		 */
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood_deliver(br, skb);
			goto out;
		}
		/* Non-zero means the snooping code consumed/errored the
		 * packet (e.g. an IGMP/MLD report); drop our reference.
		 */
		if (br_multicast_rcv(br, NULL, skb)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_get(br, skb);
		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
			br_multicast_deliver(mdst, skb);
		else
			br_flood_deliver(br, skb);
	} else if ((dst = __br_fdb_get(br, dest)) != NULL)
		br_deliver(dst->dst, skb);	/* known unicast: one port */
	else
		br_flood_deliver(br, skb);	/* unknown unicast: flood */

out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
78
stephen hemmingerbb900b22011-04-04 14:03:32 +000079static int br_dev_init(struct net_device *dev)
80{
81 struct net_bridge *br = netdev_priv(dev);
82
83 br->stats = alloc_percpu(struct br_cpu_netstats);
84 if (!br->stats)
85 return -ENOMEM;
86
87 return 0;
88}
89
/* ndo_open: bring the bridge device up.
 * Order matters: recompute offload features from the current ports,
 * start the tx queue, then enable STP and multicast snooping.
 */
static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	return 0;
}
101
/* ndo_set_rx_mode stub: the bridge itself has no hardware filters to
 * program; rx-mode changes on the bridge device are intentionally ignored.
 */
static void br_dev_set_multicast_list(struct net_device *dev)
{
}
105
/* ndo_stop: take the bridge device down.
 * Disable STP and multicast snooping first, then stop the tx queue.
 */
static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	netif_stop_queue(dev);

	return 0;
}
117
Eric Dumazet28172732010-07-07 14:58:56 -0700118static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
119 struct rtnl_link_stats64 *stats)
stephen hemminger14bb4782010-03-02 13:32:09 +0000120{
121 struct net_bridge *br = netdev_priv(dev);
Eric Dumazet406818f2010-06-23 13:00:48 -0700122 struct br_cpu_netstats tmp, sum = { 0 };
stephen hemminger14bb4782010-03-02 13:32:09 +0000123 unsigned int cpu;
124
125 for_each_possible_cpu(cpu) {
Eric Dumazet406818f2010-06-23 13:00:48 -0700126 unsigned int start;
stephen hemminger14bb4782010-03-02 13:32:09 +0000127 const struct br_cpu_netstats *bstats
128 = per_cpu_ptr(br->stats, cpu);
Eric Dumazet406818f2010-06-23 13:00:48 -0700129 do {
130 start = u64_stats_fetch_begin(&bstats->syncp);
131 memcpy(&tmp, bstats, sizeof(tmp));
132 } while (u64_stats_fetch_retry(&bstats->syncp, start));
133 sum.tx_bytes += tmp.tx_bytes;
134 sum.tx_packets += tmp.tx_packets;
135 sum.rx_bytes += tmp.rx_bytes;
136 sum.rx_packets += tmp.rx_packets;
stephen hemminger14bb4782010-03-02 13:32:09 +0000137 }
138
139 stats->tx_bytes = sum.tx_bytes;
140 stats->tx_packets = sum.tx_packets;
141 stats->rx_bytes = sum.rx_bytes;
142 stats->rx_packets = sum.rx_packets;
143
144 return stats;
145}
146
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147static int br_change_mtu(struct net_device *dev, int new_mtu)
148{
Simon Wunderlich4adf0af2008-07-30 16:27:55 -0700149 struct net_bridge *br = netdev_priv(dev);
150 if (new_mtu < 68 || new_mtu > br_min_mtu(br))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151 return -EINVAL;
152
153 dev->mtu = new_mtu;
Simon Wunderlich4adf0af2008-07-30 16:27:55 -0700154
155#ifdef CONFIG_BRIDGE_NETFILTER
156 /* remember the MTU in the rtable for PMTU */
David S. Millerdefb3512010-12-08 21:16:57 -0800157 dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
Simon Wunderlich4adf0af2008-07-30 16:27:55 -0700158#endif
159
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160 return 0;
161}
162
/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	spin_lock_bh(&br->lock);
	/* compare_ether_addr() returns non-zero when the addresses differ */
	if (compare_ether_addr(dev->dev_addr, addr->sa_data)) {
		dev->addr_assign_type &= ~NET_ADDR_RANDOM;
		memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
		br_fdb_change_mac_address(br, addr->sa_data);
		br_stp_change_bridge_id(br, addr->sa_data);
	}
	/* Mark the address as administratively set (done even when the new
	 * address equals the old one) so auto-selection no longer applies.
	 */
	br->flags |= BR_SET_MAC_ADDR;
	spin_unlock_bh(&br->lock);

	return 0;
}
184
Stephen Hemmingeredb5e462005-12-21 19:00:58 -0800185static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
186{
187 strcpy(info->driver, "bridge");
188 strcpy(info->version, BR_VERSION);
189 strcpy(info->fw_version, "N/A");
190 strcpy(info->bus_info, "N/A");
191}
192
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000193static netdev_features_t br_fix_features(struct net_device *dev,
194 netdev_features_t features)
Stephen Hemmingeredb5e462005-12-21 19:00:58 -0800195{
196 struct net_bridge *br = netdev_priv(dev);
197
Michał Mirosławc4d27ef2011-04-22 06:31:16 +0000198 return br_features_recompute(br, features);
Jesse Gross361ff8a2010-10-20 13:56:08 +0000199}
200
WANG Congc06ee962010-05-06 00:48:24 -0700201#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller stub: polling is delegated to the netpoll instances
 * attached to each bridge port, so there is nothing to do here.
 */
static void br_poll_controller(struct net_device *br_dev)
{
}
205
Herbert Xu91d2c342010-06-10 16:12:50 +0000206static void br_netpoll_cleanup(struct net_device *dev)
WANG Congc06ee962010-05-06 00:48:24 -0700207{
stephen hemmingercfb478d2010-05-10 09:31:08 +0000208 struct net_bridge *br = netdev_priv(dev);
WANG Congc06ee962010-05-06 00:48:24 -0700209 struct net_bridge_port *p, *n;
WANG Congc06ee962010-05-06 00:48:24 -0700210
WANG Congc06ee962010-05-06 00:48:24 -0700211 list_for_each_entry_safe(p, n, &br->port_list, list) {
Herbert Xu91d2c342010-06-10 16:12:50 +0000212 br_netpoll_disable(p);
WANG Congc06ee962010-05-06 00:48:24 -0700213 }
214}
215
Herbert Xu91d2c342010-06-10 16:12:50 +0000216static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
WANG Congc06ee962010-05-06 00:48:24 -0700217{
Herbert Xu91d2c342010-06-10 16:12:50 +0000218 struct net_bridge *br = netdev_priv(dev);
219 struct net_bridge_port *p, *n;
220 int err = 0;
221
222 list_for_each_entry_safe(p, n, &br->port_list, list) {
223 if (!p->dev)
224 continue;
225
226 err = br_netpoll_enable(p);
227 if (err)
228 goto fail;
229 }
230
231out:
232 return err;
233
234fail:
235 br_netpoll_cleanup(dev);
236 goto out;
stephen hemmingercfb478d2010-05-10 09:31:08 +0000237}
238
Herbert Xu91d2c342010-06-10 16:12:50 +0000239int br_netpoll_enable(struct net_bridge_port *p)
stephen hemmingercfb478d2010-05-10 09:31:08 +0000240{
Herbert Xu91d2c342010-06-10 16:12:50 +0000241 struct netpoll *np;
242 int err = 0;
243
244 np = kzalloc(sizeof(*p->np), GFP_KERNEL);
245 err = -ENOMEM;
246 if (!np)
247 goto out;
248
249 np->dev = p->dev;
WANG Congcefa9992011-06-19 16:13:01 -0700250 strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
Herbert Xu91d2c342010-06-10 16:12:50 +0000251
252 err = __netpoll_setup(np);
253 if (err) {
254 kfree(np);
255 goto out;
stephen hemmingercfb478d2010-05-10 09:31:08 +0000256 }
Herbert Xu91d2c342010-06-10 16:12:50 +0000257
258 p->np = np;
259
260out:
261 return err;
262}
263
/* Detach and free the netpoll instance of bridge port @p, if any.
 * p->np must be cleared BEFORE synchronize_rcu_bh() so that no new
 * transmitter can pick up the pointer while we wait for in-flight
 * users to drain.
 */
void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu_bh();

	__netpoll_cleanup(np);
	kfree(np);
}
279
280#endif
281
/* ndo_add_slave: enslave @slave_dev to the bridge (e.g. via "ip link"). */
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)

{
	return br_add_if(netdev_priv(dev), slave_dev);
}
289
/* ndo_del_slave: release @slave_dev from the bridge. */
static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	return br_del_if(netdev_priv(dev), slave_dev);
}
296
/* ethtool operations exposed by the bridge pseudo-device. */
static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo    = br_getinfo,
	.get_link	= ethtool_op_get_link,
};
301
/* net_device operations for the bridge pseudo-device. */
static const struct net_device_ops br_netdev_ops = {
	.ndo_open		 = br_dev_open,
	.ndo_stop		 = br_dev_stop,
	.ndo_init		 = br_dev_init,
	.ndo_start_xmit		 = br_dev_xmit,
	.ndo_get_stats64	 = br_get_stats64,
	.ndo_set_mac_address	 = br_set_mac_address,
	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
	.ndo_change_mtu		 = br_change_mtu,
	.ndo_do_ioctl		 = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	 = br_netpoll_setup,
	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
	.ndo_poll_controller	 = br_poll_controller,
#endif
	.ndo_add_slave		 = br_add_slave,
	.ndo_del_slave		 = br_del_slave,
	.ndo_fix_features        = br_fix_features,
	.ndo_fdb_add		 = br_fdb_add,
	.ndo_fdb_del		 = br_fdb_delete,
	.ndo_fdb_dump		 = br_fdb_dump,
};
324
/* Destructor installed on the bridge device: release the per-cpu stats
 * allocated in br_dev_init() before freeing the netdev itself.
 */
static void br_dev_free(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	free_percpu(br->stats);
	free_netdev(dev);
}
332
/* Device type exported via sysfs (DEVTYPE=bridge in uevents). */
static struct device_type br_type = {
	.name	= "bridge",
};
336
/* Initialize a freshly allocated bridge net_device and its private
 * net_bridge state with default STP timers, feature flags and ops.
 * Called from the rtnl_link_ops / br_add_bridge allocation path.
 */
void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	/* Start with a random MAC; it is recomputed from the ports later
	 * unless the administrator sets one explicitly.
	 */
	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->destructor = br_dev_free;
	SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
	SET_NETDEV_DEVTYPE(dev, &br_type);
	dev->tx_queue_len = 0;	/* software device: no tx queueing needed */
	dev->priv_flags = IFF_EBRIDGE;

	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
			NETIF_F_GSO_MASK | NETIF_F_HW_CSUM | NETIF_F_LLTX |
			NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX;
	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_TX;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	spin_lock_init(&br->hash_lock);

	/* Default bridge priority 0x8000 (prio bytes of the bridge id). */
	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;

	memcpy(br->group_addr, br_group_address, ETH_ALEN);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;

	/* 802.1D default STP timers (in jiffies). */
	br->designated_root = br->bridge_id;
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->ageing_time = 300 * HZ;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
}