blob: cf09fe591fc20cc73a2fdd20d13860f88c33134d [file] [log] [blame]
/*
 *	Device handling code
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
13
14#include <linux/kernel.h>
15#include <linux/netdevice.h>
WANG Congc06ee962010-05-06 00:48:24 -070016#include <linux/netpoll.h>
Stephen Hemminger4505a3e2005-12-21 18:51:49 -080017#include <linux/etherdevice.h>
Stephen Hemmingeredb5e462005-12-21 19:00:58 -080018#include <linux/ethtool.h>
WANG Congc06ee962010-05-06 00:48:24 -070019#include <linux/list.h>
Bart De Schuymerea2d9b42010-04-15 12:14:51 +020020#include <linux/netfilter_bridge.h>
Stephen Hemminger4505a3e2005-12-21 18:51:49 -080021
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <asm/uaccess.h>
23#include "br_private.h"
24
stephen hemmingereeaf61d2010-07-27 08:26:30 +000025/* net device transmit always called with BH disabled */
/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	const unsigned char *dest = skb->data;	/* destination MAC at head of frame */
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);

#ifdef CONFIG_BRIDGE_NETFILTER
	/* Frames DNATed by bridge netfilter are diverted to the slow
	 * path instead of being bridged here again. */
	if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
		br_nf_pre_routing_finish_bridge_slow(skb);
		return NETDEV_TX_OK;
	}
#endif

	/* Per-cpu 64-bit TX counters, guarded by the u64_stats seqcount
	 * so br_get_stats64() can read them consistently. */
	u64_stats_update_begin(&brstats->syncp);
	brstats->tx_packets++;
	brstats->tx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	BR_INPUT_SKB_CB(skb)->brdev = dev;

	/* Strip the Ethernet header before handing off for forwarding. */
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	rcu_read_lock();
	if (is_multicast_ether_addr(dest)) {
		/* Under netpoll transmit, skip multicast snooping and
		 * simply flood to all ports. */
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood_deliver(br, skb);
			goto out;
		}
		/* IGMP/MLD snooping may consume the frame entirely. */
		if (br_multicast_rcv(br, NULL, skb)) {
			kfree_skb(skb);
			goto out;
		}

		mdst = br_mdb_get(br, skb);
		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
			br_multicast_deliver(mdst, skb);
		else
			br_flood_deliver(br, skb);
	} else if ((dst = __br_fdb_get(br, dest)) != NULL)
		br_deliver(dst->dst, skb);	/* known unicast: one port */
	else
		br_flood_deliver(br, skb);	/* unknown unicast: flood */

out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
76
/* ndo_open: bring the bridge device up.
 * Order matters: recompute offload features from the current ports,
 * start the TX queue, then enable STP and multicast snooping. */
static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_features_recompute(br);
	netif_start_queue(dev);
	br_stp_enable_bridge(br);
	br_multicast_open(br);

	return 0;
}
88
/* ndo_set_multicast_list: intentionally a no-op — the bridge has no
 * hardware multicast filter to program. */
static void br_dev_set_multicast_list(struct net_device *dev)
{
}
92
/* ndo_stop: take the bridge device down.
 * Disable STP and multicast snooping before stopping the TX queue. */
static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_stp_disable_bridge(br);
	br_multicast_stop(br);

	netif_stop_queue(dev);

	return 0;
}
104
/* ndo_get_stats64: sum the per-cpu 64-bit counters into @stats.
 * Each per-cpu snapshot is taken under the u64_stats seqcount and
 * retried if a writer raced with the copy. */
static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *stats)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_cpu_netstats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct br_cpu_netstats *bstats
			= per_cpu_ptr(br->stats, cpu);
		/* Snapshot this cpu's counters; loop until we get a
		 * copy that was not torn by a concurrent update. */
		do {
			start = u64_stats_fetch_begin(&bstats->syncp);
			memcpy(&tmp, bstats, sizeof(tmp));
		} while (u64_stats_fetch_retry(&bstats->syncp, start));
		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;

	return stats;
}
133
/* ndo_change_mtu: accept an MTU between 68 (the historic minimum
 * IPv4 MTU — NOTE(review): magic number, confirm intent) and the
 * smallest MTU among the bridge's ports. */
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);
	if (new_mtu < 68 || new_mtu > br_min_mtu(br))
		return -EINVAL;

	dev->mtu = new_mtu;

#ifdef CONFIG_BRIDGE_NETFILTER
	/* remember the MTU in the rtable for PMTU */
	br->fake_rtable.dst.metrics[RTAX_MTU - 1] = new_mtu;
#endif

	return 0;
}
149
Stephen Hemmingerffe1d492007-04-09 11:49:58 -0700150/* Allow setting mac address to any valid ethernet address. */
/* Allow setting mac address to any valid ethernet address. */
static int br_set_mac_address(struct net_device *dev, void *p)
{
	struct net_bridge *br = netdev_priv(dev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	/* br->lock serializes against STP/bridge-id users; the flag
	 * records that the address was set by the administrator. */
	spin_lock_bh(&br->lock);
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	br_stp_change_bridge_id(br, addr->sa_data);
	br->flags |= BR_SET_MAC_ADDR;
	spin_unlock_bh(&br->lock);

	return 0;
}
167
Stephen Hemmingeredb5e462005-12-21 19:00:58 -0800168static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
169{
170 strcpy(info->driver, "bridge");
171 strcpy(info->version, BR_VERSION);
172 strcpy(info->fw_version, "N/A");
173 strcpy(info->bus_info, "N/A");
174}
175
176static int br_set_sg(struct net_device *dev, u32 data)
177{
178 struct net_bridge *br = netdev_priv(dev);
179
180 if (data)
181 br->feature_mask |= NETIF_F_SG;
182 else
183 br->feature_mask &= ~NETIF_F_SG;
184
185 br_features_recompute(br);
186 return 0;
187}
188
189static int br_set_tso(struct net_device *dev, u32 data)
190{
191 struct net_bridge *br = netdev_priv(dev);
192
193 if (data)
194 br->feature_mask |= NETIF_F_TSO;
195 else
196 br->feature_mask &= ~NETIF_F_TSO;
197
198 br_features_recompute(br);
199 return 0;
200}
201
202static int br_set_tx_csum(struct net_device *dev, u32 data)
203{
204 struct net_bridge *br = netdev_priv(dev);
205
206 if (data)
Herbert Xu2c6cc0d2006-06-17 22:06:45 -0700207 br->feature_mask |= NETIF_F_NO_CSUM;
Stephen Hemmingeredb5e462005-12-21 19:00:58 -0800208 else
Herbert Xu2c6cc0d2006-06-17 22:06:45 -0700209 br->feature_mask &= ~NETIF_F_ALL_CSUM;
Stephen Hemmingeredb5e462005-12-21 19:00:58 -0800210
211 br_features_recompute(br);
212 return 0;
213}
214
WANG Congc06ee962010-05-06 00:48:24 -0700215#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller: intentionally empty — the bridge forwards
 * netpoll traffic through its ports' own netpoll instances. */
static void br_poll_controller(struct net_device *br_dev)
{
}
219
/* ndo_netpoll_cleanup: tear down netpoll state on every port. */
static void br_netpoll_cleanup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		br_netpoll_disable(p);
	}
}
229
/* ndo_netpoll_setup: enable netpoll on every bridge port.
 * @ni is not used here; each port sets up its own netpoll instance.
 * On the first failure, undo whatever was already enabled and return
 * that error. */
static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;
	int err = 0;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		if (!p->dev)
			continue;

		err = br_netpoll_enable(p);
		if (err)
			goto fail;
	}

out:
	return err;

fail:
	/* Roll back: disable netpoll on all ports, then return err. */
	br_netpoll_cleanup(dev);
	goto out;
}
252
/* Allocate and attach a netpoll instance to bridge port @p.
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from __netpoll_setup(). On failure p->np is left untouched. */
int br_netpoll_enable(struct net_bridge_port *p)
{
	struct netpoll *np;
	int err = 0;

	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
	err = -ENOMEM;
	if (!np)
		goto out;

	np->dev = p->dev;

	err = __netpoll_setup(np);
	if (err) {
		kfree(np);
		goto out;
	}

	/* Publish only after setup succeeded. */
	p->np = np;

out:
	return err;
}
276
/* Detach and free the netpoll instance of bridge port @p, if any.
 * The pointer is cleared first, then an RCU-BH grace period elapses
 * before freeing so in-flight transmitters cannot touch freed memory. */
void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu_bh();

	__netpoll_cleanup(np);
	kfree(np);
}
292
293#endif
294
/* ethtool operations for the bridge device; the set_* hooks adjust
 * br->feature_mask (see br_set_sg/br_set_tso/br_set_tx_csum above). */
static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo = br_getinfo,
	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = br_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = br_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = br_set_tso,
	.get_ufo = ethtool_op_get_ufo,
	.set_ufo = ethtool_op_set_ufo,
	.get_flags = ethtool_op_get_flags,
};
308
/* net_device operations for the bridge device. */
static const struct net_device_ops br_netdev_ops = {
	.ndo_open = br_dev_open,
	.ndo_stop = br_dev_stop,
	.ndo_start_xmit = br_dev_xmit,
	.ndo_get_stats64 = br_get_stats64,
	.ndo_set_mac_address = br_set_mac_address,
	.ndo_set_multicast_list = br_dev_set_multicast_list,
	.ndo_change_mtu = br_change_mtu,
	.ndo_do_ioctl = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup = br_netpoll_setup,
	.ndo_netpoll_cleanup = br_netpoll_cleanup,
	.ndo_poll_controller = br_poll_controller,
#endif
};
324
/* Device destructor: release the per-cpu stats before the netdev
 * itself, since br (and the stats pointer) live in the netdev's
 * private area. */
static void br_dev_free(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	free_percpu(br->stats);
	free_netdev(dev);
}
332
/* Initialize a newly allocated bridge net_device: give it a random
 * MAC address, standard Ethernet defaults, the bridge ops tables,
 * and the software-device feature set. */
void br_dev_setup(struct net_device *dev)
{
	random_ether_addr(dev->dev_addr);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->destructor = br_dev_free;	/* frees per-cpu stats with the device */
	SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
	dev->tx_queue_len = 0;		/* no qdisc queue on the software device */
	dev->priv_flags = IFF_EBRIDGE;

	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
			NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
			NETIF_F_NETNS_LOCAL | NETIF_F_GSO;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347}