/*
 * Userspace interface
 * Linux ethernet bridge
 *
 * Authors:
 * Lennert Buytenhek		<buytenh@gnu.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/dsa.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "br_private.h"

/*
 * Determine the initial path cost based on speed,
 * using the recommendations from the 802.1D standard.
 *
 * Since the driver might sleep, we must not be holding any locks.
 */
static int port_cost(struct net_device *dev)
{
        struct ethtool_link_ksettings ecmd;

        if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
                switch (ecmd.base.speed) {
                case SPEED_10000:
                        return 2;
                case SPEED_1000:
                        return 4;
                case SPEED_100:
                        return 19;
                case SPEED_10:
                        return 100;
                }
        }

        /* Old silly heuristics based on name */
        if (!strncmp(dev->name, "lec", 3))
                return 7;

        if (!strncmp(dev->name, "plip", 4))
                return 2500;

        return 100;     /* assume old 10Mbps */
}


/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
{
        struct net_device *dev = p->dev;
        struct net_bridge *br = p->br;

        if (!(p->flags & BR_ADMIN_COST) &&
            netif_running(dev) && netif_oper_up(dev))
                p->path_cost = port_cost(dev);

        *notified = false;
        if (!netif_running(br->dev))
                return;

        spin_lock_bh(&br->lock);
        if (netif_running(dev) && netif_oper_up(dev)) {
                if (p->state == BR_STATE_DISABLED) {
                        br_stp_enable_port(p);
                        *notified = true;
                }
        } else {
                if (p->state != BR_STATE_DISABLED) {
                        br_stp_disable_port(p);
                        *notified = true;
                }
        }
        spin_unlock_bh(&br->lock);
}

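/* Put the port into promiscuous mode and drop the statically synced
 * addresses from its address filter, since they are no longer needed.
 */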
static void br_port_set_promisc(struct net_bridge_port *p)
{
        int err = 0;

        if (br_promisc_port(p))
                return;

        err = dev_set_promiscuity(p->dev, 1);
        if (err)
                return;

        br_fdb_unsync_static(p->br, p);
        p->flags |= BR_PROMISC;
}

static void br_port_clear_promisc(struct net_bridge_port *p)
{
        int err;

        /* Check if the port is already non-promisc or if it doesn't
         * support UNICAST filtering.  Without unicast filtering support
         * we'll end up re-enabling promisc mode anyway, so just check for
         * it here.
         */
        if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
                return;

        /* Since we'll be clearing the promisc mode, program the port
         * first so that we don't have interruption in traffic.
         */
        err = br_fdb_sync_static(p->br, p);
        if (err)
                return;

        dev_set_promiscuity(p->dev, -1);
        p->flags &= ~BR_PROMISC;
}

/* When a port is added or removed or when certain port flags
 * change, this function is called to automatically manage
 * promiscuity setting of all the bridge ports.  We are always called
 * under RTNL so can skip using rcu primitives.
 */
void br_manage_promisc(struct net_bridge *br)
{
        struct net_bridge_port *p;
        bool set_all = false;

        /* If vlan filtering is disabled or bridge interface is placed
         * into promiscuous mode, place all ports in promiscuous mode.
         */
        if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
                set_all = true;

        list_for_each_entry(p, &br->port_list, list) {
                if (set_all) {
                        br_port_set_promisc(p);
                } else {
                        /* If the number of auto-ports is <= 1, then all other
                         * ports will have their output configuration
                         * statically specified through fdbs.  Since ingress
                         * on the auto-port becomes forwarding/egress to other
                         * ports and egress configuration is statically known,
                         * we can say that ingress configuration of the
                         * auto-port is also statically known.
                         * This lets us disable promiscuous mode and write
                         * this config to hw.
                         */
                        if (br->auto_cnt == 0 ||
                            (br->auto_cnt == 1 && br_auto_port(p)))
                                br_port_clear_promisc(p);
                        else
                                br_port_set_promisc(p);
                }
        }
}

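/* Recount the ports subject to automatic promiscuity management
 * (auto-ports) and, if the count changed, let br_manage_promisc()
 * reprogram all ports accordingly.
 */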
static void nbp_update_port_count(struct net_bridge *br)
{
        struct net_bridge_port *p;
        u32 cnt = 0;

        list_for_each_entry(p, &br->port_list, list) {
                if (br_auto_port(p))
                        cnt++;
        }
        if (br->auto_cnt != cnt) {
                br->auto_cnt = cnt;
                br_manage_promisc(br);
        }
}

static void nbp_delete_promisc(struct net_bridge_port *p)
{
        /* If port is currently promiscuous, unset promiscuity.
         * Otherwise, it is a static port so remove all addresses
         * from it.
         */
        dev_set_allmulti(p->dev, -1);
        if (br_promisc_port(p))
                dev_set_promiscuity(p->dev, -1);
        else
                br_fdb_unsync_static(p->br, p);
}

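/* kobject release callback: frees the port structure once the last
 * reference to its embedded kobject has been dropped.
 */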
static void release_nbp(struct kobject *kobj)
{
        struct net_bridge_port *p
                = container_of(kobj, struct net_bridge_port, kobj);
        kfree(p);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
        .sysfs_ops = &brport_sysfs_ops,
#endif
        .release = release_nbp,
};

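/* Final teardown of a port: drop the reference on the underlying
 * device and put the port's kobject; the port itself is freed by
 * release_nbp() when the last kobject reference goes away.
 */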
static void destroy_nbp(struct net_bridge_port *p)
{
        struct net_device *dev = p->dev;

        p->br = NULL;
        p->dev = NULL;
        dev_put(dev);

        kobject_put(&p->kobj);
}

static void destroy_nbp_rcu(struct rcu_head *head)
{
        struct net_bridge_port *p =
                        container_of(head, struct net_bridge_port, rcu);
        destroy_nbp(p);
}

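/* Return the largest forwarding headroom required by any port device. */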
static unsigned get_max_headroom(struct net_bridge *br)
{
        unsigned max_headroom = 0;
        struct net_bridge_port *p;

        list_for_each_entry(p, &br->port_list, list) {
                unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);

                if (dev_headroom > max_headroom)
                        max_headroom = dev_headroom;
        }

        return max_headroom;
}

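/* Propagate a new RX headroom value to every port device and record it
 * as the bridge device's needed_headroom.
 */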
static void update_headroom(struct net_bridge *br, int new_hr)
{
        struct net_bridge_port *p;

        list_for_each_entry(p, &br->port_list, list)
                netdev_set_rx_headroom(p->dev, new_hr);

        br->dev->needed_headroom = new_hr;
}

/* Deleting a port (interface) from the bridge is done in two steps
 * via RCU. The first step marks the device as down; that deletes
 * all the timers and stops new packets from flowing through.
 *
 * Final cleanup doesn't occur until after all CPUs have finished
 * processing packets.
 *
 * Protected from multiple admin operations by the RTNL mutex.
 */
static void del_nbp(struct net_bridge_port *p)
{
        struct net_bridge *br = p->br;
        struct net_device *dev = p->dev;

        sysfs_remove_link(br->ifobj, p->dev->name);

        nbp_delete_promisc(p);

        spin_lock_bh(&br->lock);
        br_stp_disable_port(p);
        spin_unlock_bh(&br->lock);

        br_ifinfo_notify(RTM_DELLINK, NULL, p);

        list_del_rcu(&p->list);
        if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
                update_headroom(br, get_max_headroom(br));
        netdev_reset_rx_headroom(dev);

        nbp_vlan_flush(p);
        br_fdb_delete_by_port(br, p, 0, 1);
        switchdev_deferred_process();

        nbp_update_port_count(br);

        netdev_upper_dev_unlink(dev, br->dev);

        dev->priv_flags &= ~IFF_BRIDGE_PORT;

        netdev_rx_handler_unregister(dev);

        br_multicast_del_port(p);

        kobject_uevent(&p->kobj, KOBJ_REMOVE);
        kobject_del(&p->kobj);

        br_netpoll_disable(p);

        call_rcu(&p->rcu, destroy_nbp_rcu);
}

/* Delete bridge device */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
        struct net_bridge *br = netdev_priv(dev);
        struct net_bridge_port *p, *n;

        list_for_each_entry_safe(p, n, &br->port_list, list) {
                del_nbp(p);
        }

        br_recalculate_neigh_suppress_enabled(br);

        br_fdb_delete_by_port(br, NULL, 0, 1);

        cancel_delayed_work_sync(&br->gc_work);

        br_sysfs_delbr(br->dev);
        unregister_netdevice_queue(br->dev, head);
}

/* find an available port number */
static int find_portno(struct net_bridge *br)
{
        int index;
        struct net_bridge_port *p;
        unsigned long *inuse;

        inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
                        GFP_KERNEL);
        if (!inuse)
                return -ENOMEM;

        set_bit(0, inuse);      /* zero is reserved */
        list_for_each_entry(p, &br->port_list, list) {
                set_bit(p->port_no, inuse);
        }
        index = find_first_zero_bit(inuse, BR_MAX_PORTS);
        kfree(inuse);

        return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}

/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
                                       struct net_device *dev)
{
        struct net_bridge_port *p;
        int index, err;

        index = find_portno(br);
        if (index < 0)
                return ERR_PTR(index);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (p == NULL)
                return ERR_PTR(-ENOMEM);

        p->br = br;
        dev_hold(dev);
        p->dev = dev;
        p->path_cost = port_cost(dev);
        p->priority = 0x8000 >> BR_PORT_BITS;
        p->port_no = index;
        p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
        br_init_port(p);
        br_set_state(p, BR_STATE_DISABLED);
        br_stp_port_timer_init(p);
        err = br_multicast_add_port(p);
        if (err) {
                dev_put(dev);
                kfree(p);
                p = ERR_PTR(err);
        }

        return p;
}

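/* Allocate and register a new bridge device in the given namespace. */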
int br_add_bridge(struct net *net, const char *name)
{
        struct net_device *dev;
        int res;

        dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
                           br_dev_setup);

        if (!dev)
                return -ENOMEM;

        dev_net_set(dev, net);
        dev->rtnl_link_ops = &br_link_ops;

        res = register_netdev(dev);
        if (res)
                free_netdev(dev);
        return res;
}

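/* Look up a bridge device by name and delete it; the device must be a
 * bridge and must already be administratively down.
 */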
int br_del_bridge(struct net *net, const char *name)
{
        struct net_device *dev;
        int ret = 0;

        rtnl_lock();
        dev = __dev_get_by_name(net, name);
        if (dev == NULL)
                ret = -ENXIO;   /* Could not find device */

        else if (!(dev->priv_flags & IFF_EBRIDGE)) {
                /* Attempt to delete non bridge device! */
                ret = -EPERM;
        }

        else if (dev->flags & IFF_UP) {
                /* Not shutdown yet. */
                ret = -EBUSY;
        }

        else
                br_dev_delete(dev, NULL);

        rtnl_unlock();
        return ret;
}

/* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
static int br_mtu_min(const struct net_bridge *br)
{
        const struct net_bridge_port *p;
        int ret_mtu = 0;

        list_for_each_entry(p, &br->port_list, list)
                if (!ret_mtu || ret_mtu > p->dev->mtu)
                        ret_mtu = p->dev->mtu;

        return ret_mtu ? ret_mtu : ETH_DATA_LEN;
}

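/* Keep the bridge MTU in sync with the smallest port MTU, unless the
 * user has configured the bridge MTU explicitly.
 */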
void br_mtu_auto_adjust(struct net_bridge *br)
{
        ASSERT_RTNL();

        /* if the bridge MTU was manually configured don't mess with it */
        if (br->mtu_set_by_user)
                return;

        /* change to the minimum MTU and clear the flag which was set by
         * the bridge ndo_change_mtu callback
         */
        dev_set_mtu(br->dev, br_mtu_min(br));
        br->mtu_set_by_user = false;
}

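/* Clamp the bridge device's GSO limits to the smallest limits
 * advertised by any of its ports.
 */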
static void br_set_gso_limits(struct net_bridge *br)
{
        unsigned int gso_max_size = GSO_MAX_SIZE;
        u16 gso_max_segs = GSO_MAX_SEGS;
        const struct net_bridge_port *p;

        list_for_each_entry(p, &br->port_list, list) {
                gso_max_size = min(gso_max_size, p->dev->gso_max_size);
                gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs);
        }
        br->dev->gso_max_size = gso_max_size;
        br->dev->gso_max_segs = gso_max_segs;
}

/*
 * Recompute the bridge device's features from the slaves' features.
 */
netdev_features_t br_features_recompute(struct net_bridge *br,
        netdev_features_t features)
{
        struct net_bridge_port *p;
        netdev_features_t mask;

        if (list_empty(&br->port_list))
                return features;

        mask = features;
        features &= ~NETIF_F_ONE_FOR_ALL;

        list_for_each_entry(p, &br->port_list, list) {
                features = netdev_increment_features(features,
                                                     p->dev->features, mask);
        }
        features = netdev_add_tso_features(features, mask);

        return features;
}

/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev,
              struct netlink_ext_ack *extack)
{
        struct net_bridge_port *p;
        int err = 0;
        unsigned br_hr, dev_hr;
        bool changed_addr;

        /* Don't allow bridging non-ethernet-like devices, or DSA-enabled
         * master network devices, since the bridge layer rx_handler prevents
         * the DSA fake ethertype handler from being invoked, so we would not
         * strip off the DSA switch tag protocol header and the bridge layer
         * would just return RX_HANDLER_CONSUMED, stopping RX processing for
         * these frames.
         */
        if ((dev->flags & IFF_LOOPBACK) ||
            dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
            !is_valid_ether_addr(dev->dev_addr) ||
            netdev_uses_dsa(dev))
                return -EINVAL;

        /* No bridging of bridges */
        if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
                NL_SET_ERR_MSG(extack,
                               "Can not enslave a bridge to a bridge");
                return -ELOOP;
        }

        /* Device is already being bridged */
        if (br_port_exists(dev))
                return -EBUSY;

        /* No bridging devices that dislike that (e.g. wireless) */
        if (dev->priv_flags & IFF_DONT_BRIDGE) {
                NL_SET_ERR_MSG(extack,
                               "Device does not allow enslaving to a bridge");
                return -EOPNOTSUPP;
        }

        p = new_nbp(br, dev);
        if (IS_ERR(p))
                return PTR_ERR(p);

        call_netdevice_notifiers(NETDEV_JOIN, dev);

        err = dev_set_allmulti(dev, 1);
        if (err)
                goto put_back;

        err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
                                   SYSFS_BRIDGE_PORT_ATTR);
        if (err)
                goto err1;

        err = br_sysfs_addif(p);
        if (err)
                goto err2;

        err = br_netpoll_enable(p);
        if (err)
                goto err3;

        err = netdev_rx_handler_register(dev, br_handle_frame, p);
        if (err)
                goto err4;

        dev->priv_flags |= IFF_BRIDGE_PORT;

        err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
        if (err)
                goto err5;

        err = nbp_switchdev_mark_set(p);
        if (err)
                goto err6;

        dev_disable_lro(dev);

        list_add_rcu(&p->list, &br->port_list);

        nbp_update_port_count(br);

        netdev_update_features(br->dev);

        br_hr = br->dev->needed_headroom;
        dev_hr = netdev_get_fwd_headroom(dev);
        if (br_hr < dev_hr)
                update_headroom(br, dev_hr);
        else
                netdev_set_rx_headroom(dev, br_hr);

        if (br_fdb_insert(br, p, dev->dev_addr, 0))
                netdev_err(dev, "failed insert local address bridge forwarding table\n");

        err = nbp_vlan_init(p);
        if (err) {
                netdev_err(dev, "failed to initialize vlan filtering on this port\n");
                goto err7;
        }

        spin_lock_bh(&br->lock);
        changed_addr = br_stp_recalculate_bridge_id(br);

        if (netif_running(dev) && netif_oper_up(dev) &&
            (br->dev->flags & IFF_UP))
                br_stp_enable_port(p);
        spin_unlock_bh(&br->lock);

        br_ifinfo_notify(RTM_NEWLINK, NULL, p);

        if (changed_addr)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

        br_mtu_auto_adjust(br);
        br_set_gso_limits(br);

        kobject_uevent(&p->kobj, KOBJ_ADD);

        return 0;

err7:
        list_del_rcu(&p->list);
        br_fdb_delete_by_port(br, p, 0, 1);
        nbp_update_port_count(br);
err6:
        netdev_upper_dev_unlink(dev, br->dev);
err5:
        dev->priv_flags &= ~IFF_BRIDGE_PORT;
        netdev_rx_handler_unregister(dev);
err4:
        br_netpoll_disable(p);
err3:
        sysfs_remove_link(br->ifobj, p->dev->name);
err2:
        kobject_put(&p->kobj);
        p = NULL;       /* kobject_put frees */
err1:
        dev_set_allmulti(dev, -1);
put_back:
        dev_put(dev);
        kfree(p);
        return err;
}

/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
        struct net_bridge_port *p;
        bool changed_addr;

        p = br_port_get_rtnl(dev);
        if (!p || p->br != br)
                return -EINVAL;

        /* Since more than one interface can be attached to a bridge,
         * there still may be an alternate path for netconsole to use;
         * therefore there is no reason for a NETDEV_RELEASE event.
         */
        del_nbp(p);

        br_mtu_auto_adjust(br);
        br_set_gso_limits(br);

        spin_lock_bh(&br->lock);
        changed_addr = br_stp_recalculate_bridge_id(br);
        spin_unlock_bh(&br->lock);

        if (changed_addr)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

        netdev_update_features(br->dev);

        return 0;
}

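/* React to per-port flag changes: update the auto-port count used for
 * promiscuity management and recompute the neighbor suppression state.
 */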
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
        struct net_bridge *br = p->br;

        if (mask & BR_AUTO_MASK)
                nbp_update_port_count(br);

        if (mask & BR_NEIGH_SUPPRESS)
                br_recalculate_neigh_suppress_enabled(br);
}