blob: 267b46af407f9e097162aa85590a80b84c4dc556 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Handle incoming frames
3 * Linux ethernet bridge
4 *
5 * Authors:
6 * Lennert Buytenhek <buytenh@gnu.org>
7 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090014#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/kernel.h>
16#include <linux/netdevice.h>
17#include <linux/etherdevice.h>
18#include <linux/netfilter_bridge.h>
Kyeyoon Park95850112014-10-23 14:49:17 -070019#include <linux/neighbour.h>
20#include <net/arp.h>
Paul Gortmakerbc3b2d72011-07-15 11:47:34 -040021#include <linux/export.h>
Vlad Yasevicha37b85c2013-02-13 12:00:10 +000022#include <linux/rculist.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include "br_private.h"
24
/* Hook for brouter: read under RCU in br_handle_frame().  When set and it
 * returns nonzero for a frame, the frame is handed back to the normal
 * network stack (RX_HANDLER_PASS) instead of being bridged.
 */
br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
EXPORT_SYMBOL(br_should_route_hook);
28
/* Final okfn for the NF_BR_LOCAL_IN hook in br_pass_frame_up(): drop any
 * fake rtable attached to the skb earlier in the bridge path, then inject
 * the frame into the local network stack.
 */
static int br_netif_receive_skb(struct net *net, struct sock *sk,
				struct sk_buff *skb)
{
	br_drop_fake_rtable(skb);
	return netif_receive_skb(skb);
}
35
/* Deliver a frame to the bridge device itself (local reception).
 *
 * Updates the bridge's per-cpu rx statistics, enforces VLAN egress policy
 * for the bridge device (unless it is in promiscuous mode), counts
 * IGMP/MLD traffic, and finally hands the skb to the local stack through
 * the NF_BR_LOCAL_IN netfilter hook.
 *
 * Consumes the skb in all cases.  Returns NET_RX_DROP when the frame is
 * filtered out, otherwise the NF_HOOK() verdict.
 */
static int br_pass_frame_up(struct sk_buff *skb)
{
	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(brdev);
	struct net_bridge_vlan_group *vg;
	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);

	/* syncp protects the 64-bit counters against torn reads */
	u64_stats_update_begin(&brstats->syncp);
	brstats->rx_packets++;
	brstats->rx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	vg = br_vlan_group_rcu(br);
	/* Bridge is just like any other port.  Make sure the
	 * packet is allowed except in promisc mode when someone
	 * may be running packet capture.
	 */
	if (!(brdev->flags & IFF_PROMISC) &&
	    !br_allowed_egress(vg, skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	indev = skb->dev;	/* remember the ingress port for the NF hook */
	skb->dev = brdev;
	skb = br_handle_vlan(br, vg, skb);
	if (!skb)
		return NET_RX_DROP;
	/* update the multicast stats if the packet is IGMP/MLD */
	br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
			   BR_MCAST_DIR_TX);

	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
		       dev_net(indev), NULL, skb, indev, NULL,
		       br_netif_receive_skb);
}
72
/* Answer an ARP request on behalf of a known neighbour (proxy ARP).
 *
 * Called from br_handle_frame_finish() for ETH_P_ARP frames.  If the ARP
 * target IP resolves to a NUD_VALID neighbour and policy permits
 * (BR_PROXYARP set on the ingress port @p, or BR_PROXYARP_WIFI on the
 * FDB destination port for that neighbour), an ARP reply is sent back
 * out the ingress device.  Whether a reply was generated is recorded in
 * BR_INPUT_SKB_CB(skb)->proxyarp_replied for the later forwarding
 * decision.  The skb itself is only read, never consumed.
 */
static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
			    u16 vid, struct net_bridge_port *p)
{
	struct net_device *dev = br->dev;
	struct neighbour *n;
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;

	BR_INPUT_SKB_CB(skb)->proxyarp_replied = false;

	/* bridge device must do ARP, and the full ARP header must be
	 * linear in the skb before we parse it
	 */
	if ((dev->flags & IFF_NOARP) ||
	    !pskb_may_pull(skb, arp_hdr_len(dev)))
		return;

	parp = arp_hdr(skb);

	/* only IPv4 ARP requests whose address lengths match the device */
	if (parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		return;

	/* walk the variable-length ARP payload: sha | sip | tha | tip */
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		return;

	n = neigh_lookup(&arp_tbl, &tip, dev);
	if (n) {
		struct net_bridge_fdb_entry *f;

		/* only reply for neighbours we positively know about */
		if (!(n->nud_state & NUD_VALID)) {
			neigh_release(n);
			return;
		}

		f = __br_fdb_get(br, n->ha, vid);
		if (f && ((p->flags & BR_PROXYARP) ||
			  (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)))) {
			arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip,
				 sha, n->ha, sha);
			BR_INPUT_SKB_CB(skb)->proxyarp_replied = true;
		}

		neigh_release(n);
	}
}
128
/* Core bridge forwarding decision: learn the source address, classify the
 * frame (unicast/multicast/broadcast), then forward, flood, and/or pass it
 * up to the bridge device.  Consumes the skb on every path.
 * note: already called with rcu_read_lock
 */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	enum br_pkt_type pkt_type = BR_PKT_UNICAST;
	struct net_bridge_fdb_entry *dst = NULL;
	struct net_bridge_mdb_entry *mdst;
	bool local_rcv, mcast_hit = false;
	struct net_bridge *br;
	u16 vid = 0;

	if (!p || p->state == BR_STATE_DISABLED)
		goto drop;

	/* VLAN ingress filtering; also determines the frame's vid */
	if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid))
		goto out;

	nbp_switchdev_frame_mark(p, skb);

	/* insert into forwarding database after filtering to avoid spoofing */
	br = p->br;
	if (p->flags & BR_LEARNING)
		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);

	/* in promisc mode the bridge device always gets a local copy */
	local_rcv = !!(br->dev->flags & IFF_PROMISC);
	if (is_multicast_ether_addr(dest)) {
		/* by definition the broadcast is also a multicast address */
		if (is_broadcast_ether_addr(dest)) {
			pkt_type = BR_PKT_BROADCAST;
			local_rcv = true;
		} else {
			pkt_type = BR_PKT_MULTICAST;
			if (br_multicast_rcv(br, p, skb, vid))
				goto drop;
		}
	}

	/* learning ports absorb frames but never forward them */
	if (p->state == BR_STATE_LEARNING)
		goto drop;

	BR_INPUT_SKB_CB(skb)->brdev = br->dev;

	if (IS_ENABLED(CONFIG_INET) && skb->protocol == htons(ETH_P_ARP))
		br_do_proxy_arp(skb, br, vid, p);

	switch (pkt_type) {
	case BR_PKT_MULTICAST:
		mdst = br_mdb_get(br, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(br, eth_hdr(skb))) {
			/* snooping hit: local copy only if the bridge itself
			 * is a group member or acts as a multicast router
			 */
			if ((mdst && mdst->mglist) ||
			    br_multicast_is_router(br)) {
				local_rcv = true;
				br->dev->stats.multicast++;
			}
			mcast_hit = true;
		} else {
			/* no querier/MDB info: treat like unknown multicast */
			local_rcv = true;
			br->dev->stats.multicast++;
		}
		break;
	case BR_PKT_UNICAST:
		dst = __br_fdb_get(br, dest, vid);
		/* fall through */
	default:
		break;
	}

	if (dst) {
		/* known unicast: either ours, or forward out one port */
		if (dst->is_local)
			return br_pass_frame_up(skb);

		dst->used = jiffies;
		br_forward(dst->dst, skb, local_rcv, false);
	} else {
		if (!mcast_hit)
			br_flood(br, skb, pkt_type, local_rcv, false);
		else
			br_multicast_flood(mdst, skb, local_rcv, false);
	}

	if (local_rcv)
		return br_pass_frame_up(skb);

out:
	return 0;
drop:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220
Ido Schimmel56fae402016-06-07 12:06:58 +0300221static void __br_handle_local_finish(struct sk_buff *skb)
Stephen Hemmingercf0f02d2006-03-20 22:59:06 -0800222{
Jiri Pirkof350a0a82010-06-15 06:50:45 +0000223 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
Vlad Yasevich2ba071e2013-02-13 12:00:16 +0000224 u16 vid = 0;
Stephen Hemmingercf0f02d2006-03-20 22:59:06 -0800225
Toshiaki Makitae0d79682014-05-26 15:15:53 +0900226 /* check if vlan is allowed, to avoid spoofing */
227 if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
Toshiaki Makitaa5642ab2014-02-07 16:48:18 +0900228 br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
Ido Schimmel56fae402016-06-07 12:06:58 +0300229}
230
231/* note: already called with rcu_read_lock */
232static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
233{
234 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
235
236 __br_handle_local_finish(skb);
Florian Westphal8626c562016-03-12 11:14:42 +0100237
238 BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
239 br_pass_frame_up(skb);
240 return 0;
Stephen Hemmingercf0f02d2006-03-20 22:59:06 -0800241}
242
/*
 * rx_handler for devices enslaved to a bridge.  Returns
 * RX_HANDLER_CONSUMED when the skb has been handled (or freed) here,
 * RX_HANDLER_PASS to let the normal stack process it.
 * note: already called with rcu_read_lock
 */
rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
	struct net_bridge_port *p;
	struct sk_buff *skb = *pskb;
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	br_should_route_hook_t *rhook;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
		goto drop;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	p = br_port_get_rcu(skb->dev);

	if (unlikely(is_link_local_ether_addr(dest))) {
		u16 fwd_mask = p->br->group_fwd_mask_required;

		/*
		 * See IEEE 802.1D Table 7-10 Reserved addresses
		 *
		 * Assignment		Value
		 * Bridge Group Address	01-80-C2-00-00-00
		 * (MAC Control) 802.3	01-80-C2-00-00-01
		 * (Link Aggregation) 802.3	01-80-C2-00-00-02
		 * 802.1X PAE address	01-80-C2-00-00-03
		 *
		 * 802.1AB LLDP		01-80-C2-00-00-0E
		 *
		 * Others reserved for future standardization
		 */
		switch (dest[5]) {
		case 0x00:	/* Bridge Group Address */
			/* If STP is turned off,
			   then must forward to keep loop detection */
			if (p->br->stp_enabled == BR_NO_STP ||
			    fwd_mask & (1u << dest[5]))
				goto forward;
			/* STP enabled: learn the source, then let the frame
			 * continue up the stack for STP processing
			 */
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		case 0x01:	/* IEEE MAC (Pause) */
			/* pause frames are never forwarded or delivered */
			goto drop;

		case 0x0E:	/* 802.1AB LLDP */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
			/* not forwarded: pass up so an LLDP agent sees it */
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		default:
			/* Allow selective forwarding for most other protocols */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
		}

		/* Deliver packet to local host only */
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
			NULL, skb, skb->dev, NULL, br_handle_local_finish);
		return RX_HANDLER_CONSUMED;
	}

forward:
	switch (p->state) {
	case BR_STATE_FORWARDING:
		/* brouter hook may claim the frame for routing instead */
		rhook = rcu_dereference(br_should_route_hook);
		if (rhook) {
			if ((*rhook)(skb)) {
				*pskb = skb;
				return RX_HANDLER_PASS;
			}
			/* hook may have modified the skb; re-read dest */
			dest = eth_hdr(skb)->h_dest;
		}
		/* fall through */
	case BR_STATE_LEARNING:
		if (ether_addr_equal(p->br->dev->dev_addr, dest))
			skb->pkt_type = PACKET_HOST;

		NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING,
			dev_net(skb->dev), NULL, skb, skb->dev, NULL,
			br_handle_frame_finish);
		break;
	default:
drop:
		kfree_skb(skb);
	}
	return RX_HANDLER_CONSUMED;
}