blob: a2098e3de500d4ab34c3d3b14435eddb5089b354 [file] [log] [blame]
/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
13
Herbert Xu025d89c2010-02-27 19:41:43 +000014#include <linux/err.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090015#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/kernel.h>
17#include <linux/netdevice.h>
WANG Congc06ee962010-05-06 00:48:24 -070018#include <linux/netpoll.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/skbuff.h>
Stephen Hemminger85ca7192006-04-26 02:39:19 -070020#include <linux/if_vlan.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/netfilter_bridge.h>
22#include "br_private.h"
23
David S. Miller87faf3c2010-03-16 14:37:47 -070024static int deliver_clone(const struct net_bridge_port *prev,
25 struct sk_buff *skb,
Michael Braun7f7708f2010-03-16 00:26:22 -070026 void (*__packet_hook)(const struct net_bridge_port *p,
27 struct sk_buff *skb));
28
/* Don't forward packets to originating port or if forwarding is disabled */
YOSHIFUJI Hideaki9d6f2292007-02-09 23:24:35 +090030static inline int should_deliver(const struct net_bridge_port *p,
Linus Torvalds1da177e2005-04-16 15:20:36 -070031 const struct sk_buff *skb)
32{
Fischer, Anna3982d3d2009-08-13 06:55:16 +000033 return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
34 p->state == BR_STATE_FORWARDING);
Linus Torvalds1da177e2005-04-16 15:20:36 -070035}
36
Stephen Hemminger85ca7192006-04-26 02:39:19 -070037static inline unsigned packet_length(const struct sk_buff *skb)
38{
39 return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
40}
41
Linus Torvalds1da177e2005-04-16 15:20:36 -070042int br_dev_queue_push_xmit(struct sk_buff *skb)
43{
Changli Gaof88de8d2010-12-25 03:41:30 +000044 /* ip_fragment doesn't copy the MAC header */
45 if (nf_bridge_maybe_copy_header(skb) ||
46 (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070047 kfree_skb(skb);
Changli Gaof88de8d2010-12-25 03:41:30 +000048 } else {
49 skb_push(skb, ETH_HLEN);
Peter Huang (Peng)a881e962012-04-19 20:12:51 +000050 br_drop_fake_rtable(skb);
Changli Gaof88de8d2010-12-25 03:41:30 +000051 dev_queue_xmit(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070052 }
53
54 return 0;
55}
56
/* Run the bridge POST_ROUTING netfilter hook; on acceptance the frame
 * continues into br_dev_queue_push_xmit() for actual transmission.
 */
int br_forward_finish(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
63
64static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
65{
66 skb->dev = to->dev;
Herbert Xu91d2c342010-06-10 16:12:50 +000067
68 if (unlikely(netpoll_tx_running(to->dev))) {
69 if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
70 kfree_skb(skb);
71 else {
72 skb_push(skb, ETH_HLEN);
73 br_netpoll_send_skb(to, skb);
74 }
75 return;
76 }
77
Jan Engelhardt713aefa2010-03-23 04:07:21 +010078 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
79 br_forward_finish);
Linus Torvalds1da177e2005-04-16 15:20:36 -070080}
81
/* Forward a received frame out through port @to.  Consumes @skb.
 * Runs the NF_BR_FORWARD hook with both the original input device and
 * the new output device so filters can match either end.
 */
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	/* LRO-aggregated skbs must not be forwarded; warn and drop. */
	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	indev = skb->dev;	/* remember input dev before retargeting */
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
		br_forward_finish);
}
98
/* called with rcu_read_lock */
/* Send a locally-originated frame out through @to, or free it when
 * @to is absent or not eligible.  Always consumes @skb.
 */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (!to || !should_deliver(to, skb)) {
		kfree_skb(skb);
		return;
	}

	__br_deliver(to, skb);
}
109
110/* called with rcu_read_lock */
Michael Braun7f7708f2010-03-16 00:26:22 -0700111void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112{
Herbert Xu4906f992009-02-09 15:07:18 -0800113 if (should_deliver(to, skb)) {
Michael Braun7f7708f2010-03-16 00:26:22 -0700114 if (skb0)
115 deliver_clone(to, skb, __br_forward);
116 else
117 __br_forward(to, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118 return;
119 }
120
Michael Braun7f7708f2010-03-16 00:26:22 -0700121 if (!skb0)
122 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123}
124
David S. Miller87faf3c2010-03-16 14:37:47 -0700125static int deliver_clone(const struct net_bridge_port *prev,
126 struct sk_buff *skb,
Herbert Xu025d89c2010-02-27 19:41:43 +0000127 void (*__packet_hook)(const struct net_bridge_port *p,
128 struct sk_buff *skb))
129{
Herbert Xufed396a2010-06-15 21:43:07 -0700130 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
131
Herbert Xu025d89c2010-02-27 19:41:43 +0000132 skb = skb_clone(skb, GFP_ATOMIC);
133 if (!skb) {
Herbert Xu025d89c2010-02-27 19:41:43 +0000134 dev->stats.tx_dropped++;
135 return -ENOMEM;
136 }
137
138 __packet_hook(prev, skb);
139 return 0;
140}
141
/* Flood-walk helper: if @p is eligible, deliver a clone to the port
 * remembered in @prev (if any) and return @p as the new remembered
 * port; the final remembered port gets the original skb later.
 * Returns @prev unchanged when @p is not eligible, or ERR_PTR on
 * clone failure.
 */
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	if (!should_deliver(p, skb))
		return prev;

	if (prev) {
		int err = deliver_clone(prev, skb, __packet_hook);

		if (err)
			return ERR_PTR(err);
	}

	return p;
}
163
/* Flood @skb out every eligible bridge port.
 *
 * NOTE(review): original comment said "called under bridge lock", but the
 * list walk uses list_for_each_entry_rcu and both visible callers are
 * documented as running with rcu_read_lock — confirm which lock applies.
 *
 * @skb0: when non-NULL the caller keeps ownership of @skb, so every
 *        delivery (including the last) must use a clone and @skb is
 *        never freed here.
 */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb))
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* deliver a clone to the previously remembered port and
		 * remember @p, so the final port can consume @skb itself
		 */
		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))	/* clone allocation failed */
			goto out;
	}

	if (!prev)	/* no eligible port at all */
		goto out;

	/* last delivery: hand over @skb itself unless the caller keeps it */
	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
194
195
/* called with rcu_read_lock */
/* Flood a frame on the deliver path (__br_deliver / NF_BR_LOCAL_OUT).
 * Consumes @skb.
 */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
{
	br_flood(br, skb, NULL, __br_deliver);
}
201
/* called under bridge lock */
/* Flood a received frame on the forward path (__br_forward /
 * NF_BR_FORWARD).  @skb2 non-NULL means the caller keeps @skb, so only
 * clones are transmitted.
 * NOTE(review): the lock comment above may be stale — br_flood() walks
 * the port list with RCU primitives; confirm against the callers.
 */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2)
{
	br_flood(br, skb, skb2, __br_forward);
}
Herbert Xu5cb5e942010-02-27 19:41:46 +0000208
209#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
/* Deliver a multicast frame to every port in the MDB entry @mdst plus
 * every port on the bridge's multicast-router list.
 *
 * The two RCU lists are walked in lockstep, always taking the list head
 * with the higher port address first — this merge-style walk presumes
 * both lists are kept sorted by port pointer (TODO confirm), and sends
 * exactly one copy to a port that appears on both lists.
 *
 * @skb0: when non-NULL the caller keeps ownership of @skb (see br_flood).
 */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		/* pick the higher-addressed head; an exhausted list is
		 * NULL and always loses the comparison
		 */
		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))	/* clone allocation failed */
			goto out;

		/* advance every list whose head matched @port, so a port
		 * present on both lists is delivered to only once
		 */
		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)	/* nothing eligible */
		goto out;

	/* last delivery: hand over @skb itself unless the caller keeps it */
	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
258
/* called with rcu_read_lock */
/* Multicast flood on the deliver path (__br_deliver).  Consumes @skb. */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}
265
/* called with rcu_read_lock */
/* Multicast flood on the forward path (__br_forward).  @skb2 non-NULL
 * means the caller keeps @skb, so only clones are transmitted.
 */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
272#endif