/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	$Id: br_forward.c,v 1.4 2001/08/14 22:05:57 davem Exp $
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	return (skb->dev != p->dev && p->state == BR_STATE_FORWARDING);
}

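/*
 * Length used for the MTU check below: a 4-byte 802.1Q tag, if present,
 * is not counted against the egress device MTU.
 */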
static inline unsigned packet_length(const struct sk_buff *skb)
{
	return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
}

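/*
 * Final transmit step: drop frames that exceed the egress MTU (GSO
 * frames are let through and segmented later), push the Ethernet
 * header back in front of the data and queue the skb on the device.
 */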
int br_dev_queue_push_xmit(struct sk_buff *skb)
{
	/* drop mtu oversized packets except gso */
	if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
		kfree_skb(skb);
	else {
#ifdef CONFIG_BRIDGE_NETFILTER
		/* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
		if (nf_bridge_maybe_copy_header(skb))
			kfree_skb(skb);
		else
#endif
		{
			skb_push(skb, ETH_HLEN);

			dev_queue_xmit(skb);
		}
	}

	return 0;
}

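/* Runs the NF_BR_POST_ROUTING hook, then hands off to br_dev_queue_push_xmit */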
int br_forward_finish(struct sk_buff *skb)
{
	return NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}

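/*
 * __br_deliver handles locally originated traffic via the
 * NF_BR_LOCAL_OUT hook; __br_forward handles bridged traffic via the
 * NF_BR_FORWARD hook. Both continue through br_forward_finish.
 */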
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	skb->dev = to->dev;
	NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
		br_forward_finish);
}

static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	indev = skb->dev;
	skb->dev = to->dev;
	skb->ip_summed = CHECKSUM_NONE;

	NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
		br_forward_finish);
}

/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (should_deliver(to, skb)) {
		__br_forward(to, skb);
		return;
	}

	kfree_skb(skb);
}

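/*
 * Flood a frame to every forwarding port except the one it arrived on.
 * Each eligible port but the last receives a clone; the last port gets
 * the skb itself, so only one skb_clone() per extra destination is
 * needed. When 'clone' is set, the caller keeps its original skb.
 */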
/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb))
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	if (clone) {
		struct sk_buff *skb2;

		if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			br->statistics.tx_dropped++;
			return;
		}

		skb = skb2;
	}

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		if (should_deliver(p, skb)) {
			if (prev != NULL) {
				struct sk_buff *skb2;

				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
					br->statistics.tx_dropped++;
					kfree_skb(skb);
					return;
				}

				__packet_hook(prev, skb2);
			}

			prev = p;
		}
	}

	if (prev != NULL) {
		__packet_hook(prev, skb);
		return;
	}

	kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, int clone)
{
	br_flood(br, skb, clone, __br_deliver);
}

/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, int clone)
{
	br_flood(br, skb, clone, __br_forward);
}