blob: 3e45c1a1aa96124f1bf525346aecf14b704f4311 [file] [log] [blame]
/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	$Id: br_forward.c,v 1.4 2001/08/14 22:05:57 davem Exp $
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
15
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"
22
Stephen Hemminger9ef513b2006-05-25 15:58:54 -070023/* Don't forward packets to originating port or forwarding diasabled */
YOSHIFUJI Hideaki9d6f2292007-02-09 23:24:35 +090024static inline int should_deliver(const struct net_bridge_port *p,
Linus Torvalds1da177e2005-04-16 15:20:36 -070025 const struct sk_buff *skb)
26{
Stephen Hemminger9ef513b2006-05-25 15:58:54 -070027 return (skb->dev != p->dev && p->state == BR_STATE_FORWARDING);
Linus Torvalds1da177e2005-04-16 15:20:36 -070028}
29
Stephen Hemminger85ca7192006-04-26 02:39:19 -070030static inline unsigned packet_length(const struct sk_buff *skb)
31{
32 return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
33}
34
Linus Torvalds1da177e2005-04-16 15:20:36 -070035int br_dev_queue_push_xmit(struct sk_buff *skb)
36{
Herbert Xu79671682006-06-22 02:40:14 -070037 /* drop mtu oversized packets except gso */
Herbert Xu89114af2006-07-08 13:34:32 -070038 if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -070039 kfree_skb(skb);
40 else {
Linus Torvalds1da177e2005-04-16 15:20:36 -070041 /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
Stephen Hemminger3a138132006-08-26 20:28:30 -070042 if (nf_bridge_maybe_copy_header(skb))
43 kfree_skb(skb);
Stephen Hemminger07317622006-08-29 17:48:17 -070044 else {
Stephen Hemminger3a138132006-08-26 20:28:30 -070045 skb_push(skb, ETH_HLEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -070046
Stephen Hemminger3a138132006-08-26 20:28:30 -070047 dev_queue_xmit(skb);
48 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070049 }
50
51 return 0;
52}
53
54int br_forward_finish(struct sk_buff *skb)
55{
Stephen Hemminger9ef513b2006-05-25 15:58:54 -070056 return NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
57 br_dev_queue_push_xmit);
Linus Torvalds1da177e2005-04-16 15:20:36 -070058
Linus Torvalds1da177e2005-04-16 15:20:36 -070059}
60
61static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
62{
63 skb->dev = to->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -070064 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
65 br_forward_finish);
66}
67
68static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
69{
70 struct net_device *indev;
71
72 indev = skb->dev;
73 skb->dev = to->dev;
74 skb->ip_summed = CHECKSUM_NONE;
75
76 NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
77 br_forward_finish);
78}
79
/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	/* Drop the frame unless the port is eligible to send it. */
	if (!should_deliver(to, skb)) {
		kfree_skb(skb);
		return;
	}

	__br_deliver(to, skb);
}
90
/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	/* Drop the frame unless the port is eligible to send it. */
	if (!should_deliver(to, skb)) {
		kfree_skb(skb);
		return;
	}

	__br_forward(to, skb);
}
101
102/* called under bridge lock */
103static void br_flood(struct net_bridge *br, struct sk_buff *skb, int clone,
YOSHIFUJI Hideaki9d6f2292007-02-09 23:24:35 +0900104 void (*__packet_hook)(const struct net_bridge_port *p,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105 struct sk_buff *skb))
106{
107 struct net_bridge_port *p;
108 struct net_bridge_port *prev;
109
110 if (clone) {
111 struct sk_buff *skb2;
112
113 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
114 br->statistics.tx_dropped++;
115 return;
116 }
117
118 skb = skb2;
119 }
120
121 prev = NULL;
122
123 list_for_each_entry_rcu(p, &br->port_list, list) {
124 if (should_deliver(p, skb)) {
125 if (prev != NULL) {
126 struct sk_buff *skb2;
127
128 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
129 br->statistics.tx_dropped++;
130 kfree_skb(skb);
131 return;
132 }
133
134 __packet_hook(prev, skb2);
135 }
136
137 prev = p;
138 }
139 }
140
141 if (prev != NULL) {
142 __packet_hook(prev, skb);
143 return;
144 }
145
146 kfree_skb(skb);
147}
148
149
150/* called with rcu_read_lock */
151void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, int clone)
152{
153 br_flood(br, skb, clone, __br_deliver);
154}
155
156/* called under bridge lock */
157void br_flood_forward(struct net_bridge *br, struct sk_buff *skb, int clone)
158{
159 br_flood(br, skb, clone, __br_forward);
160}