#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"

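/*
 * vlan_do_receive - process a frame whose VLAN tag has already been parsed
 * @skbp: pointer to the skb being received (may be replaced on copy)
 *
 * Look up the VLAN device configured for the tag in skb->vlan_tci on the
 * receiving device.  On success the skb is redirected to that VLAN device,
 * the tag is re-inserted into the packet data if VLAN_FLAG_REORDER_HDR is
 * cleared, the ingress priority is applied and per-cpu statistics are
 * updated.  Returns false when no matching VLAN device exists, leaving the
 * frame to the caller.
 */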
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		if (vlan_id)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

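	/* With VLAN_FLAG_REORDER_HDR cleared the tag must stay visible in
	 * the packet data, so put the 802.1Q header back in front of the
	 * payload before handing the skb up the stack.
	 */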
	if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag() expects skb->data to point at the MAC
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

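	/* Update per-cpu receive counters inside a u64_stats syncp section
	 * so the 64-bit counters can be read consistently on 32-bit hosts.
	 */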
	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

/* Must be invoked with rcu_read_lock or with RTNL. */
struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
					u16 vlan_id)
{
	struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);

	if (grp) {
		return vlan_group_get_device(grp, vlan_id);
	} else {
		/*
		 * Bonding slaves do not have a vlan_group assigned to them;
		 * it is assigned to the bonding master instead.
		 */
		if (netif_is_bond_slave(real_dev))
			return __vlan_find_dev_deep(real_dev->master, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);

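/* Return the lower (real) device a VLAN device is stacked on. */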
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
{
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	return polling ? netif_receive_skb(skb) : netif_rx(skb);
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);

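/* GRO receive helpers for VLAN hw acceleration: store the tag in the skb
 * and feed the frame into the GRO engine.
 */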
gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
			      unsigned int vlan_tci, struct sk_buff *skb)
{
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	return napi_gro_receive(napi, skb);
}
EXPORT_SYMBOL(vlan_gro_receive);

gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
			    unsigned int vlan_tci)
{
	__vlan_hwaccel_put_tag(napi->skb, vlan_tci);
	return napi_gro_frags(napi);
}
EXPORT_SYMBOL(vlan_gro_frags);

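/* The VLAN header has already been pulled from the payload by the caller;
 * rebuild a plain Ethernet header in place by sliding the destination and
 * source MAC addresses VLAN_HLEN bytes towards the payload, overwriting
 * the 802.1Q tag, and adjust the MAC header offset accordingly.
 * skb_cow() makes sure the header area is private and writable first.
 */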
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	skb_reset_mac_len(skb);
	return skb;
}

static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned char *rawp;

	/*
	 * This was a VLAN packet; grab the encapsulated protocol, which is
	 * what the layer-three code cares about.
	 */

	proto = vhdr->h_vlan_encapsulated_proto;
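	/* Values of 1536 (0x0600) and above are EtherTypes; anything smaller
	 * is an 802.3 length field, so the payload starts with an LLC header.
	 */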
	if (ntohs(proto) >= 1536) {
		skb->protocol = proto;
		return;
	}

	rawp = skb->data;
	if (*(unsigned short *) rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets.  Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer.  We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP.  This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}

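/* vlan_untag - strip an in-band 802.1Q header from a received frame
 *
 * Used when the NIC did not extract the VLAN tag in hardware: the tag is
 * copied from the packet data into skb->vlan_tci, skb->protocol is set to
 * the encapsulated protocol, and the frame is rewritten to look like an
 * ordinary untagged Ethernet frame.  Returns NULL (and frees the skb) on
 * allocation failure or if the packet is malformed.
 */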
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set up, nothing more to do here */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}