#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"

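/*
 * Deliver a VLAN-tagged skb to the matching VLAN device.  Called from
 * the receive path once skb->vlan_tci has been set, either by hardware
 * acceleration or by vlan_untag().  Looks up the VLAN device configured
 * on skb->dev for that VLAN ID, retargets the skb to it and updates the
 * VLAN device's per-cpu rx statistics.  Returns false when no matching
 * VLAN device exists or the skb could not be un-shared.
 */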
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		if (vlan_id)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag() expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

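	/*
	 * Per-cpu rx counters for the VLAN device; the u64_stats seqcount
	 * keeps the 64-bit packet and byte counts consistent for readers
	 * on 32-bit SMP hosts.
	 */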
	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
{
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	return polling ? netif_receive_skb(skb) : netif_rx(skb);
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);

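/*
 * GRO counterparts of __vlan_hwaccel_rx(): record the tag on the skb
 * (or on the napi frag skb) and feed it to the GRO engine instead of
 * netif_rx()/netif_receive_skb().
 */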
gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
			      unsigned int vlan_tci, struct sk_buff *skb)
{
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	return napi_gro_receive(napi, skb);
}
EXPORT_SYMBOL(vlan_gro_receive);

gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
			    unsigned int vlan_tci)
{
	__vlan_hwaccel_put_tag(napi->skb, vlan_tci);
	return napi_gro_frags(napi);
}
EXPORT_SYMBOL(vlan_gro_frags);

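/*
 * Close the gap left by the 802.1Q tag: move the destination and
 * source MAC addresses up by VLAN_HLEN bytes so the frame carries a
 * plain Ethernet header again.  skb_cow() makes sure the header is
 * private and writable first.
 */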
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	skb_reset_mac_len(skb);
	return skb;
}

static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned char *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */

	proto = vhdr->h_vlan_encapsulated_proto;
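	/*
	 * Values of 1536 (0x0600) and above are Ethernet II EtherTypes;
	 * anything smaller is an 802.3 length field, so the payload has
	 * to be inspected to classify the frame.
	 */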
	if (ntohs(proto) >= 1536) {
		skb->protocol = proto;
		return;
	}

	rawp = skb->data;
	if (*(unsigned short *) rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}

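/*
 * Software untagging path, used when the NIC has not already stripped
 * the 802.1Q header: record the TCI in skb->vlan_tci, pull the 4-byte
 * tag out of the packet data, set skb->protocol from the encapsulated
 * protocol and rebuild an untagged mac header.  Returns NULL (and
 * frees the skb) on failure.
 */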
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set up, so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}